// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
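
/*
 * Multi-Port E-Switch (MPESW) mode handling.
 *
 * MPESW mode is reference counted: the first mlx5_lag_add_mpesw_rule()
 * call activates the LAG in MPESW mode and the last
 * mlx5_lag_del_mpesw_rule() call disables it again.  Enable/disable
 * requests are serialized by deferring them to the LAG workqueue and
 * waiting for completion, so callers may sleep.
 */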

#include <linux/netdevice.h>
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
#include "lib/mlx5.h"
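
/* Take a reference on MPESW mode; the first reference activates the LAG
 * in MPESW mode.  Fails if the LAG is already active in another mode.
 * Runs on the LAG workqueue with ldev->lock held.
 */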
static int add_mpesw_rule(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	int err;

	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
		return 0;

	if (ldev->mode != MLX5_LAG_MODE_NONE) {
		err = -EINVAL;
		goto out_err;
	}

	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
	if (err) {
		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
		goto out_err;
	}

	return 0;

out_err:
	atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
	return err;
}
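
/* Drop a reference on MPESW mode; dropping the last reference disables the LAG. */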
static void del_mpesw_rule(struct mlx5_lag *ldev)
{
	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
	    ldev->mode == MLX5_LAG_MODE_MPESW)
		mlx5_disable_lag(ldev);
}
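
/* Workqueue handler: apply the requested enable/disable under ldev->lock
 * and signal completion to the waiting requester.
 */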
static void mlx5_mpesw_work(struct work_struct *work)
{
	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
	struct mlx5_lag *ldev = mpesww->lag;

	mutex_lock(&ldev->lock);
	if (mpesww->op == MLX5_MPESW_OP_ENABLE)
		mpesww->result = add_mpesw_rule(ldev);
	else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
		del_mpesw_rule(ldev);
	mutex_unlock(&ldev->lock);

	complete(&mpesww->comp);
}
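
/* Allocate an enable/disable request, queue it on the LAG workqueue and
 * wait for it to complete.  Returns 0 if the device has no LAG struct,
 * an errno on allocation/queuing failure, or the operation's result.
 */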
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
				     enum mpesw_op op)
{
	struct mlx5_lag *ldev = dev->priv.lag;
	struct mlx5_mpesw_work_st *work;
	int err = 0;

	if (!ldev)
		return 0;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	INIT_WORK(&work->work, mlx5_mpesw_work);
	init_completion(&work->comp);
	work->op = op;
	work->lag = ldev;

	if (!queue_work(ldev->wq, &work->work)) {
		mlx5_core_warn(dev, "failed to queue mpesw work\n");
		err = -EINVAL;
		goto out;
	}
	wait_for_completion(&work->comp);
	err = work->result;
out:
	kfree(work);
	return err;
}
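
/* Release an MPESW rule reference taken by mlx5_lag_add_mpesw_rule(). */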
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
{
	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}
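
/* Take an MPESW rule reference, enabling MPESW mode on first use.
 * May sleep; returns 0 on success or a negative errno.
 */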
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
{
	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}
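
/* Mirred (redirect) actions towards a bond master are not supported
 * while the LAG is in MPESW mode; reject them with -EOPNOTSUPP.
 */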
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
{
	struct mlx5_lag *ldev = mdev->priv.lag;

	if (!netif_is_bond_master(out_dev) || !ldev)
		return 0;

	if (ldev->mode == MLX5_LAG_MODE_MPESW)
		return -EOPNOTSUPP;

	return 0;
}
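
/* Report whether the device's LAG is currently in MPESW mode. */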
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
{
	bool ret;

	ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
	return ret;
}
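
/* Reset the MPESW rule reference count at LAG init time. */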
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
{
	atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
}
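
/* Warn if any MPESW rule references remain at LAG teardown. */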
void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
{
	WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
}