// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"

esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport * vport)9 static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
10 {
11 if (!vport->egress.offloads.fwd_rule)
12 return;
13
14 mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);
15 vport->egress.offloads.fwd_rule = NULL;
16 }
17
esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport * vport)18 static void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport)
19 {
20 if (!vport->egress.offloads.bounce_rule)
21 return;
22
23 mlx5_del_flow_rules(vport->egress.offloads.bounce_rule);
24 vport->egress.offloads.bounce_rule = NULL;
25 }
26
esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch * esw,struct mlx5_vport * vport,struct mlx5_flow_destination * fwd_dest)27 static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
28 struct mlx5_vport *vport,
29 struct mlx5_flow_destination *fwd_dest)
30 {
31 struct mlx5_flow_act flow_act = {};
32 int err = 0;
33
34 esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n",
35 vport->vport, fwd_dest->vport.num);
36
37 /* Delete the old egress forward-to-vport rule if any */
38 esw_acl_egress_ofld_fwd2vport_destroy(vport);
39
40 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
41
42 vport->egress.offloads.fwd_rule =
43 mlx5_add_flow_rules(vport->egress.acl, NULL,
44 &flow_act, fwd_dest, 1);
45 if (IS_ERR(vport->egress.offloads.fwd_rule)) {
46 err = PTR_ERR(vport->egress.offloads.fwd_rule);
47 esw_warn(esw->dev,
48 "vport(%d) failed to add fwd2vport acl rule err(%d)\n",
49 vport->vport, err);
50 vport->egress.offloads.fwd_rule = NULL;
51 }
52
53 return err;
54 }
55
esw_acl_egress_ofld_rules_create(struct mlx5_eswitch * esw,struct mlx5_vport * vport,struct mlx5_flow_destination * fwd_dest)56 static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw,
57 struct mlx5_vport *vport,
58 struct mlx5_flow_destination *fwd_dest)
59 {
60 int err = 0;
61 int action;
62
63 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
64 /* For prio tag mode, there is only 1 FTEs:
65 * 1) prio tag packets - pop the prio tag VLAN, allow
66 * Unmatched traffic is allowed by default
67 */
68 esw_debug(esw->dev,
69 "vport[%d] configure prio tag egress rules\n", vport->vport);
70
71 action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
72 action |= fwd_dest ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
73 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
74
75 /* prio tag vlan rule - pop it so vport receives untagged packets */
76 err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action);
77 if (err)
78 goto prio_err;
79 }
80
81 if (fwd_dest) {
82 err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest);
83 if (err)
84 goto fwd_err;
85 }
86
87 return 0;
88
89 fwd_err:
90 esw_acl_egress_vlan_destroy(vport);
91 prio_err:
92 return err;
93 }
94
/* Remove every egress ACL rule installed on @vport: the prio-tag VLAN rule,
 * the forward-to-vport rule and the bounce rule. Each helper is a no-op
 * when its rule is not present, so this is safe to call unconditionally.
 */
static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
{
	esw_acl_egress_vlan_destroy(vport);
	esw_acl_egress_ofld_fwd2vport_destroy(vport);
	esw_acl_egress_ofld_bounce_rule_destroy(vport);
}
101
esw_acl_egress_ofld_groups_create(struct mlx5_eswitch * esw,struct mlx5_vport * vport)102 static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
103 struct mlx5_vport *vport)
104 {
105 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
106 struct mlx5_flow_group *fwd_grp;
107 u32 *flow_group_in;
108 u32 flow_index = 0;
109 int ret = 0;
110
111 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
112 ret = esw_acl_egress_vlan_grp_create(esw, vport);
113 if (ret)
114 return ret;
115
116 flow_index++;
117 }
118
119 if (!mlx5_esw_acl_egress_fwd2vport_supported(esw))
120 goto out;
121
122 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
123 if (!flow_group_in) {
124 ret = -ENOMEM;
125 goto fwd_grp_err;
126 }
127
128 /* This group holds 1 FTE to forward all packets to other vport
129 * when bond vports is supported.
130 */
131 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
132 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
133 fwd_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
134 if (IS_ERR(fwd_grp)) {
135 ret = PTR_ERR(fwd_grp);
136 esw_warn(esw->dev,
137 "Failed to create vport[%d] egress fwd2vport flow group, err(%d)\n",
138 vport->vport, ret);
139 kvfree(flow_group_in);
140 goto fwd_grp_err;
141 }
142 vport->egress.offloads.fwd_grp = fwd_grp;
143 kvfree(flow_group_in);
144 return 0;
145
146 fwd_grp_err:
147 esw_acl_egress_vlan_grp_destroy(vport);
148 out:
149 return ret;
150 }
151
/* Destroy all flow groups of @vport's egress ACL table, in the reverse
 * order of their flow indices: fwd2vport group, bounce group, then the
 * VLAN group. Each group pointer is cleared after destruction so repeated
 * calls are safe.
 */
static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.offloads.fwd_grp)) {
		mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp);
		vport->egress.offloads.fwd_grp = NULL;
	}

	if (!IS_ERR_OR_NULL(vport->egress.offloads.bounce_grp)) {
		mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
		vport->egress.offloads.bounce_grp = NULL;
	}

	esw_acl_egress_vlan_grp_destroy(vport);
}
166
esw_acl_egress_needed(struct mlx5_eswitch * esw,u16 vport_num)167 static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
168 {
169 return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
170 }
171
esw_acl_egress_ofld_setup(struct mlx5_eswitch * esw,struct mlx5_vport * vport)172 int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
173 {
174 int table_size = 0;
175 int err;
176
177 if (!mlx5_esw_acl_egress_fwd2vport_supported(esw) &&
178 !MLX5_CAP_GEN(esw->dev, prio_tag_required))
179 return 0;
180
181 if (!esw_acl_egress_needed(esw, vport->vport))
182 return 0;
183
184 esw_acl_egress_ofld_rules_destroy(vport);
185
186 if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
187 table_size++;
188 if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
189 table_size++;
190 vport->egress.acl = esw_acl_table_create(esw, vport,
191 MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
192 if (IS_ERR(vport->egress.acl)) {
193 err = PTR_ERR(vport->egress.acl);
194 vport->egress.acl = NULL;
195 return err;
196 }
197
198 err = esw_acl_egress_ofld_groups_create(esw, vport);
199 if (err)
200 goto group_err;
201
202 esw_debug(esw->dev, "vport[%d] configure egress rules\n", vport->vport);
203
204 err = esw_acl_egress_ofld_rules_create(esw, vport, NULL);
205 if (err)
206 goto rules_err;
207
208 return 0;
209
210 rules_err:
211 esw_acl_egress_ofld_groups_destroy(vport);
212 group_err:
213 esw_acl_egress_table_destroy(vport);
214 return err;
215 }
216
/* Tear down everything esw_acl_egress_ofld_setup() created for @vport.
 * Order matters: rules must go before their groups, and groups before
 * the table that contains them.
 */
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_rules_destroy(vport);
	esw_acl_egress_ofld_groups_destroy(vport);
	esw_acl_egress_table_destroy(vport);
}
223
/* Configure egress ACLs for a bonded vport pair: the active vport gets
 * plain (non-forwarding) egress rules, while all egress traffic of the
 * passive vport is forwarded to the active vport.
 * Returns 0 on success or a negative errno.
 */
int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
				   u16 passive_vport_num)
{
	struct mlx5_vport *passive_vport = mlx5_eswitch_get_vport(esw, passive_vport_num);
	struct mlx5_vport *active_vport = mlx5_eswitch_get_vport(esw, active_vport_num);
	struct mlx5_flow_destination fwd_dest = {};
	int err;

	if (IS_ERR(active_vport))
		return PTR_ERR(active_vport);
	if (IS_ERR(passive_vport))
		return PTR_ERR(passive_vport);

	/* Cleanup and recreate rules WITHOUT fwd2vport of active vport.
	 * Fix: this error was previously ignored, which could leave the
	 * active vport with no egress rules while the passive vport was
	 * still set up to forward to it.
	 */
	esw_acl_egress_ofld_rules_destroy(active_vport);
	err = esw_acl_egress_ofld_rules_create(esw, active_vport, NULL);
	if (err)
		return err;

	/* Cleanup and recreate all rules + fwd2vport rule of passive vport to forward */
	esw_acl_egress_ofld_rules_destroy(passive_vport);
	fwd_dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	fwd_dest.vport.num = active_vport_num;
	fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	fwd_dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;

	return esw_acl_egress_ofld_rules_create(esw, passive_vport, &fwd_dest);
}
249
/* Restore plain (non-forwarding) egress ACL rules on @vport after a bond
 * is dissolved. Returns 0 on success or a negative errno.
 */
int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	/* Drop any fwd2vport rule along with the rest, then rebuild defaults. */
	esw_acl_egress_ofld_rules_destroy(vport);
	return esw_acl_egress_ofld_rules_create(esw, vport, NULL);
}
260