// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

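/* Per-ASIC ACL state: the flex-key registry (afk), the dummy FID used by
 * ACL actions, a hashtable of rulesets keyed by (block, chain, profile ops),
 * the list of rules periodically polled for activity, and the TCAM state.
 */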
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

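/* Rulesets are unique per (flow block, chain index, profile ops) tuple;
 * this struct is the rhashtable key used to find them.
 */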
struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned int min_prio;
	unsigned int max_prio;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	u64 last_drops;
	unsigned long priv[];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

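/* Bind/unbind the block's chain-0 ruleset ("ruleset zero") to a single
 * port binding through the profile ops.
 */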
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

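/* Bind the ruleset to every existing binding of the block. On failure,
 * unwind the bindings done so far and clear ruleset_zero again.
 */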
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage, &ruleset->min_prio,
			       &ruleset->max_prio);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

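/* Get-or-create: reuse an existing ruleset for (block, chain, profile) and
 * take a reference, otherwise create a new one.
 */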
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
				   unsigned int *p_min_prio,
				   unsigned int *p_max_prio)
{
	*p_min_prio = ruleset->min_prio;
	*p_max_prio = ruleset->max_prio;
}

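/* Allocate rule info. If the caller passed in an existing afa_block (e.g.
 * when replacing the actions of a rule), reuse it; otherwise create a fresh
 * action block and mark it as owned so that rulei_destroy() frees it.
 */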
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

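/* Translation table entry from a tc pedit (htype, offset, mask) tuple to the
 * device field being modified; "shift" extracts the new value from the
 * 32-bit pedit word.
 */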
struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,	\
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,	\
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)

static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0, IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
};

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

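/* Spectrum-2 IPv6 address mangling: a 32-bit word at an odd position
 * (IP6_SIP_1/_3, IP6_DIP_1/_3) is not emitted on its own. Cache it here and
 * emit it together with the following even word (see the IP6_*_2/_4 cases
 * below).
 */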
static int
mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
					    enum mlxsw_sp_acl_mangle_field field,
					    u32 val, struct netlink_ext_ack *extack)
{
	if (!rulei->ipv6_valid) {
		rulei->ipv6.prev_val = val;
		rulei->ipv6_valid = true;
		rulei->ipv6.prev_field = field;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
	return -EOPNOTSUPP;
}

static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
	/* IPv4 fields */
	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, false,
						 true, val, 0, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, true,
						 true, val, 0, extack);
	/* IPv6 fields */
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
		return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
								   mact->field,
								   val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	default:
		break;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return acl_rulei_ops->act_mangle_field(mlxsw_sp,
							       rulei, mact,
							       val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
	return -EINVAL;
}

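/* Append a policer action and remember the policer index so that drop
 * counts can be read back in mlxsw_sp_acl_rule_get_stats().
 */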
int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  u32 index, u64 rate_bytes_ps,
				  u32 burst, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_police(rulei->act_block, index,
					    rate_bytes_ps, burst,
					    &rulei->policer_index, extack);
	if (err)
		return err;

	rulei->policer_index_valid = true;

	return 0;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct psample_group *psample_group, u32 rate,
				  u32 trunc_size, bool truncate,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	mlxsw_sp_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_sampler(rulei->act_block,
					      mlxsw_sp_port->local_port,
					      psample_group, rate, trunc_size,
					      truncate, binding->ingress,
					      extack);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* We only need ruleset with chain index 0, the implicit
		 * one, to be directly bound to device. The rest of the
		 * rulesets are bound by "Goto action set".
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

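/* Activity polling: the delayed work below walks the rule list every
 * MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS and refreshes last_used for
 * rules the hardware reports as active.
 */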
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

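/* Report packet/byte/drop counters as deltas since the previous call and
 * remember the current absolute values for the next one.
 */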
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *drops,
				u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)

{
	enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	u64 current_drops = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	if (rulei->policer_index_valid) {
		err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
							 rulei->policer_index,
							 &current_drops);
		if (err)
			return err;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*drops = current_drops - rule->last_drops;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;
	rule->last_drops = current_drops;

	return 0;
}

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}

struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};

struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
};