// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}

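/* Tunables for the periodic vregion rehash: how often the delayed work
 * runs by default, the lowest interval a user may configure, and how
 * many entry migrations one work iteration may perform before yielding.
 */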
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	int err;

	mutex_init(&tcam->lock);
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	bitmap_free(tcam->used_groups);
err_alloc_used_groups:
	bitmap_free(tcam->used_regions);
	return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	mutex_destroy(&tcam->lock);
	ops->fini(mlxsw_sp, tcam->priv);
	bitmap_free(tcam->used_groups);
	bitmap_free(tcam->used_regions);
}

int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct mutex lock; /* guards region list updates */
	struct list_head region_list;
	unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
	bool vregion_rehash_enabled;
	unsigned int *p_min_prio;
	unsigned int *p_max_prio;
};

struct mlxsw_sp_acl_tcam_rehash_ctx {
	void *hints_priv;
	bool this_is_rollback;
	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
							  * currently migrated.
							  */
	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
							* migration from in
							* a vchunk being
							* currently migrated.
							*/
	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
						       * migration at
						       * a vchunk being
						       * currently migrated.
						       */
};

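/* A vregion is the software view of a region: it owns the key info and
 * the ordered list of vchunks, and normally maps to exactly one hardware
 * region. During a rehash it temporarily spans two (region and region2)
 * while entries are migrated from the old region to the new one.
 */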
struct mlxsw_sp_acl_tcam_vregion {
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct {
		struct delayed_work dw;
		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
	} rehash;
	struct mlxsw_sp *mlxsw_sp;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};

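/* Rewrite the ordered region list of the group into the PAGT register.
 * Consecutive regions that belong to the same vregion are marked as
 * "multi", which apparently tells the hardware to treat them as one
 * lookup unit while a rehash migration is in flight.
 */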
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	mutex_init(&group->lock);

	return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled,
			     unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
	vgroup->p_min_prio = p_min_prio;
	vgroup->p_max_prio = p_max_prio;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As a priority of a vregion, return priority of the first vchunk */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}

static void
mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (list_empty(&vgroup->vregion_list))
		return;
	vregion = list_first_entry(&vgroup->vregion_list,
				   typeof(*vregion), list);
	*vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	vregion = list_last_entry(&vgroup->vregion_list,
				  typeof(*vregion), list);
	*vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int priority,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    priority)
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	mutex_unlock(&group->lock);
	return 0;

err_group_update:
	list_del(&region->list);
err_region_count_check:
	mutex_unlock(&group->lock);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mutex_lock(&group->lock);
	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region,
						    priority, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected vregion we cannot
		 * use this region, so return NULL to indicate new vregion has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected vregion we cannot
		 * use this vregion. There is still some hope that the next
		 * vregion would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate if the vregion needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
					region->id);
	kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash.dw,
			       msecs_to_jiffies(interval));
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits);

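/* One shot of the rehash work: run the rehash with a fixed budget of
 * entry-migration credits. If the budget runs out, the work reschedules
 * itself immediately to continue; otherwise the next run waits a full
 * interval.
 */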
static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
	if (credits < 0)
		/* Rehash gone out of credits so it was interrupted.
		 * Schedule the work as soon as possible to continue.
		 */
		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
	else
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	/* If a rule was added or deleted from vchunk which is currently
	 * under rehash migration, we have to reset the ventry pointers
	 * to make sure all rules are properly migrated.
	 */
	if (vregion->rehash.ctx.current_vchunk == vchunk) {
		vregion->rehash.ctx.start_ventry = NULL;
		vregion->rehash.ctx.stop_ventry = NULL;
	}
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	/* If a chunk was added or deleted from vregion we have to reset
	 * the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
	vregion->rehash.ctx.current_vchunk = NULL;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	mutex_init(&vregion->lock);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
	vregion->vgroup = vgroup;
	vregion->ref_count = 1;

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash.dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
		mutex_lock(&tcam->lock);
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
		mutex_unlock(&tcam->lock);
	}

	return vregion;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
		cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	mutex_destroy(&vregion->lock);
	kfree(vregion);
}

u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}

int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	mutex_lock(&tcam->lock);
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mutex_unlock(&tcam->lock);
	return 0;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk_element_usage vregion_elusage;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool need_split;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion) {
		if (need_split) {
			/* According to priority, new vchunk should belong to
			 * an existing vregion. However, this vchunk needs
			 * elements that vregion does not contain. We need
			 * to split the existing vregion into two and create
			 * a new vregion for the new vchunk in between.
			 * This is not supported now.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		vregion->ref_count++;
		return vregion;
	}

	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);

	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	if (--vregion->ref_count)
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct list_head *pos;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	vchunk->ref_count = 1;

	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	mutex_lock(&vregion->lock);
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

	/* Position the vchunk inside the list according to priority */
	list_for_each(pos, &vregion->vchunk_list) {
		vchunk2 = list_entry(pos, typeof(*vchunk2), list);
		if (vchunk2->priority > priority)
			break;
	}
	list_add_tail(&vchunk->list, pos);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_del(&vchunk->list);
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	mutex_unlock(&vregion->lock);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
	kfree(vchunk);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		vchunk->ref_count++;
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	mutex_unlock(&vregion->lock);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}

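/* Migrate a single ventry to the given chunk. Each actual migration
 * consumes one credit; once the budget goes negative the entry is left
 * in place and the caller records it as the point to resume from on the
 * next work iteration.
 */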
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_entry *new_entry;

	/* First check if the entry is not already where we want it to be. */
	if (ventry->entry->chunk == chunk)
		return 0;

	if (--(*credits) < 0)
		return 0;

	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = new_entry;
	return 0;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;

	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
	vchunk->chunk2 = vchunk->chunk;
	vchunk->chunk = new_chunk;
	ctx->current_vchunk = vchunk;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
	return 0;
}

static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
	ctx->current_vchunk = NULL;
}

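/* Migrate one vchunk into the given region. The old chunk is parked in
 * vchunk->chunk2 while ventries are moved one by one into the new chunk;
 * start_ventry/stop_ventry in the context bound the work so an
 * interrupted or rolled-back migration can resume where it stopped.
 */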
static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	int err;

	if (vchunk->chunk->region != region) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
							     region, ctx);
		if (err)
			return err;
	} else if (!vchunk->chunk2) {
		/* The chunk is already as it should be, nothing to do. */
		return 0;
	}

	/* If the migration got interrupted, we have the ventry to start from
	 * stored in context.
	 */
	if (ctx->start_ventry)
		ventry = ctx->start_ventry;
	else
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);

	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
		 */
		if (ventry == ctx->stop_ventry)
			break;

		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk, credits);
		if (err) {
			if (ctx->this_is_rollback) {
				/* Save the ventry which we ended with and try
				 * to continue later on.
				 */
				ctx->start_ventry = ventry;
				return err;
			}
			/* Swap the chunk and chunk2 pointers so the follow-up
			 * rollback call will see the original chunk pointer
			 * in vchunk->chunk.
			 */
			swap(vchunk->chunk, vchunk->chunk2);
			/* The rollback has to be done from beginning of the
			 * chunk, that is why we have to null the start_ventry.
			 * However, we know where to stop the rollback,
			 * at the current ventry.
			 */
			ctx->start_ventry = NULL;
			ctx->stop_ventry = ventry;
			return err;
		} else if (*credits < 0) {
			/* We are out of credits, the rest of the ventries
			 * will be migrated later. Save the ventry
			 * which we ended with.
			 */
			ctx->start_ventry = ventry;
			return 0;
		}
	}

	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	/* If the migration got interrupted, we have the vchunk
	 * we are working on stored in context.
	 */
	if (ctx->current_vchunk)
		vchunk = ctx->current_vchunk;
	else
		vchunk = list_first_entry(&vregion->vchunk_list,
					  typeof(*vchunk), list);

	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region,
							   ctx, credits);
		if (err || *credits < 0)
			return err;
	}
	return 0;
}

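/* Migrate all vchunks of the vregion into vregion->region. On failure,
 * swap the region pointers back and replay the migration in rollback
 * mode so already-moved entries return to the original region.
 */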
static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				  int *credits)
{
	int err, err2;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
						   ctx, credits);
	if (err) {
		/* In case migration was not successful, we need to swap
		 * so the original region pointer is assigned again
		 * to vregion->region.
		 */
		swap(vregion->region, vregion->region2);
		ctx->current_vchunk = NULL;
		ctx->this_is_rollback = true;
		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
							    ctx, credits);
		if (err2) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
									       vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
			/* Let the rollback be continued later on. */
		}
	}
	mutex_unlock(&vregion->lock);
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
	return err;
}

static bool
mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	return ctx->hints_priv;
}

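/* A rehash is considered in progress as long as the context holds the
 * hints from the low-level ops: hints_priv doubles as the "rehash
 * started" marker checked by mlxsw_sp_acl_tcam_vregion_rehash().
 */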
static int
mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vregion *vregion,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	struct mlxsw_sp_acl_tcam_region *new_region;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv))
		return PTR_ERR(hints_priv);

	new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						     vregion, hints_priv);
	if (IS_ERR(new_region)) {
		err = PTR_ERR(new_region);
		goto err_region_create;
	}

	/* vregion->region contains the pointer to the new region
	 * we are going to migrate to.
	 */
	vregion->region2 = vregion->region;
	vregion->region = new_region;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region2->group,
						    new_region, priority,
						    vregion->region2);
	if (err)
		goto err_group_region_attach;

	ctx->hints_priv = hints_priv;
	ctx->this_is_rollback = false;

	return 0;

err_group_region_attach:
	vregion->region = vregion->region2;
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
err_region_create:
	ops->region_rehash_hints_put(hints_priv);
	return err;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	ops->region_rehash_hints_put(ctx->hints_priv);
	ctx->hints_priv = NULL;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
	int err;

	/* Check if the previous rehash work was interrupted
	 * which means we have to continue it now.
	 * If not, start a new rehash.
	 */
	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
							     vregion, ctx);
		if (err) {
			if (err != -EAGAIN)
				dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
			return;
		}
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
						ctx, credits);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");

	if (*credits >= 0)
		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}

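/* Element-usage patterns tried in order when no template is set: the
 * first pattern that is a superset of the requested usage wins, so rules
 * with similar keys can presumably share a vregion instead of each
 * getting a region with an exact-fit key.
 */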
1545 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
1546 MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
1547 MLXSW_AFK_ELEMENT_DMAC_32_47,
1548 MLXSW_AFK_ELEMENT_DMAC_0_31,
1549 MLXSW_AFK_ELEMENT_SMAC_32_47,
1550 MLXSW_AFK_ELEMENT_SMAC_0_31,
1551 MLXSW_AFK_ELEMENT_ETHERTYPE,
1552 MLXSW_AFK_ELEMENT_IP_PROTO,
1553 MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1554 MLXSW_AFK_ELEMENT_DST_IP_0_31,
1555 MLXSW_AFK_ELEMENT_DST_L4_PORT,
1556 MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1557 MLXSW_AFK_ELEMENT_VID,
1558 MLXSW_AFK_ELEMENT_PCP,
1559 MLXSW_AFK_ELEMENT_TCP_FLAGS,
1560 MLXSW_AFK_ELEMENT_IP_TTL_,
1561 MLXSW_AFK_ELEMENT_IP_ECN,
1562 MLXSW_AFK_ELEMENT_IP_DSCP,
1563 };
1564
1565 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
1566 MLXSW_AFK_ELEMENT_ETHERTYPE,
1567 MLXSW_AFK_ELEMENT_IP_PROTO,
1568 MLXSW_AFK_ELEMENT_SRC_IP_96_127,
1569 MLXSW_AFK_ELEMENT_SRC_IP_64_95,
1570 MLXSW_AFK_ELEMENT_SRC_IP_32_63,
1571 MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1572 MLXSW_AFK_ELEMENT_DST_IP_96_127,
1573 MLXSW_AFK_ELEMENT_DST_IP_64_95,
1574 MLXSW_AFK_ELEMENT_DST_IP_32_63,
1575 MLXSW_AFK_ELEMENT_DST_IP_0_31,
1576 MLXSW_AFK_ELEMENT_DST_L4_PORT,
1577 MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1578 };
1579
1580 static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
1581 {
1582 .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
1583 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
1584 },
1585 {
1586 .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
1587 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
1588 },
1589 };
1590
1591 #define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
1592 ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
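
/* A minimal illustrative sketch (not a helper that exists here) of how
 * these patterns are meant to be consumed: pick the first pattern whose
 * element set covers the rule's element usage, so a group allocates keys
 * no wider than what it actually matches on. It assumes the
 * mlxsw_afk_element_usage_fill() and mlxsw_afk_element_usage_subset()
 * helpers from core_acl_flex_keys.h.
 *
 *	static const struct mlxsw_sp_acl_tcam_pattern *
 *	example_pattern_pick(struct mlxsw_afk_element_usage *elusage)
 *	{
 *		struct mlxsw_afk_element_usage pattern_elusage;
 *		int i;
 *
 *		for (i = 0; i < MLXSW_SP_ACL_TCAM_PATTERNS_COUNT; i++) {
 *			const struct mlxsw_sp_acl_tcam_pattern *pattern =
 *				&mlxsw_sp_acl_tcam_patterns[i];
 *
 *			mlxsw_afk_element_usage_fill(&pattern_elusage,
 *						     pattern->elements,
 *						     pattern->elements_count);
 *			if (mlxsw_afk_element_usage_subset(elusage,
 *							   &pattern_elusage))
 *				return pattern;
 *		}
 *		return NULL;
 *	}
 */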

struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage,
				     unsigned int *p_min_prio,
				     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage, true,
					    p_min_prio, p_max_prio);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
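	/* Replacing a rule's actions in place is not supported for the
	 * flower profile.
	 */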
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage,
				 unsigned int *p_min_prio,
				 unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage, false,
					   p_min_prio, p_max_prio);
	if (err)
		return err;

	/* For most of the TCAM clients it would make sense to take a tcam
	 * chunk only when the first rule is written. This is not the case for
	 * the multicast router, as it is required to bind the multicast router
	 * to a specific ACL Group ID which must exist in HW before the
	 * multicast router is initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing the multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
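	/* Activity tracking is not implemented for multicast router rules,
	 * so the rule is always reported as inactive.
	 */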
	*activity = false;

	return 0;
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}

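/* A minimal usage sketch, illustrative only: a caller is expected to look
 * up the per-profile callbacks once and then operate purely through them,
 * sizing the opaque per-ruleset private area from ops->ruleset_priv_size.
 * The ruleset layout and error handling below are assumptions, not code
 * that exists in this file.
 *
 *	const struct mlxsw_sp_acl_profile_ops *ops;
 *
 *	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp,
 *					    MLXSW_SP_ACL_PROFILE_FLOWER);
 *	if (!ops)
 *		return -EINVAL;
 *	ruleset = kzalloc(sizeof(*ruleset) + ops->ruleset_priv_size,
 *			  GFP_KERNEL);
 *	if (!ruleset)
 *		return -ENOMEM;
 *	err = ops->ruleset_add(mlxsw_sp, tcam, ruleset->priv, tmplt_elusage,
 *			       &min_prio, &max_prio);
 */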