// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/pkt_cls.h>

#include "sparx5_main.h"
#include "sparx5_qos.h"

/* Max rates for leak groups, in kbps (one entry per leak group) */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
	1048568, /*  1.049 Gbps */
	2621420, /*  2.621 Gbps */
	10485680, /* 10.486 Gbps */
	26214200 /* 26.214 Gbps */
};

/* Per-layer leak group state, indexed by HSCH scheduler layer */
static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
21 
/* Read the leak time (ns) currently programmed for a leak group.
 * Zero means the group is disabled.
 */
static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 val = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));

	return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(val);
}
29 
/* Program the leak time (ns) of a leak group; writing zero disables it */
static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
				    u32 leak_time)
{
	u32 val = HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time);

	spx5_wr(val, sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
}
36 
/* Get the index of the first scheduler element in a leak group chain */
static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 val = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));

	return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(val);
}
44 
/* Get the element linked after 'idx' in a leak group chain. An element
 * that links to itself terminates the chain. 'layer' and 'group' are
 * unused here but kept for symmetry with the other chain helpers.
 */
static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)

{
	u32 val = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));

	return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(val);
}
54 
/* Walk a leak group chain and return the last element, i.e. the one
 * whose link points to itself. The group must not be empty.
 */
static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 cur = sparx5_lg_get_first(sparx5, layer, group);
	u32 nxt = sparx5_lg_get_next(sparx5, layer, group, cur);

	while (cur != nxt) {
		cur = nxt;
		nxt = sparx5_lg_get_next(sparx5, layer, group, cur);
	}

	return cur;
}
69 
sparx5_lg_is_last(struct sparx5 * sparx5,u32 layer,u32 group,u32 idx)70 static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
71 			      u32 idx)
72 {
73 	return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
74 }
75 
sparx5_lg_is_first(struct sparx5 * sparx5,u32 layer,u32 group,u32 idx)76 static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
77 			       u32 idx)
78 {
79 	return idx == sparx5_lg_get_first(sparx5, layer, group);
80 }
81 
sparx5_lg_is_empty(struct sparx5 * sparx5,u32 layer,u32 group)82 static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
83 {
84 	return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
85 }
86 
sparx5_lg_is_singular(struct sparx5 * sparx5,u32 layer,u32 group)87 static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
88 {
89 	if (sparx5_lg_is_empty(sparx5, layer, group))
90 		return false;
91 
92 	return sparx5_lg_get_first(sparx5, layer, group) ==
93 	       sparx5_lg_get_last(sparx5, layer, group);
94 }
95 
/* Start leaking for a group by programming its (non-zero) leak time */
static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
			     u32 leak_time)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
}
101 
/* Stop leaking for a group; a zero leak time also marks it empty */
static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, 0);
}
106 
/* Find which leak group (if any) scheduler element 'idx' belongs to.
 * Returns 0 and writes the group to *group when found, -1 otherwise.
 */
static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
					u32 idx, u32 *group)
{
	u32 cur, nxt;
	int grp;

	for (grp = 0; grp < SPX5_HSCH_LEAK_GRP_CNT; grp++) {
		if (sparx5_lg_is_empty(sparx5, layer, grp))
			continue;

		/* Walk the chain; an element linking to itself ends it */
		cur = sparx5_lg_get_first(sparx5, layer, grp);
		for (;;) {
			if (cur == idx) {
				*group = grp;
				return 0; /* Found it */
			}

			nxt = sparx5_lg_get_next(sparx5, layer, grp, cur);
			if (nxt == cur)
				break; /* Not in this group */

			cur = nxt;
		}
	}

	return -1;
}
135 
/* Pick the smallest leak group whose max rate can serve 'rate'.
 * Returns 0 and writes the group to *group on success, -1 when the rate
 * exceeds all groups.
 */
static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
{
	const struct sparx5_layer *l = &layers[layer];
	u32 grp;

	for (grp = 0; grp < SPX5_HSCH_LEAK_GRP_CNT; grp++) {
		if (rate <= l->leak_groups[grp].max_rate) {
			*group = grp;
			return 0;
		}
	}

	return -1;
}
152 
/* Locate 'idx' in a leak group chain and report its neighbours.
 *
 * On success (idx found) returns 0 with *prev set to the element before
 * idx (or the first element if idx is first), *next to the element idx
 * links to, and *first to the head of the chain. Returns -1 when idx is
 * not in the chain; the outputs then hold the last walk state.
 *
 * Fix: the original had an unreachable 'return -1;' after the infinite
 * loop (both loop exits already return); the dead statement is removed.
 */
static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
				  u32 idx, u32 *prev, u32 *next, u32 *first)
{
	u32 itr;

	*first = sparx5_lg_get_first(sparx5, layer, group);
	*prev = *first;
	*next = *first;
	itr = *first;

	for (;;) {
		*next = sparx5_lg_get_next(sparx5, layer, group, itr);

		if (itr == idx)
			return 0; /* Found it */

		if (itr == *next)
			return -1; /* Was not found */

		*prev = itr;
		itr = *next;
	}
}
178 
/* Apply a leak group chain update to hardware.
 *
 * Writes the idx->idx_next link and the group's first element, then
 * re-enables leaking with the group's precomputed leak time. Leaking is
 * always stopped first so the hardware never walks a half-updated chain.
 * When 'empty' is set the group is simply left disabled.
 *
 * NOTE(review): the register write order below is hardware-mandated;
 * do not reorder.
 */
static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 se_first, u32 idx, u32 idx_next, bool empty)
{
	u32 leak_time = layers[layer].leak_groups[group].leak_time;

	/* Stop leaking */
	sparx5_lg_disable(sparx5, layer, group);

	if (empty)
		return 0;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Link elements */
	spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
		HSCH_SE_CONNECT(idx));

	/* Set the first element. */
	spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
		 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
		 HSCH_HSCH_LEAK_CFG(layer, group));

	/* Start leaking */
	sparx5_lg_enable(sparx5, layer, group, leak_time);

	return 0;
}
208 
/* Remove scheduler element 'idx' from a leak group chain.
 *
 * The chain is singly linked and terminated by an element pointing to
 * itself, so removal rewrites at most one link and/or the group's first
 * pointer:
 * - only element:  disable the whole group
 * - last element:  make prev the new terminator (point it to itself)
 * - first element: advance the group's first pointer to next
 * - middle:        link prev past idx
 */
static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
{
	u32 first, next, prev;
	bool empty = false;

	/* idx *must* be present in the leak group */
	WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
				       &first) < 0);

	if (sparx5_lg_is_singular(sparx5, layer, group)) {
		empty = true;
	} else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
		/* idx is removed, prev is now last */
		idx = prev;
		next = prev;
	} else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
		/* idx is removed and points to itself, first is next */
		first = next;
		next = idx;
	} else {
		/* Next is not touched */
		idx = prev;
	}

	return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
				  empty);
}
236 
/* Insert scheduler element 'idx' at the head of 'new_group's chain,
 * moving it out of any group it is currently shaping in first.
 */
static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
			 u32 idx)
{
	u32 cur_group, first, next;

	pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
		 idx);

	/* Is this SE already shaping ? */
	if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &cur_group) >= 0) {
		if (cur_group == new_group)
			return 0; /* Nothing to do here */

		/* Delete from old group */
		sparx5_lg_del(sparx5, layer, cur_group, idx);
	}

	/* We always add to head of the list */
	first = idx;
	next = sparx5_lg_is_empty(sparx5, layer, new_group) ?
		       idx :
		       sparx5_lg_get_first(sparx5, layer, new_group);

	return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
				  false);
}
267 
/* Program a shaper (mode, rate, burst) on scheduler element 'idx' and
 * update its leak group membership. A zero rate and burst means the
 * shaper is being torn down, so the element leaves its leak group.
 */
static int sparx5_shaper_conf_set(struct sparx5_port *port,
				  const struct sparx5_shaper *sh, u32 layer,
				  u32 idx, u32 group)
{
	struct sparx5 *sparx5 = port->sparx5;
	bool remove = !sh->rate && !sh->burst;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Set frame mode */
	spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
		 sparx5, HSCH_SE_CFG(idx));

	/* Set committed rate and burst */
	spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
			HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
		sparx5, HSCH_CIR_CFG(idx));

	/* Leak group membership must only change after the shaper
	 * configuration has been set
	 */
	if (remove)
		sparx5_lg_del(sparx5, layer, group, idx);
	else
		sparx5_lg_add(sparx5, layer, group, idx);

	return 0;
}
298 
/* Convert a relative DWRR weight to the hardware cost representation.
 * Cost is inversely proportional to weight: the minimum weight maps to
 * the maximum cost. Uses 4 fractional bits and rounds to nearest.
 */
static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
{
	u32 scaled = (SPX5_DWRR_COST_MAX << 4) * weight_min / weight;

	return ((scaled + 8) >> 4) - 1;
}
304 
sparx5_dwrr_conf_set(struct sparx5_port * port,struct sparx5_dwrr * dwrr)305 static int sparx5_dwrr_conf_set(struct sparx5_port *port,
306 				struct sparx5_dwrr *dwrr)
307 {
308 	int i;
309 
310 	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
311 		 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
312 		 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
313 		 port->sparx5, HSCH_HSCH_CFG_CFG);
314 
315 	/* Number of *lower* indexes that are arbitrated dwrr */
316 	spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
317 		 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
318 		 HSCH_SE_CFG(port->portno));
319 
320 	for (i = 0; i < dwrr->count; i++) {
321 		spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
322 			 HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
323 			 HSCH_DWRR_ENTRY(i));
324 	}
325 
326 	return 0;
327 }
328 
sparx5_leak_groups_init(struct sparx5 * sparx5)329 static int sparx5_leak_groups_init(struct sparx5 *sparx5)
330 {
331 	struct sparx5_layer *layer;
332 	u32 sys_clk_per_100ps;
333 	struct sparx5_lg *lg;
334 	u32 leak_time_us;
335 	int i, ii;
336 
337 	sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
338 
339 	for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
340 		layer = &layers[i];
341 		for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
342 			lg = &layer->leak_groups[ii];
343 			lg->max_rate = spx5_hsch_max_group_rate[ii];
344 
345 			/* Calculate the leak time in us, to serve a maximum
346 			 * rate of 'max_rate' for this group
347 			 */
348 			leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
349 
350 			/* Hardware wants leak time in ns */
351 			lg->leak_time = 1000 * leak_time_us;
352 
353 			/* Calculate resolution */
354 			lg->resolution = 1000 / leak_time_us;
355 
356 			/* Maximum number of shapers that can be served by
357 			 * this leak group
358 			 */
359 			lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
360 
361 			/* Example:
362 			 * Wanted bandwidth is 100Mbit:
363 			 *
364 			 * 100 mbps can be served by leak group zero.
365 			 *
366 			 * leak_time is 125000 ns.
367 			 * resolution is: 8
368 			 *
369 			 * cir          = 100000 / 8 = 12500
370 			 * leaks_pr_sec = 125000 / 10^9 = 8000
371 			 * bw           = 12500 * 8000 = 10^8 (100 Mbit)
372 			 */
373 
374 			/* Disable by default - this also indicates an empty
375 			 * leak group
376 			 */
377 			sparx5_lg_disable(sparx5, i, ii);
378 		}
379 	}
380 
381 	return 0;
382 }
383 
/* One-time QoS initialization: set up the shaper leak groups */
int sparx5_qos_init(struct sparx5 *sparx5)
{
	int err = sparx5_leak_groups_init(sparx5);

	if (err < 0)
		return err;

	return 0;
}
394 
/* Offload an mqprio configuration: exactly SPX5_PRIOS traffic classes,
 * each mapped 1:1 onto one queue. Returns -EINVAL for any other count.
 */
int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
{
	int tc;

	if (num_tc != SPX5_PRIOS) {
		netdev_err(ndev, "Only %d traffic classes supported\n",
			   SPX5_PRIOS);
		return -EINVAL;
	}

	netdev_set_num_tc(ndev, num_tc);

	for (tc = 0; tc < num_tc; tc++)
		netdev_set_tc_queue(ndev, tc, 1, tc);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}
415 
sparx5_tc_mqprio_del(struct net_device * ndev)416 int sparx5_tc_mqprio_del(struct net_device *ndev)
417 {
418 	netdev_reset_tc(ndev);
419 
420 	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
421 		   ndev->num_tc, ndev->real_num_tx_queues);
422 
423 	return 0;
424 }
425 
/* Offload a tc-tbf shaper onto scheduler element 'idx' in 'layer'.
 *
 * The requested rate (bytes/s) is converted to kbps, validated against
 * the hardware min/max, quantized to the selected leak group's
 * resolution and programmed via sparx5_shaper_conf_set().
 *
 * Fix: sh.rate and lg->max_rate are u32, so pr_debug() must use %u,
 * not %d; also terminate the first debug message with a newline.
 */
int sparx5_tc_tbf_add(struct sparx5_port *port,
		      struct tc_tbf_qopt_offload_replace_params *params,
		      u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {
		.mode = SPX5_SE_MODE_DATARATE,
		.rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
		.burst = params->max_size,
	};
	struct sparx5_lg *lg;
	u32 group;

	/* Find suitable group for this se */
	if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
		pr_debug("Could not find leak group for se with rate: %u\n",
			 sh.rate);
		return -EINVAL;
	}

	lg = &layers[layer].leak_groups[group];

	pr_debug("Found matching group (speed: %u)\n", lg->max_rate);

	if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
		return -EINVAL;

	/* Calculate committed rate and burst */
	sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
	sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);

	if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
		return -EINVAL;

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
461 
/* Remove a tc-tbf shaper from scheduler element 'idx' in 'layer'.
 *
 * Fix: the return value of sparx5_lg_get_group_by_index() was ignored;
 * when the element was not in any leak group, 'group' was passed on
 * uninitialized (undefined behavior) and sparx5_lg_del() would WARN.
 * Treat "not found" as nothing to delete and return success.
 */
int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {0};
	u32 group;

	if (sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group) < 0)
		return 0; /* No shaper installed on this element */

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
471 
sparx5_tc_ets_add(struct sparx5_port * port,struct tc_ets_qopt_offload_replace_params * params)472 int sparx5_tc_ets_add(struct sparx5_port *port,
473 		      struct tc_ets_qopt_offload_replace_params *params)
474 {
475 	struct sparx5_dwrr dwrr = {0};
476 	/* Minimum weight for each iteration */
477 	unsigned int w_min = 100;
478 	int i;
479 
480 	/* Find minimum weight for all dwrr bands */
481 	for (i = 0; i < SPX5_PRIOS; i++) {
482 		if (params->quanta[i] == 0)
483 			continue;
484 		w_min = min(w_min, params->weights[i]);
485 	}
486 
487 	for (i = 0; i < SPX5_PRIOS; i++) {
488 		/* Strict band; skip */
489 		if (params->quanta[i] == 0)
490 			continue;
491 
492 		dwrr.count++;
493 
494 		/* On the sparx5, bands with higher indexes are preferred and
495 		 * arbitrated strict. Strict bands are put in the lower indexes,
496 		 * by tc, so we reverse the bands here.
497 		 *
498 		 * Also convert the weight to something the hardware
499 		 * understands.
500 		 */
501 		dwrr.cost[SPX5_PRIOS - i - 1] =
502 			sparx5_weight_to_hw_cost(w_min, params->weights[i]);
503 	}
504 
505 	return sparx5_dwrr_conf_set(port, &dwrr);
506 }
507 
sparx5_tc_ets_del(struct sparx5_port * port)508 int sparx5_tc_ets_del(struct sparx5_port *port)
509 {
510 	struct sparx5_dwrr dwrr = {0};
511 
512 	return sparx5_dwrr_conf_set(port, &dwrr);
513 }
514