1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/err.h>
9 #include <linux/bug.h>
10 #include <linux/export.h>
11 #include <linux/clk-provider.h>
12 #include <linux/delay.h>
13 #include <linux/rational.h>
14 #include <linux/regmap.h>
15 #include <linux/math64.h>
16 #include <linux/minmax.h>
17 #include <linux/slab.h>
18
19 #include <asm/div64.h>
20
21 #include "clk-rcg.h"
22 #include "common.h"
23
24 #define CMD_REG 0x0
25 #define CMD_UPDATE BIT(0)
26 #define CMD_ROOT_EN BIT(1)
27 #define CMD_DIRTY_CFG BIT(4)
28 #define CMD_DIRTY_N BIT(5)
29 #define CMD_DIRTY_M BIT(6)
30 #define CMD_DIRTY_D BIT(7)
31 #define CMD_ROOT_OFF BIT(31)
32
33 #define CFG_REG 0x4
34 #define CFG_SRC_DIV_SHIFT 0
35 #define CFG_SRC_SEL_SHIFT 8
36 #define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
37 #define CFG_MODE_SHIFT 12
38 #define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
39 #define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
40 #define CFG_HW_CLK_CTRL_MASK BIT(20)
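/*
 * CFG register layout as used below: the source divider sits in the low
 * hid_width bits, the source select field in bits [10:8], the MND mode
 * field in bits [13:12] and the hardware clock control enable in bit 20.
 */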
41
42 #define M_REG 0x8
43 #define N_REG 0xc
44 #define D_REG 0x10
45
46 #define RCG_CFG_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
47 #define RCG_M_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
48 #define RCG_N_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
49 #define RCG_D_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
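/*
 * The MND counter registers are programmed in inverted form: M holds the
 * M value directly, N holds ~(N - M) and D holds ~(2 * D), as can be seen
 * in __clk_rcg2_configure() and clk_rcg2_get_duty_cycle() below.
 */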
50
51 /* Dynamic Frequency Scaling */
52 #define MAX_PERF_LEVEL 8
53 #define SE_CMD_DFSR_OFFSET 0x14
54 #define SE_CMD_DFS_EN BIT(0)
55 #define SE_PERF_DFSR(level) (0x1c + 0x4 * (level))
56 #define SE_PERF_M_DFSR(level) (0x5c + 0x4 * (level))
57 #define SE_PERF_N_DFSR(level) (0x9c + 0x4 * (level))
58
59 enum freq_policy {
60 FLOOR,
61 CEIL,
62 };
63
64 static int clk_rcg2_is_enabled(struct clk_hw *hw)
65 {
66 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
67 u32 cmd;
68 int ret;
69
70 ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
71 if (ret)
72 return ret;
73
74 return (cmd & CMD_ROOT_OFF) == 0;
75 }
76
77 static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
78 {
79 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
80 int num_parents = clk_hw_get_num_parents(hw);
81 int i;
82
83 cfg &= CFG_SRC_SEL_MASK;
84 cfg >>= CFG_SRC_SEL_SHIFT;
85
86 for (i = 0; i < num_parents; i++)
87 if (cfg == rcg->parent_map[i].cfg)
88 return i;
89
90 pr_debug("%s: Clock %s has invalid parent, using default.\n",
91 __func__, clk_hw_get_name(hw));
92 return 0;
93 }
94
95 static u8 clk_rcg2_get_parent(struct clk_hw *hw)
96 {
97 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
98 u32 cfg;
99 int ret;
100
101 ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
102 if (ret) {
103 pr_debug("%s: Unable to read CFG register for %s\n",
104 __func__, clk_hw_get_name(hw));
105 return 0;
106 }
107
108 return __clk_rcg2_get_parent(hw, cfg);
109 }
110
111 static int update_config(struct clk_rcg2 *rcg)
112 {
113 int count, ret;
114 u32 cmd;
115 struct clk_hw *hw = &rcg->clkr.hw;
116 const char *name = clk_hw_get_name(hw);
117
118 ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
119 CMD_UPDATE, CMD_UPDATE);
120 if (ret)
121 return ret;
122
123 /* Wait for update to take effect */
124 for (count = 500; count > 0; count--) {
125 ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
126 if (ret)
127 return ret;
128 if (!(cmd & CMD_UPDATE))
129 return 0;
130 udelay(1);
131 }
132
133 WARN(1, "%s: rcg didn't update its configuration.", name);
134 return -EBUSY;
135 }
136
137 static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
138 {
139 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
140 int ret;
141 u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
142
143 ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
144 CFG_SRC_SEL_MASK, cfg);
145 if (ret)
146 return ret;
147
148 return update_config(rcg);
149 }
150
151 /*
152 * Calculate m/n:d rate
153 *
154 *          parent_rate     m
155 *   rate = ----------- x  ---
156 *            hid_div       n
157 */
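/*
 * Worked example (illustrative values): with parent_rate = 800 MHz, a
 * hid_div field of 3 (i.e. divide-by-2, since the field encodes
 * 2 * divider - 1) and M/N = 1/2 in MND mode, calc_rate() returns
 * 800 MHz * 2 / (3 + 1) * 1 / 2 = 200 MHz.
 */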
158 static unsigned long
159 calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
160 {
161 if (hid_div) {
162 rate *= 2;
163 rate /= hid_div + 1;
164 }
165
166 if (mode) {
167 u64 tmp = rate;
168 tmp *= m;
169 do_div(tmp, n);
170 rate = tmp;
171 }
172
173 return rate;
174 }
175
176 static unsigned long
177 __clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
178 {
179 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
180 u32 hid_div, m = 0, n = 0, mode = 0, mask;
181
182 if (rcg->mnd_width) {
183 mask = BIT(rcg->mnd_width) - 1;
184 regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
185 m &= mask;
186 regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
187 n = ~n;
188 n &= mask;
189 n += m;
190 mode = cfg & CFG_MODE_MASK;
191 mode >>= CFG_MODE_SHIFT;
192 }
193
194 mask = BIT(rcg->hid_width) - 1;
195 hid_div = cfg >> CFG_SRC_DIV_SHIFT;
196 hid_div &= mask;
197
198 return calc_rate(parent_rate, m, n, mode, hid_div);
199 }
200
201 static unsigned long
202 clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
203 {
204 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
205 u32 cfg;
206
207 regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
208
209 return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
210 }
211
212 static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
213 struct clk_rate_request *req,
214 enum freq_policy policy)
215 {
216 unsigned long clk_flags, rate = req->rate;
217 struct clk_hw *p;
218 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
219 int index;
220
221 switch (policy) {
222 case FLOOR:
223 f = qcom_find_freq_floor(f, rate);
224 break;
225 case CEIL:
226 f = qcom_find_freq(f, rate);
227 break;
228 default:
229 return -EINVAL;
230 }
231
232 if (!f)
233 return -EINVAL;
234
235 index = qcom_find_src_index(hw, rcg->parent_map, f->src);
236 if (index < 0)
237 return index;
238
239 clk_flags = clk_hw_get_flags(hw);
240 p = clk_hw_get_parent_by_index(hw, index);
241 if (!p)
242 return -EINVAL;
243
244 if (clk_flags & CLK_SET_RATE_PARENT) {
245 rate = f->freq;
246 if (f->pre_div) {
247 if (!rate)
248 rate = req->rate;
249 rate /= 2;
250 rate *= f->pre_div + 1;
251 }
252
253 if (f->n) {
254 u64 tmp = rate;
255 tmp = tmp * f->n;
256 do_div(tmp, f->m);
257 rate = tmp;
258 }
259 } else {
260 rate = clk_hw_get_rate(p);
261 }
262 req->best_parent_hw = p;
263 req->best_parent_rate = rate;
264 req->rate = f->freq;
265
266 return 0;
267 }
268
269 static int clk_rcg2_determine_rate(struct clk_hw *hw,
270 struct clk_rate_request *req)
271 {
272 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
273
274 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
275 }
276
277 static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
278 struct clk_rate_request *req)
279 {
280 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
281
282 return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
283 }
284
285 static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
286 u32 *_cfg)
287 {
288 u32 cfg, mask, d_val, not2d_val, n_minus_m;
289 struct clk_hw *hw = &rcg->clkr.hw;
290 int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
291
292 if (index < 0)
293 return index;
294
295 if (rcg->mnd_width && f->n) {
296 mask = BIT(rcg->mnd_width) - 1;
297 ret = regmap_update_bits(rcg->clkr.regmap,
298 RCG_M_OFFSET(rcg), mask, f->m);
299 if (ret)
300 return ret;
301
302 ret = regmap_update_bits(rcg->clkr.regmap,
303 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
304 if (ret)
305 return ret;
306
307 /* Calculate 2d value */
308 d_val = f->n;
309
310 n_minus_m = f->n - f->m;
311 n_minus_m *= 2;
312
313 d_val = clamp_t(u32, d_val, f->m, n_minus_m);
314 not2d_val = ~d_val & mask;
315
316 ret = regmap_update_bits(rcg->clkr.regmap,
317 RCG_D_OFFSET(rcg), mask, not2d_val);
318 if (ret)
319 return ret;
320 }
321
322 mask = BIT(rcg->hid_width) - 1;
323 mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
324 cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
325 cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
326 if (rcg->mnd_width && f->n && (f->m != f->n))
327 cfg |= CFG_MODE_DUAL_EDGE;
328
329 *_cfg &= ~mask;
330 *_cfg |= cfg;
331
332 return 0;
333 }
334
335 static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
336 {
337 u32 cfg;
338 int ret;
339
340 ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
341 if (ret)
342 return ret;
343
344 ret = __clk_rcg2_configure(rcg, f, &cfg);
345 if (ret)
346 return ret;
347
348 ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
349 if (ret)
350 return ret;
351
352 return update_config(rcg);
353 }
354
355 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
356 enum freq_policy policy)
357 {
358 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
359 const struct freq_tbl *f;
360
361 switch (policy) {
362 case FLOOR:
363 f = qcom_find_freq_floor(rcg->freq_tbl, rate);
364 break;
365 case CEIL:
366 f = qcom_find_freq(rcg->freq_tbl, rate);
367 break;
368 default:
369 return -EINVAL;
370 }
371
372 if (!f)
373 return -EINVAL;
374
375 return clk_rcg2_configure(rcg, f);
376 }
377
378 static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
379 unsigned long parent_rate)
380 {
381 return __clk_rcg2_set_rate(hw, rate, CEIL);
382 }
383
384 static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
385 unsigned long parent_rate)
386 {
387 return __clk_rcg2_set_rate(hw, rate, FLOOR);
388 }
389
390 static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
391 unsigned long rate, unsigned long parent_rate, u8 index)
392 {
393 return __clk_rcg2_set_rate(hw, rate, CEIL);
394 }
395
396 static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
397 unsigned long rate, unsigned long parent_rate, u8 index)
398 {
399 return __clk_rcg2_set_rate(hw, rate, FLOOR);
400 }
401
402 static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
403 {
404 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
405 u32 notn_m, n, m, d, not2d, mask;
406
407 if (!rcg->mnd_width) {
408 /* 50 % duty-cycle for Non-MND RCGs */
409 duty->num = 1;
410 duty->den = 2;
411 return 0;
412 }
413
414 regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
415 regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
416 regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
417
418 if (!not2d && !m && !notn_m) {
419 /* 50 % duty-cycle always */
420 duty->num = 1;
421 duty->den = 2;
422 return 0;
423 }
424
425 mask = BIT(rcg->mnd_width) - 1;
426
427 d = ~(not2d) & mask;
428 d = DIV_ROUND_CLOSEST(d, 2);
429
430 n = (~(notn_m) + m) & mask;
431
432 duty->num = d;
433 duty->den = n;
434
435 return 0;
436 }
437
438 static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
439 {
440 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
441 u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
442 int ret;
443
444 /* Duty-cycle cannot be modified for non-MND RCGs */
445 if (!rcg->mnd_width)
446 return -EINVAL;
447
448 mask = BIT(rcg->mnd_width) - 1;
449
450 regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
451 regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
452 regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
453
454 /* Duty-cycle cannot be modified if MND divider is in bypass mode. */
455 if (!(cfg & CFG_MODE_MASK))
456 return -EINVAL;
457
458 n = (~(notn_m) + m) & mask;
459
460 duty_per = (duty->num * 100) / duty->den;
461
462 /* Calculate 2d value */
463 d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);
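/*
 * Illustrative example: with n = 24 and a requested duty cycle of 25%,
 * 2 * D = DIV_ROUND_CLOSEST(24 * 25 * 2, 100) = 12, i.e. D = 6, and the
 * resulting duty cycle D/N is 6/24 = 25%.
 */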
464
465 /*
466 * Check the bit width of 2 * D. If D is too big, reduce the duty cycle.
467 * Also make sure it is never zero.
468 */
469 d = clamp_val(d, 1, mask);
470
471 if ((d / 2) > (n - m))
472 d = (n - m) * 2;
473 else if ((d / 2) < (m / 2))
474 d = m;
475
476 not2d = ~d & mask;
477
478 ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
479 not2d);
480 if (ret)
481 return ret;
482
483 return update_config(rcg);
484 }
485
486 const struct clk_ops clk_rcg2_ops = {
487 .is_enabled = clk_rcg2_is_enabled,
488 .get_parent = clk_rcg2_get_parent,
489 .set_parent = clk_rcg2_set_parent,
490 .recalc_rate = clk_rcg2_recalc_rate,
491 .determine_rate = clk_rcg2_determine_rate,
492 .set_rate = clk_rcg2_set_rate,
493 .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
494 .get_duty_cycle = clk_rcg2_get_duty_cycle,
495 .set_duty_cycle = clk_rcg2_set_duty_cycle,
496 };
497 EXPORT_SYMBOL_GPL(clk_rcg2_ops);
498
499 const struct clk_ops clk_rcg2_floor_ops = {
500 .is_enabled = clk_rcg2_is_enabled,
501 .get_parent = clk_rcg2_get_parent,
502 .set_parent = clk_rcg2_set_parent,
503 .recalc_rate = clk_rcg2_recalc_rate,
504 .determine_rate = clk_rcg2_determine_floor_rate,
505 .set_rate = clk_rcg2_set_floor_rate,
506 .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
507 .get_duty_cycle = clk_rcg2_get_duty_cycle,
508 .set_duty_cycle = clk_rcg2_set_duty_cycle,
509 };
510 EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
511
512 struct frac_entry {
513 int num;
514 int den;
515 };
516
517 static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
518 { 52, 295 }, /* 119 M */
519 { 11, 57 }, /* 130.25 M */
520 { 63, 307 }, /* 138.50 M */
521 { 11, 50 }, /* 148.50 M */
522 { 47, 206 }, /* 154 M */
523 { 31, 100 }, /* 205.25 M */
524 { 107, 269 }, /* 268.50 M */
525 { },
526 };
527
528 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
529 { 31, 211 }, /* 119 M */
530 { 32, 199 }, /* 130.25 M */
531 { 63, 307 }, /* 138.50 M */
532 { 11, 60 }, /* 148.50 M */
533 { 50, 263 }, /* 154 M */
534 { 31, 120 }, /* 205.25 M */
535 { 119, 359 }, /* 268.50 M */
536 { },
537 };
538
539 static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
540 unsigned long parent_rate)
541 {
542 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
543 struct freq_tbl f = *rcg->freq_tbl;
544 const struct frac_entry *frac;
545 int delta = 100000;
546 s64 src_rate = parent_rate;
547 s64 request;
548 u32 mask = BIT(rcg->hid_width) - 1;
549 u32 hid_div;
550
551 if (src_rate == 810000000)
552 frac = frac_table_810m;
553 else
554 frac = frac_table_675m;
555
556 for (; frac->num; frac++) {
557 request = rate;
558 request *= frac->den;
559 request = div_s64(request, frac->num);
560 if ((src_rate < (request - delta)) ||
561 (src_rate > (request + delta)))
562 continue;
563
564 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
565 &hid_div);
566 f.pre_div = hid_div;
567 f.pre_div >>= CFG_SRC_DIV_SHIFT;
568 f.pre_div &= mask;
569 f.m = frac->num;
570 f.n = frac->den;
571
572 return clk_rcg2_configure(rcg, &f);
573 }
574
575 return -EINVAL;
576 }
577
578 static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
579 unsigned long rate, unsigned long parent_rate, u8 index)
580 {
581 /* Parent index is set statically in frequency table */
582 return clk_edp_pixel_set_rate(hw, rate, parent_rate);
583 }
584
585 static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
586 struct clk_rate_request *req)
587 {
588 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
589 const struct freq_tbl *f = rcg->freq_tbl;
590 const struct frac_entry *frac;
591 int delta = 100000;
592 s64 request;
593 u32 mask = BIT(rcg->hid_width) - 1;
594 u32 hid_div;
595 int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
596
597 /* Force the correct parent */
598 req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
599 req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
600
601 if (req->best_parent_rate == 810000000)
602 frac = frac_table_810m;
603 else
604 frac = frac_table_675m;
605
606 for (; frac->num; frac++) {
607 request = req->rate;
608 request *= frac->den;
609 request = div_s64(request, frac->num);
610 if ((req->best_parent_rate < (request - delta)) ||
611 (req->best_parent_rate > (request + delta)))
612 continue;
613
614 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
615 &hid_div);
616 hid_div >>= CFG_SRC_DIV_SHIFT;
617 hid_div &= mask;
618
619 req->rate = calc_rate(req->best_parent_rate,
620 frac->num, frac->den,
621 !!frac->den, hid_div);
622 return 0;
623 }
624
625 return -EINVAL;
626 }
627
628 const struct clk_ops clk_edp_pixel_ops = {
629 .is_enabled = clk_rcg2_is_enabled,
630 .get_parent = clk_rcg2_get_parent,
631 .set_parent = clk_rcg2_set_parent,
632 .recalc_rate = clk_rcg2_recalc_rate,
633 .set_rate = clk_edp_pixel_set_rate,
634 .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
635 .determine_rate = clk_edp_pixel_determine_rate,
636 };
637 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
638
639 static int clk_byte_determine_rate(struct clk_hw *hw,
640 struct clk_rate_request *req)
641 {
642 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
643 const struct freq_tbl *f = rcg->freq_tbl;
644 int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
645 unsigned long parent_rate, div;
646 u32 mask = BIT(rcg->hid_width) - 1;
647 struct clk_hw *p;
648
649 if (req->rate == 0)
650 return -EINVAL;
651
652 req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
653 req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
654
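/* The CFG divider field encodes 2 * divider - 1, hence the 2 * parent_rate here. */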
655 div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
656 div = min_t(u32, div, mask);
657
658 req->rate = calc_rate(parent_rate, 0, 0, 0, div);
659
660 return 0;
661 }
662
663 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
664 unsigned long parent_rate)
665 {
666 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
667 struct freq_tbl f = *rcg->freq_tbl;
668 unsigned long div;
669 u32 mask = BIT(rcg->hid_width) - 1;
670
671 div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
672 div = min_t(u32, div, mask);
673
674 f.pre_div = div;
675
676 return clk_rcg2_configure(rcg, &f);
677 }
678
679 static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
680 unsigned long rate, unsigned long parent_rate, u8 index)
681 {
682 /* Parent index is set statically in frequency table */
683 return clk_byte_set_rate(hw, rate, parent_rate);
684 }
685
686 const struct clk_ops clk_byte_ops = {
687 .is_enabled = clk_rcg2_is_enabled,
688 .get_parent = clk_rcg2_get_parent,
689 .set_parent = clk_rcg2_set_parent,
690 .recalc_rate = clk_rcg2_recalc_rate,
691 .set_rate = clk_byte_set_rate,
692 .set_rate_and_parent = clk_byte_set_rate_and_parent,
693 .determine_rate = clk_byte_determine_rate,
694 };
695 EXPORT_SYMBOL_GPL(clk_byte_ops);
696
697 static int clk_byte2_determine_rate(struct clk_hw *hw,
698 struct clk_rate_request *req)
699 {
700 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
701 unsigned long parent_rate, div;
702 u32 mask = BIT(rcg->hid_width) - 1;
703 struct clk_hw *p;
704 unsigned long rate = req->rate;
705
706 if (rate == 0)
707 return -EINVAL;
708
709 p = req->best_parent_hw;
710 req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
711
712 div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
713 div = min_t(u32, div, mask);
714
715 req->rate = calc_rate(parent_rate, 0, 0, 0, div);
716
717 return 0;
718 }
719
720 static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
721 unsigned long parent_rate)
722 {
723 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
724 struct freq_tbl f = { 0 };
725 unsigned long div;
726 int i, num_parents = clk_hw_get_num_parents(hw);
727 u32 mask = BIT(rcg->hid_width) - 1;
728 u32 cfg;
729
730 div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
731 div = min_t(u32, div, mask);
732
733 f.pre_div = div;
734
735 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
736 cfg &= CFG_SRC_SEL_MASK;
737 cfg >>= CFG_SRC_SEL_SHIFT;
738
739 for (i = 0; i < num_parents; i++) {
740 if (cfg == rcg->parent_map[i].cfg) {
741 f.src = rcg->parent_map[i].src;
742 return clk_rcg2_configure(rcg, &f);
743 }
744 }
745
746 return -EINVAL;
747 }
748
749 static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
750 unsigned long rate, unsigned long parent_rate, u8 index)
751 {
752 /* Read the hardware to determine parent during set_rate */
753 return clk_byte2_set_rate(hw, rate, parent_rate);
754 }
755
756 const struct clk_ops clk_byte2_ops = {
757 .is_enabled = clk_rcg2_is_enabled,
758 .get_parent = clk_rcg2_get_parent,
759 .set_parent = clk_rcg2_set_parent,
760 .recalc_rate = clk_rcg2_recalc_rate,
761 .set_rate = clk_byte2_set_rate,
762 .set_rate_and_parent = clk_byte2_set_rate_and_parent,
763 .determine_rate = clk_byte2_determine_rate,
764 };
765 EXPORT_SYMBOL_GPL(clk_byte2_ops);
766
767 static const struct frac_entry frac_table_pixel[] = {
768 { 3, 8 },
769 { 2, 9 },
770 { 4, 9 },
771 { 1, 1 },
772 { 2, 3 },
773 { }
774 };
775
776 static int clk_pixel_determine_rate(struct clk_hw *hw,
777 struct clk_rate_request *req)
778 {
779 unsigned long request, src_rate;
780 int delta = 100000;
781 const struct frac_entry *frac = frac_table_pixel;
782
783 for (; frac->num; frac++) {
784 request = (req->rate * frac->den) / frac->num;
785
786 src_rate = clk_hw_round_rate(req->best_parent_hw, request);
787 if ((src_rate < (request - delta)) ||
788 (src_rate > (request + delta)))
789 continue;
790
791 req->best_parent_rate = src_rate;
792 req->rate = (src_rate * frac->num) / frac->den;
793 return 0;
794 }
795
796 return -EINVAL;
797 }
798
799 static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
800 unsigned long parent_rate)
801 {
802 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
803 struct freq_tbl f = { 0 };
804 const struct frac_entry *frac = frac_table_pixel;
805 unsigned long request;
806 int delta = 100000;
807 u32 mask = BIT(rcg->hid_width) - 1;
808 u32 hid_div, cfg;
809 int i, num_parents = clk_hw_get_num_parents(hw);
810
811 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
812 cfg &= CFG_SRC_SEL_MASK;
813 cfg >>= CFG_SRC_SEL_SHIFT;
814
815 for (i = 0; i < num_parents; i++)
816 if (cfg == rcg->parent_map[i].cfg) {
817 f.src = rcg->parent_map[i].src;
818 break;
819 }
820
821 for (; frac->num; frac++) {
822 request = (rate * frac->den) / frac->num;
823
824 if ((parent_rate < (request - delta)) ||
825 (parent_rate > (request + delta)))
826 continue;
827
828 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
829 &hid_div);
830 f.pre_div = hid_div;
831 f.pre_div >>= CFG_SRC_DIV_SHIFT;
832 f.pre_div &= mask;
833 f.m = frac->num;
834 f.n = frac->den;
835
836 return clk_rcg2_configure(rcg, &f);
837 }
838 return -EINVAL;
839 }
840
841 static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
842 unsigned long parent_rate, u8 index)
843 {
844 return clk_pixel_set_rate(hw, rate, parent_rate);
845 }
846
847 const struct clk_ops clk_pixel_ops = {
848 .is_enabled = clk_rcg2_is_enabled,
849 .get_parent = clk_rcg2_get_parent,
850 .set_parent = clk_rcg2_set_parent,
851 .recalc_rate = clk_rcg2_recalc_rate,
852 .set_rate = clk_pixel_set_rate,
853 .set_rate_and_parent = clk_pixel_set_rate_and_parent,
854 .determine_rate = clk_pixel_determine_rate,
855 };
856 EXPORT_SYMBOL_GPL(clk_pixel_ops);
857
858 static int clk_gfx3d_determine_rate(struct clk_hw *hw,
859 struct clk_rate_request *req)
860 {
861 struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
862 struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
863 struct clk_hw *xo, *p0, *p1, *p2;
864 unsigned long p0_rate;
865 u8 mux_div = cgfx->div;
866 int ret;
867
868 p0 = cgfx->hws[0];
869 p1 = cgfx->hws[1];
870 p2 = cgfx->hws[2];
871 /*
872 * This function ping-pongs the RCG between PLLs: if we don't
873 * have at least one fixed PLL and two variable ones,
874 * then it's not going to work correctly.
875 */
876 if (WARN_ON(!p0 || !p1 || !p2))
877 return -EINVAL;
878
879 xo = clk_hw_get_parent_by_index(hw, 0);
880 if (req->rate == clk_hw_get_rate(xo)) {
881 req->best_parent_hw = xo;
882 return 0;
883 }
884
885 if (mux_div == 0)
886 mux_div = 1;
887
888 parent_req.rate = req->rate * mux_div;
889
890 /* This has to be a fixed rate PLL */
891 p0_rate = clk_hw_get_rate(p0);
892
893 if (parent_req.rate == p0_rate) {
894 req->rate = req->best_parent_rate = p0_rate;
895 req->best_parent_hw = p0;
896 return 0;
897 }
898
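/*
 * Otherwise ping-pong between the two variable PLLs so that the PLL
 * currently driving the RCG is never reprogrammed while in use.
 */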
899 if (req->best_parent_hw == p0) {
900 /* Are we going back to a previously used rate? */
901 if (clk_hw_get_rate(p2) == parent_req.rate)
902 req->best_parent_hw = p2;
903 else
904 req->best_parent_hw = p1;
905 } else if (req->best_parent_hw == p2) {
906 req->best_parent_hw = p1;
907 } else {
908 req->best_parent_hw = p2;
909 }
910
911 ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
912 if (ret)
913 return ret;
914
915 req->rate = req->best_parent_rate = parent_req.rate;
916 req->rate /= mux_div;
917
918 return 0;
919 }
920
921 static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
922 unsigned long parent_rate, u8 index)
923 {
924 struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
925 struct clk_rcg2 *rcg = &cgfx->rcg;
926 u32 cfg;
927 int ret;
928
929 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
930 /* On some targets, the GFX3D RCG may need to divide PLL frequency */
931 if (cgfx->div > 1)
932 cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;
933
934 ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
935 if (ret)
936 return ret;
937
938 return update_config(rcg);
939 }
940
941 static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
942 unsigned long parent_rate)
943 {
944 /*
945 * We should never get here; clk_gfx3d_determine_rate() should always
946 * make us use a different parent than what we're currently using, so
947 * clk_gfx3d_set_rate_and_parent() should always be called.
948 */
949 return 0;
950 }
951
952 const struct clk_ops clk_gfx3d_ops = {
953 .is_enabled = clk_rcg2_is_enabled,
954 .get_parent = clk_rcg2_get_parent,
955 .set_parent = clk_rcg2_set_parent,
956 .recalc_rate = clk_rcg2_recalc_rate,
957 .set_rate = clk_gfx3d_set_rate,
958 .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
959 .determine_rate = clk_gfx3d_determine_rate,
960 };
961 EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
962
963 static int clk_rcg2_set_force_enable(struct clk_hw *hw)
964 {
965 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
966 const char *name = clk_hw_get_name(hw);
967 int ret, count;
968
969 ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
970 CMD_ROOT_EN, CMD_ROOT_EN);
971 if (ret)
972 return ret;
973
974 /* wait for RCG to turn ON */
975 for (count = 500; count > 0; count--) {
976 if (clk_rcg2_is_enabled(hw))
977 return 0;
978
979 udelay(1);
980 }
981
982 pr_err("%s: RCG did not turn on\n", name);
983 return -ETIMEDOUT;
984 }
985
986 static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
987 {
988 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
989
990 return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
991 CMD_ROOT_EN, 0);
992 }
993
994 static int
995 clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
996 {
997 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
998 int ret;
999
1000 ret = clk_rcg2_set_force_enable(hw);
1001 if (ret)
1002 return ret;
1003
1004 ret = clk_rcg2_configure(rcg, f);
1005 if (ret)
1006 return ret;
1007
1008 return clk_rcg2_clear_force_enable(hw);
1009 }
1010
1011 static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
1012 unsigned long parent_rate)
1013 {
1014 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1015 const struct freq_tbl *f;
1016
1017 f = qcom_find_freq(rcg->freq_tbl, rate);
1018 if (!f)
1019 return -EINVAL;
1020
1021 /*
1022 * If the clock is disabled, update the M, N and D registers, cache
1023 * the CFG value in parked_cfg and don't hit the update bit of CMD
1024 * register.
1025 */
1026 if (!clk_hw_is_enabled(hw))
1027 return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);
1028
1029 return clk_rcg2_shared_force_enable_clear(hw, f);
1030 }
1031
1032 static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
1033 unsigned long rate, unsigned long parent_rate, u8 index)
1034 {
1035 return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
1036 }
1037
1038 static int clk_rcg2_shared_enable(struct clk_hw *hw)
1039 {
1040 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1041 int ret;
1042
1043 /*
1044 * Set the update bit because required configuration has already
1045 * been written in clk_rcg2_shared_set_rate()
1046 */
1047 ret = clk_rcg2_set_force_enable(hw);
1048 if (ret)
1049 return ret;
1050
1051 /* Write back the stored configuration corresponding to current rate */
1052 ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
1053 if (ret)
1054 return ret;
1055
1056 ret = update_config(rcg);
1057 if (ret)
1058 return ret;
1059
1060 return clk_rcg2_clear_force_enable(hw);
1061 }
1062
1063 static void clk_rcg2_shared_disable(struct clk_hw *hw)
1064 {
1065 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1066
1067 /*
1068 * Cache the current configuration, since switching to the safe source
1069 * would clear the SRC and DIV fields of the CFG register
1070 */
1071 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
1072
1073 /*
1074 * Park the RCG at a safe configuration - sourced off of safe source.
1075 * Force enable and disable the RCG while configuring it to safeguard
1076 * against any update signal coming from the downstream clock.
1077 * The current parent is still prepared and enabled at this point, and
1078 * the safe source is always on while application processor subsystem
1079 * is online. Therefore, the RCG can safely switch its parent.
1080 */
1081 clk_rcg2_set_force_enable(hw);
1082
1083 regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
1084 rcg->safe_src_index << CFG_SRC_SEL_SHIFT);
1085
1086 update_config(rcg);
1087
1088 clk_rcg2_clear_force_enable(hw);
1089 }
1090
1091 static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
1092 {
1093 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1094
1095 /* If the shared rcg is parked, use the cached cfg instead */
1096 if (!clk_hw_is_enabled(hw))
1097 return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
1098
1099 return clk_rcg2_get_parent(hw);
1100 }
1101
1102 static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
1103 {
1104 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1105
1106 /* If the shared rcg is parked, only update the cached cfg */
1107 if (!clk_hw_is_enabled(hw)) {
1108 rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
1109 rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
1110
1111 return 0;
1112 }
1113
1114 return clk_rcg2_set_parent(hw, index);
1115 }
1116
1117 static unsigned long
1118 clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
1119 {
1120 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1121
1122 /* If the shared rcg is parked, use the cached cfg instead */
1123 if (!clk_hw_is_enabled(hw))
1124 return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);
1125
1126 return clk_rcg2_recalc_rate(hw, parent_rate);
1127 }
1128
1129 const struct clk_ops clk_rcg2_shared_ops = {
1130 .enable = clk_rcg2_shared_enable,
1131 .disable = clk_rcg2_shared_disable,
1132 .get_parent = clk_rcg2_shared_get_parent,
1133 .set_parent = clk_rcg2_shared_set_parent,
1134 .recalc_rate = clk_rcg2_shared_recalc_rate,
1135 .determine_rate = clk_rcg2_determine_rate,
1136 .set_rate = clk_rcg2_shared_set_rate,
1137 .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
1138 };
1139 EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
1140
1141 /* Common APIs to be used for DFS based RCGR */
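/*
 * A DFS-capable RCG exposes up to MAX_PERF_LEVEL pre-programmed perf
 * levels, each with its own CFG/M/N copy at SE_PERF_*_DFSR(level); the
 * routines below read those back into a regular frequency table.
 */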
1142 static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
1143 struct freq_tbl *f)
1144 {
1145 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1146 struct clk_hw *p;
1147 unsigned long prate = 0;
1148 u32 val, mask, cfg, mode, src;
1149 int i, num_parents;
1150
1151 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
1152
1153 mask = BIT(rcg->hid_width) - 1;
1154 f->pre_div = 1;
1155 if (cfg & mask)
1156 f->pre_div = cfg & mask;
1157
1158 src = cfg & CFG_SRC_SEL_MASK;
1159 src >>= CFG_SRC_SEL_SHIFT;
1160
1161 num_parents = clk_hw_get_num_parents(hw);
1162 for (i = 0; i < num_parents; i++) {
1163 if (src == rcg->parent_map[i].cfg) {
1164 f->src = rcg->parent_map[i].src;
1165 p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
1166 prate = clk_hw_get_rate(p);
1167 }
1168 }
1169
1170 mode = cfg & CFG_MODE_MASK;
1171 mode >>= CFG_MODE_SHIFT;
1172 if (mode) {
1173 mask = BIT(rcg->mnd_width) - 1;
1174 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
1175 &val);
1176 val &= mask;
1177 f->m = val;
1178
1179 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
1180 &val);
1181 val = ~val;
1182 val &= mask;
1183 val += f->m;
1184 f->n = val;
1185 }
1186
1187 f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
1188 }
1189
1190 static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
1191 {
1192 struct freq_tbl *freq_tbl;
1193 int i;
1194
1195 /* Allocate space for 1 extra since table is NULL terminated */
1196 freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
1197 if (!freq_tbl)
1198 return -ENOMEM;
1199 rcg->freq_tbl = freq_tbl;
1200
1201 for (i = 0; i < MAX_PERF_LEVEL; i++)
1202 clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);
1203
1204 return 0;
1205 }
1206
1207 static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
1208 struct clk_rate_request *req)
1209 {
1210 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1211 int ret;
1212
1213 if (!rcg->freq_tbl) {
1214 ret = clk_rcg2_dfs_populate_freq_table(rcg);
1215 if (ret) {
1216 pr_err("Failed to update DFS tables for %s\n",
1217 clk_hw_get_name(hw));
1218 return ret;
1219 }
1220 }
1221
1222 return clk_rcg2_determine_rate(hw, req);
1223 }
1224
1225 static unsigned long
1226 clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
1227 {
1228 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1229 u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;
1230
1231 regmap_read(rcg->clkr.regmap,
1232 rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
1233 level &= GENMASK(4, 1);
1234 level >>= 1;
1235
1236 if (rcg->freq_tbl)
1237 return rcg->freq_tbl[level].freq;
1238
1239 /*
1240 * Assume that parent_rate is actually the rate of the parent because
1241 * we can't do any better at figuring it out when the table
1242 * hasn't been populated yet. We only populate the table
1243 * in determine_rate because we can't guarantee the parents
1244 * will be registered with the framework until then.
1245 */
1246 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
1247 &cfg);
1248
1249 mask = BIT(rcg->hid_width) - 1;
1250 pre_div = 1;
1251 if (cfg & mask)
1252 pre_div = cfg & mask;
1253
1254 mode = cfg & CFG_MODE_MASK;
1255 mode >>= CFG_MODE_SHIFT;
1256 if (mode) {
1257 mask = BIT(rcg->mnd_width) - 1;
1258 regmap_read(rcg->clkr.regmap,
1259 rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
1260 m &= mask;
1261
1262 regmap_read(rcg->clkr.regmap,
1263 rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
1264 n = ~n;
1265 n &= mask;
1266 n += m;
1267 }
1268
1269 return calc_rate(parent_rate, m, n, mode, pre_div);
1270 }
1271
1272 static const struct clk_ops clk_rcg2_dfs_ops = {
1273 .is_enabled = clk_rcg2_is_enabled,
1274 .get_parent = clk_rcg2_get_parent,
1275 .determine_rate = clk_rcg2_dfs_determine_rate,
1276 .recalc_rate = clk_rcg2_dfs_recalc_rate,
1277 };
1278
1279 static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
1280 struct regmap *regmap)
1281 {
1282 struct clk_rcg2 *rcg = data->rcg;
1283 struct clk_init_data *init = data->init;
1284 u32 val;
1285 int ret;
1286
1287 ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
1288 if (ret)
1289 return -EINVAL;
1290
1291 if (!(val & SE_CMD_DFS_EN))
1292 return 0;
1293
1294 /*
1295 * The rate is changed by the consumer writing a register in
1296 * its own I/O region
1297 */
1298 init->flags |= CLK_GET_RATE_NOCACHE;
1299 init->ops = &clk_rcg2_dfs_ops;
1300
1301 rcg->freq_tbl = NULL;
1302
1303 return 0;
1304 }
1305
1306 int qcom_cc_register_rcg_dfs(struct regmap *regmap,
1307 const struct clk_rcg_dfs_data *rcgs, size_t len)
1308 {
1309 int i, ret;
1310
1311 for (i = 0; i < len; i++) {
1312 ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
1313 if (ret)
1314 return ret;
1315 }
1316
1317 return 0;
1318 }
1319 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
1320
1321 static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
1322 unsigned long parent_rate)
1323 {
1324 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1325 struct freq_tbl f = { 0 };
1326 u32 mask = BIT(rcg->hid_width) - 1;
1327 u32 hid_div, cfg;
1328 int i, num_parents = clk_hw_get_num_parents(hw);
1329 unsigned long num, den;
1330
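/*
 * Pick M/N (num/den ~= rate / parent_rate) within the MND field width;
 * the parent link rate is fixed (see clk_rcg2_dp_determine_rate()), so
 * only the fractional divider is tuned.
 */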
1331 rational_best_approximation(parent_rate, rate,
1332 GENMASK(rcg->mnd_width - 1, 0),
1333 GENMASK(rcg->mnd_width - 1, 0), &den, &num);
1334
1335 if (!num || !den)
1336 return -EINVAL;
1337
1338 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
1339 hid_div = cfg;
1340 cfg &= CFG_SRC_SEL_MASK;
1341 cfg >>= CFG_SRC_SEL_SHIFT;
1342
1343 for (i = 0; i < num_parents; i++) {
1344 if (cfg == rcg->parent_map[i].cfg) {
1345 f.src = rcg->parent_map[i].src;
1346 break;
1347 }
1348 }
1349
1350 f.pre_div = hid_div;
1351 f.pre_div >>= CFG_SRC_DIV_SHIFT;
1352 f.pre_div &= mask;
1353
1354 if (num != den) {
1355 f.m = num;
1356 f.n = den;
1357 } else {
1358 f.m = 0;
1359 f.n = 0;
1360 }
1361
1362 return clk_rcg2_configure(rcg, &f);
1363 }
1364
1365 static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
1366 unsigned long rate, unsigned long parent_rate, u8 index)
1367 {
1368 return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
1369 }
1370
1371 static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
1372 struct clk_rate_request *req)
1373 {
1374 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1375 unsigned long num, den;
1376 u64 tmp;
1377
1378 /* Parent rate is a fixed PHY link rate */
1379 rational_best_approximation(req->best_parent_rate, req->rate,
1380 GENMASK(rcg->mnd_width - 1, 0),
1381 GENMASK(rcg->mnd_width - 1, 0), &den, &num);
1382
1383 if (!num || !den)
1384 return -EINVAL;
1385
1386 tmp = req->best_parent_rate * num;
1387 do_div(tmp, den);
1388 req->rate = tmp;
1389
1390 return 0;
1391 }
1392
1393 const struct clk_ops clk_dp_ops = {
1394 .is_enabled = clk_rcg2_is_enabled,
1395 .get_parent = clk_rcg2_get_parent,
1396 .set_parent = clk_rcg2_set_parent,
1397 .recalc_rate = clk_rcg2_recalc_rate,
1398 .set_rate = clk_rcg2_dp_set_rate,
1399 .set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
1400 .determine_rate = clk_rcg2_dp_determine_rate,
1401 };
1402 EXPORT_SYMBOL_GPL(clk_dp_ops);
1403