// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCI Clock driver for keystone based devices
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
 *	Tero Kristo <t-kristo@ti.com>
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/bsearch.h>
#include <linux/list_sort.h>

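/*
 * Per-clock flags, forwarded as the boolean arguments of the TI SCI
 * get_clock request issued in sci_clk_prepare().
 */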
#define SCI_CLK_SSC_ENABLE		BIT(0)
#define SCI_CLK_ALLOW_FREQ_CHANGE	BIT(1)
#define SCI_CLK_INPUT_TERMINATION	BIT(2)

/**
 * struct sci_clk_provider - TI SCI clock provider representation
 * @sci: Handle to the System Control Interface protocol handler
 * @ops: Pointer to the SCI ops to be used by the clocks
 * @dev: Device pointer for the clock provider
 * @clocks: Clocks array for this device
 * @num_clocks: Total number of clocks for this provider
 */
struct sci_clk_provider {
	const struct ti_sci_handle *sci;
	const struct ti_sci_clk_ops *ops;
	struct device *dev;
	struct sci_clk **clocks;
	int num_clocks;
};

/**
 * struct sci_clk - TI SCI clock representation
 * @hw: Hardware clock cookie for common clock framework
 * @dev_id: Device index
 * @clk_id: Clock index
 * @num_parents: Number of parents for this clock
 * @provider: Master clock provider
 * @flags: Flags for the clock
 * @node: Link for handling clocks probed via DT
 * @cached_req: Cached requested freq for determine rate calls
 * @cached_res: Cached result freq for determine rate calls
 */
struct sci_clk {
	struct clk_hw hw;
	u16 dev_id;
	u32 clk_id;
	u32 num_parents;
	struct sci_clk_provider *provider;
	u8 flags;
	struct list_head node;
	unsigned long cached_req;
	unsigned long cached_res;
};

#define to_sci_clk(_hw) container_of(_hw, struct sci_clk, hw)

/**
 * sci_clk_prepare - Prepare (enable) a TI SCI clock
 * @hw: clock to prepare
 *
 * Prepares a clock to be actively used. Returns the SCI protocol status.
 */
static int sci_clk_prepare(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	bool enable_ssc = clk->flags & SCI_CLK_SSC_ENABLE;
	bool allow_freq_change = clk->flags & SCI_CLK_ALLOW_FREQ_CHANGE;
	bool input_termination = clk->flags & SCI_CLK_INPUT_TERMINATION;

	return clk->provider->ops->get_clock(clk->provider->sci, clk->dev_id,
					     clk->clk_id, enable_ssc,
					     allow_freq_change,
					     input_termination);
}

/**
 * sci_clk_unprepare - Un-prepares (disables) a TI SCI clock
 * @hw: clock to unprepare
 *
 * Un-prepares a clock from active state.
 */
static void sci_clk_unprepare(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	int ret;

	ret = clk->provider->ops->put_clock(clk->provider->sci, clk->dev_id,
					    clk->clk_id);
	if (ret)
		dev_err(clk->provider->dev,
			"unprepare failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
}

/**
 * sci_clk_is_prepared - Check if a TI SCI clock is prepared or not
 * @hw: clock to check status for
 *
 * Checks if a clock is prepared (enabled) in hardware. Returns non-zero
 * value if clock is enabled, zero otherwise.
 */
static int sci_clk_is_prepared(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	bool req_state, current_state;
	int ret;

	ret = clk->provider->ops->is_on(clk->provider->sci, clk->dev_id,
					clk->clk_id, &req_state,
					&current_state);
	if (ret) {
		dev_err(clk->provider->dev,
			"is_prepared failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return 0;
	}

	return req_state;
}

/**
 * sci_clk_recalc_rate - Get clock rate for a TI SCI clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a TI SCI clock. Returns the current
 * clock rate, or zero on failure.
 */
static unsigned long sci_clk_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct sci_clk *clk = to_sci_clk(hw);
	u64 freq;
	int ret;

	ret = clk->provider->ops->get_freq(clk->provider->sci, clk->dev_id,
					   clk->clk_id, &freq);
	if (ret) {
		dev_err(clk->provider->dev,
			"recalc-rate failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return 0;
	}

	return freq;
}

/**
 * sci_clk_determine_rate - Determines a clock rate a clock can be set to
 * @hw: clock to change rate for
 * @req: requested rate configuration for the clock
 *
 * Determines a suitable clock rate and parent for a TI SCI clock.
 * The parent handling is unused, as generally the parent clock rates
 * are not known by the kernel; instead these are internally handled
 * by the firmware. Returns 0 on success, negative error value on failure.
 */
static int sci_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct sci_clk *clk = to_sci_clk(hw);
	int ret;
	u64 new_rate;

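	/*
	 * One-entry cache of the last firmware query; sci_clk_set_parent()
	 * clears it, since a parent change can alter the achievable rates.
	 */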
	if (clk->cached_req && clk->cached_req == req->rate) {
		req->rate = clk->cached_res;
		return 0;
	}

	ret = clk->provider->ops->get_best_match_freq(clk->provider->sci,
						      clk->dev_id,
						      clk->clk_id,
						      req->min_rate,
						      req->rate,
						      req->max_rate,
						      &new_rate);
	if (ret) {
		dev_err(clk->provider->dev,
			"determine-rate failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return ret;
	}

	clk->cached_req = req->rate;
	clk->cached_res = new_rate;

	req->rate = new_rate;

	return 0;
}

/**
 * sci_clk_set_rate - Set rate for a TI SCI clock
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used for TI SCI clocks
 *
 * Sets a clock frequency for a TI SCI clock. Returns the TI SCI
 * protocol status.
 */
static int sci_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct sci_clk *clk = to_sci_clk(hw);

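	/*
	 * Pass the firmware an allowed window of roughly 90%..110% of the
	 * requested rate; the firmware picks a rate within that window.
	 */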
	return clk->provider->ops->set_freq(clk->provider->sci, clk->dev_id,
					    clk->clk_id, rate / 10 * 9, rate,
					    rate / 10 * 11);
}

/**
 * sci_clk_get_parent - Get the current parent of a TI SCI clock
 * @hw: clock to get parent for
 *
 * Returns the index of the currently selected parent for a TI SCI clock.
 */
static u8 sci_clk_get_parent(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	u32 parent_id = 0;
	int ret;

	ret = clk->provider->ops->get_parent(clk->provider->sci, clk->dev_id,
					     clk->clk_id, (void *)&parent_id);
	if (ret) {
		dev_err(clk->provider->dev,
			"get-parent failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return 0;
	}

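	/*
	 * Parent clocks are assumed to directly follow the clock's own ID,
	 * so convert the absolute ID reported by firmware back to a
	 * zero-based parent index.
	 */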
	parent_id = parent_id - clk->clk_id - 1;

	return (u8)parent_id;
}

/**
 * sci_clk_set_parent - Set the parent of a TI SCI clock
 * @hw: clock to set parent for
 * @index: new parent index for the clock
 *
 * Sets the parent of a TI SCI clock. Returns the TI SCI protocol status.
 */
static int sci_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct sci_clk *clk = to_sci_clk(hw);

	clk->cached_req = 0;

	return clk->provider->ops->set_parent(clk->provider->sci, clk->dev_id,
					      clk->clk_id,
					      index + 1 + clk->clk_id);
}

static const struct clk_ops sci_clk_ops = {
	.prepare = sci_clk_prepare,
	.unprepare = sci_clk_unprepare,
	.is_prepared = sci_clk_is_prepared,
	.recalc_rate = sci_clk_recalc_rate,
	.determine_rate = sci_clk_determine_rate,
	.set_rate = sci_clk_set_rate,
	.get_parent = sci_clk_get_parent,
	.set_parent = sci_clk_set_parent,
};

/**
 * _sci_clk_build - Gets a handle for an SCI clock
 * @provider: Handle to SCI clock provider
 * @sci_clk: Handle to the SCI clock to populate
 *
 * Gets a handle to an existing TI SCI hw clock, or builds a new clock
 * entry and registers it with the common clock framework. Called from
 * the common clock framework, when a corresponding of_clk_get call is
 * executed, or recursively from itself when parsing parent clocks.
 * Returns 0 on success, negative error code on failure.
 */
static int _sci_clk_build(struct sci_clk_provider *provider,
			  struct sci_clk *sci_clk)
{
	struct clk_init_data init = { NULL };
	char *name = NULL;
	char **parent_names = NULL;
	int i;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id,
			 sci_clk->clk_id);
	if (!name)
		return -ENOMEM;

	init.name = name;

	/*
	 * From the kernel's point of view, we only care about a clock's
	 * parents if it has more than one possible parent. In that case
	 * it is going to have mux functionality. Otherwise it is going to
	 * act as a root clock.
	 */
	if (sci_clk->num_parents < 2)
		sci_clk->num_parents = 0;

	if (sci_clk->num_parents) {
		parent_names = kcalloc(sci_clk->num_parents, sizeof(char *),
				       GFP_KERNEL);

		if (!parent_names) {
			ret = -ENOMEM;
			goto err;
		}

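		/*
		 * Parent clocks are expected to occupy the clock IDs
		 * immediately following this clock's own ID; generate their
		 * names to match.
		 */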
		for (i = 0; i < sci_clk->num_parents; i++) {
			char *parent_name;

			parent_name = kasprintf(GFP_KERNEL, "clk:%d:%d",
						sci_clk->dev_id,
						sci_clk->clk_id + 1 + i);
			if (!parent_name) {
				ret = -ENOMEM;
				goto err;
			}
			parent_names[i] = parent_name;
		}
		init.parent_names = (void *)parent_names;
	}

	init.ops = &sci_clk_ops;
	init.num_parents = sci_clk->num_parents;
	sci_clk->hw.init = &init;

	ret = devm_clk_hw_register(provider->dev, &sci_clk->hw);
	if (ret)
		dev_err(provider->dev, "failed clk register with %d\n", ret);

err:
	if (parent_names) {
		for (i = 0; i < sci_clk->num_parents; i++)
			kfree(parent_names[i]);

		kfree(parent_names);
	}

	kfree(name);

	return ret;
}

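/*
 * Comparison helper shared by bsearch() in sci_clk_get() and, via a small
 * wrapper, by list_sort(): @a is a plain struct sci_clk key while @b points
 * to an element of the clock pointer array, hence the asymmetric dereference.
 */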
static int _cmp_sci_clk(const void *a, const void *b)
{
	const struct sci_clk *ca = a;
	const struct sci_clk *cb = *(struct sci_clk **)b;

	if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
		return 0;
	if (ca->dev_id > cb->dev_id ||
	    (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
		return 1;
	return -1;
}

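/*
 * Consumers reference these clocks with a two-cell specifier of
 * (device ID, clock ID). Illustrative DT fragment only; the node label and
 * the ID values below are made up, the real ones come from the SoC data:
 *
 *	clocks = <&k2g_clks 0x002c 0>;
 */
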
/**
 * sci_clk_get - Xlate function for getting clock handles
 * @clkspec: device tree clock specifier
 * @data: pointer to the clock provider
 *
 * Xlate function for retrieving TI SCI hw clock handles based on a
 * device tree clock specifier. Called from the common clock framework,
 * when a corresponding of_clk_get call is executed. Returns a pointer
 * to the TI SCI hw clock struct, or an ERR_PTR value on failure.
 */
static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct sci_clk_provider *provider = data;
	struct sci_clk **clk;
	struct sci_clk key;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	key.dev_id = clkspec->args[0];
	key.clk_id = clkspec->args[1];

	clk = bsearch(&key, provider->clocks, provider->num_clocks,
		      sizeof(clk), _cmp_sci_clk);

	if (!clk)
		return ERR_PTR(-ENODEV);

	return &(*clk)->hw;
}

static int ti_sci_init_clocks(struct sci_clk_provider *p)
{
	int i;
	int ret;

	for (i = 0; i < p->num_clocks; i++) {
		ret = _sci_clk_build(p, p->clocks[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct of_device_id ti_sci_clk_of_match[] = {
	{ .compatible = "ti,k2g-sci-clk" },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_clk_of_match);

#ifdef CONFIG_TI_SCI_CLK_PROBE_FROM_FW
static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
{
	int ret;
	int num_clks = 0;
	struct sci_clk **clks = NULL;
	struct sci_clk **tmp_clks;
	struct sci_clk *sci_clk;
	int max_clks = 0;
	int clk_id = 0;
	int dev_id = 0;
	u32 num_parents = 0;
	int gap_size = 0;
	struct device *dev = provider->dev;

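	/*
	 * Probe the firmware by walking device and clock IDs until
	 * get_num_parents() fails: two consecutive missing clock IDs move
	 * on to the next device, and five consecutive devices without a
	 * clock 0 end the scan. The result array grows in chunks of 64
	 * entries.
	 */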
	while (1) {
		ret = provider->ops->get_num_parents(provider->sci, dev_id,
						     clk_id,
						     (void *)&num_parents);
		if (ret) {
			gap_size++;
			if (!clk_id) {
				if (gap_size >= 5)
					break;
				dev_id++;
			} else {
				if (gap_size >= 2) {
					dev_id++;
					clk_id = 0;
					gap_size = 0;
				} else {
					clk_id++;
				}
			}
			continue;
		}

		gap_size = 0;

		if (num_clks == max_clks) {
			tmp_clks = devm_kmalloc_array(dev, max_clks + 64,
						      sizeof(sci_clk),
						      GFP_KERNEL);
			if (!tmp_clks)
				return -ENOMEM;
			memcpy(tmp_clks, clks, max_clks * sizeof(sci_clk));
			if (max_clks)
				devm_kfree(dev, clks);
			max_clks += 64;
			clks = tmp_clks;
		}

		sci_clk = devm_kzalloc(dev, sizeof(*sci_clk), GFP_KERNEL);
		if (!sci_clk)
			return -ENOMEM;
		sci_clk->dev_id = dev_id;
		sci_clk->clk_id = clk_id;
		sci_clk->provider = provider;
		sci_clk->num_parents = num_parents;

		clks[num_clks] = sci_clk;

		clk_id++;
		num_clks++;
	}

	provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
					      GFP_KERNEL);
	if (!provider->clocks)
		return -ENOMEM;

	memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));

	provider->num_clocks = num_clks;

	devm_kfree(dev, clks);

	return 0;
}

#else

static int _cmp_sci_clk_list(void *priv, const struct list_head *a,
			     const struct list_head *b)
{
	struct sci_clk *ca = container_of(a, struct sci_clk, node);
	struct sci_clk *cb = container_of(b, struct sci_clk, node);

	return _cmp_sci_clk(ca, &cb);
}

static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
{
	struct device *dev = provider->dev;
	struct device_node *np = NULL;
	int ret;
	int index;
	struct of_phandle_args args;
	struct list_head clks;
	struct sci_clk *sci_clk, *prev;
	int num_clks = 0;
	int num_parents;
	int clk_id;
	const char * const clk_names[] = {
		"clocks", "assigned-clocks", "assigned-clock-parents", NULL
	};
	const char * const *clk_name;

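	/*
	 * Walk every DT node that has a "clocks", "assigned-clocks" or
	 * "assigned-clock-parents" property and collect the phandles that
	 * reference this provider. Potential parent clocks of each
	 * referenced clock are added as well, so that mux parent names
	 * resolve to registered clocks.
	 */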
	INIT_LIST_HEAD(&clks);

	clk_name = clk_names;

	while (*clk_name) {
		np = of_find_node_with_property(np, *clk_name);
		if (!np) {
			clk_name++;
			continue;
		}

		if (!of_device_is_available(np))
			continue;

		index = 0;

		do {
			ret = of_parse_phandle_with_args(np, *clk_name,
							 "#clock-cells", index,
							 &args);
			if (ret)
				break;

			if (args.args_count == 2 && args.np == dev->of_node) {
				sci_clk = devm_kzalloc(dev, sizeof(*sci_clk),
						       GFP_KERNEL);
				if (!sci_clk)
					return -ENOMEM;

				sci_clk->dev_id = args.args[0];
				sci_clk->clk_id = args.args[1];
				sci_clk->provider = provider;
				provider->ops->get_num_parents(provider->sci,
							       sci_clk->dev_id,
							       sci_clk->clk_id,
							       (void *)&sci_clk->num_parents);
				list_add_tail(&sci_clk->node, &clks);

				num_clks++;

				num_parents = sci_clk->num_parents;
				if (num_parents == 1)
					num_parents = 0;

				/*
				 * Linux kernel has inherent limitation
				 * of 255 clock parents at the moment.
				 * Right now, it is not expected that
				 * any mux clock from sci-clk driver
				 * would exceed that limit either, but
				 * the ABI basically provides that
				 * possibility. Print out a warning if
				 * this happens for any clock.
				 */
				if (num_parents >= 255) {
					dev_warn(dev, "too many parents for dev=%d, clk=%d (%d), cropping to 255.\n",
						 sci_clk->dev_id,
						 sci_clk->clk_id, num_parents);
					num_parents = 255;
				}

				clk_id = args.args[1] + 1;

				while (num_parents--) {
					sci_clk = devm_kzalloc(dev,
							       sizeof(*sci_clk),
							       GFP_KERNEL);
					if (!sci_clk)
						return -ENOMEM;
					sci_clk->dev_id = args.args[0];
					sci_clk->clk_id = clk_id++;
					sci_clk->provider = provider;
					list_add_tail(&sci_clk->node, &clks);

					num_clks++;
				}
			}

			index++;
		} while (args.np);
	}

	list_sort(NULL, &clks, _cmp_sci_clk_list);

	provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
					      GFP_KERNEL);
	if (!provider->clocks)
		return -ENOMEM;

	num_clks = 0;
	prev = NULL;

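	/* The list is sorted, so duplicate dev_id/clk_id pairs are adjacent. */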
	list_for_each_entry(sci_clk, &clks, node) {
		if (prev && prev->dev_id == sci_clk->dev_id &&
		    prev->clk_id == sci_clk->clk_id)
			continue;

		provider->clocks[num_clks++] = sci_clk;
		prev = sci_clk;
	}

	provider->num_clocks = num_clks;

	return 0;
}
#endif

/**
 * ti_sci_clk_probe - Probe function for the TI SCI clock driver
 * @pdev: platform device pointer to be probed
 *
 * Probes the TI SCI clock device. Allocates a new clock provider
 * and registers this to the common clock framework. Also applies
 * any required flags to the identified clocks via clock lists
 * supplied from DT. Returns 0 for success, negative error value
 * for failure.
 */
static int ti_sci_clk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct sci_clk_provider *provider;
	const struct ti_sci_handle *handle;
	int ret;

	handle = devm_ti_sci_get_handle(dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return -ENOMEM;

	provider->sci = handle;
	provider->ops = &handle->ops.clk_ops;
	provider->dev = dev;

#ifdef CONFIG_TI_SCI_CLK_PROBE_FROM_FW
	ret = ti_sci_scan_clocks_from_fw(provider);
	if (ret) {
		dev_err(dev, "scan clocks from FW failed: %d\n", ret);
		return ret;
	}
#else
	ret = ti_sci_scan_clocks_from_dt(provider);
	if (ret) {
		dev_err(dev, "scan clocks from DT failed: %d\n", ret);
		return ret;
	}
#endif

	ret = ti_sci_init_clocks(provider);
	if (ret) {
		pr_err("ti-sci-init-clocks failed.\n");
		return ret;
	}

	return of_clk_add_hw_provider(np, sci_clk_get, provider);
}

/**
 * ti_sci_clk_remove - Remove TI SCI clock device
 * @pdev: platform device pointer for the device to be removed
 *
 * Removes the TI SCI device. Unregisters the clock provider registered
 * via common clock framework. Any memory allocated for the device will
 * be freed silently via the devm framework.
 */
static void ti_sci_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
}

static struct platform_driver ti_sci_clk_driver = {
	.probe = ti_sci_clk_probe,
	.remove_new = ti_sci_clk_remove,
	.driver = {
		.name = "ti-sci-clk",
		.of_match_table = of_match_ptr(ti_sci_clk_of_match),
	},
};
module_platform_driver(ti_sci_clk_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) Clock driver");
MODULE_AUTHOR("Tero Kristo");
MODULE_ALIAS("platform:ti-sci-clk");