// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 SiFive, Inc.
 * Copyright (C) 2020 Zong Li
 */

#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include "sifive-prci.h"
#include "fu540-prci.h"
#include "fu740-prci.h"

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl_relaxed(pd->va + offs);
}

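/**
 * __prci_writel() - write to a PRCI register
 * @v: value to write
 * @offs: register offset to write to (in bytes, from PRCI base address)
 * @pd: PRCI context
 *
 * Write @v to the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd.
 *
 * Context: Any context.
 */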
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel_relaxed(v, pd->va + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from an FU740 PRCI PLL configuration register,
 * split it into fields and populate the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
	u32 v;

	v = r & PRCI_COREPLLCFG0_DIVR_MASK;
	v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
	c->divr = v;

	v = r & PRCI_COREPLLCFG0_DIVF_MASK;
	v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
	c->divf = v;

	v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
	v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
	c->divq = v;

	v = r & PRCI_COREPLLCFG0_RANGE_MASK;
	v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
	c->range = v;

	c->flags &=
		(WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK);

	/* external feedback mode not supported */
	c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context.  Caller must ensure that the contents of the
 *          record pointed to by @c do not change during the execution
 *          of this function.
 *
 * Returns: a value suitable for writing into a PRCI PLL configuration
 *          register
 */
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
	u32 r = 0;

	r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
	r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
	r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
	r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;

	/* external feedback mode not supported */
	r |= PRCI_COREPLLCFG0_FSE_MASK;

	return r;
}

/**
 * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd.  Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

	memcpy(&pwd->c, c, sizeof(*c));
}

/**
 * __prci_wrpll_write_cfg1() - write the clock enable/disable configuration
 * into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @enable: clock enable or disable value
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}

/*
 * Linux clock framework integration
 *
 * See the Linux clock framework documentation for more information on
 * these functions.
 */

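/* Report the PLL output rate computed from the cached divider settings. */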
unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}

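/*
 * Compute the rate the PLL would actually produce for the requested rate by
 * running the divider calculation on a scratch copy of the cached
 * configuration; neither the hardware nor the cache is modified.
 */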
long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
				  unsigned long rate,
				  unsigned long *parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct wrpll_cfg c;

	memcpy(&c, &pwd->c, sizeof(c));

	wrpll_configure_for_rate(&c, rate, *parent_rate);

	return wrpll_calc_output_rate(&c, *parent_rate);
}

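/*
 * Program the PLL for a new output rate: compute the divider settings,
 * put downstream logic into bypass (if the PLL provides a bypass hook)
 * while the PLL is reprogrammed, write the new configuration, then wait
 * for the worst-case lock time before returning.
 */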
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
			       unsigned long rate, unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	udelay(wrpll_calc_max_lock_us(&pwd->c));

	return 0;
}

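/* A PLL-derived clock is enabled when the CKE bit in its CFG1 register is set. */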
int sifive_clk_is_enabled(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	u32 r;

	r = __prci_readl(pd, pwd->cfg1_offs);

	if (r & PRCI_COREPLLCFG1_CKE_MASK)
		return 1;
	else
		return 0;
}

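/* Set the CKE bit to enable the PLL output, then take the downstream mux out of bypass if a hook exists. */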
int sifive_prci_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (sifive_clk_is_enabled(hw))
		return 0;

	__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

	if (pwd->disable_bypass)
		pwd->disable_bypass(pd);

	return 0;
}

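/* Switch consumers to the bypass source (if available) before clearing CKE to gate the PLL output. */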
void sifive_prci_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	u32 r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	r = __prci_readl(pd, pwd->cfg1_offs);
	r &= ~PRCI_COREPLLCFG1_CKE_MASK;

	__prci_wrpll_write_cfg1(pd, pwd, r);
}

/* TLCLKSEL clock integration */

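/* TLCLK is the parent rate divided by 1 or 2, selected by the TLCLKSEL status bit. */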
unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}

/* HFPCLK clock integration */

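/* The HFPCLK divider register holds (divisor - 2), so add 2 back when computing the rate. */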
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);

	return div_u64(parent_rate, div + 2);
}

/*
 * Core clock mux control
 */

/**
 * sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

/**
 * sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output
 * COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

/**
 * sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
 * FINAL_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the final COREPLL output clock; return once
 * complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

/**
 * sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
 * output DVFS_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);	/* barrier */
}

/**
 * sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
 * output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);	/* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
 * output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);	/* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
 * output HFPCLKPLL
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);	/* barrier */
}

/* PCIE AUX clock APIs for enable, disable. */
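/* The PCIe AUX clock is a simple gate controlled by a single enable bit in the PCIE_AUX register. */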
int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r;

	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);

	if (r & PRCI_PCIE_AUX_EN_MASK)
		return 1;
	else
		return 0;
}

int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	if (sifive_prci_pcie_aux_clock_is_enabled(hw))
		return 0;

	__prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);	/* barrier */

	return 0;
}

void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	__prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);	/* barrier */
}

/**
 * __prci_register_clocks() - register clock controls in the PRCI
 * @dev: Linux struct device
 * @pd: pointer to the PRCI per-device instance data
 * @desc: pointer to the SoC-specific clock description
 *
 * Register the list of clock controls described in __prci_init_clocks[] with
 * the Linux clock framework.
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
				  const struct prci_clk_desc *desc)
{
	struct clk_init_data init = { };
	struct __prci_clock *pic;
	int parent_count, i, r;

	parent_count = of_clk_get_parent_count(dev->of_node);
	if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
		dev_err(dev, "expected only two parent clocks, found %d\n",
			parent_count);
		return -EINVAL;
	}

	/* Register PLLs */
	for (i = 0; i < desc->num_clks; ++i) {
		pic = &(desc->clks[i]);

		init.name = pic->name;
		init.parent_names = &pic->parent_name;
		init.num_parents = 1;
		init.ops = pic->ops;
		pic->hw.init = &init;

		pic->pd = pd;

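		/* for PLL-backed clocks, prime the cached configuration from the hardware */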
		if (pic->pwd)
			__prci_wrpll_read_cfg0(pd, pic->pwd);

		r = devm_clk_hw_register(dev, &pic->hw);
		if (r) {
			dev_warn(dev, "Failed to register clock %s: %d\n",
				 init.name, r);
			return r;
		}

		r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
		if (r) {
			dev_warn(dev, "Failed to register clkdev for %s: %d\n",
				 init.name, r);
			return r;
		}

		pd->hw_clks.hws[i] = &pic->hw;
	}

	pd->hw_clks.num = i;

	r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					&pd->hw_clks);
	if (r) {
		dev_err(dev, "could not add hw_provider: %d\n", r);
		return r;
	}

	return 0;
}

/**
 * sifive_prci_probe() - initialize prci data and check parent count
 * @pdev: platform device pointer for the prci
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
static int sifive_prci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct __prci_data *pd;
	const struct prci_clk_desc *desc;
	int r;

	desc = of_device_get_match_data(&pdev->dev);

	pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pd->va = devm_ioremap_resource(dev, res);
	if (IS_ERR(pd->va))
		return PTR_ERR(pd->va);

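	/* expose the PRCI device-reset register bits through the reset-simple helper */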
	pd->reset.rcdev.owner = THIS_MODULE;
	pd->reset.rcdev.nr_resets = PRCI_RST_NR;
	pd->reset.rcdev.ops = &reset_simple_ops;
	pd->reset.rcdev.of_node = pdev->dev.of_node;
	pd->reset.active_low = true;
	pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
	spin_lock_init(&pd->reset.lock);

	r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
	if (r) {
		dev_err(dev, "could not register reset controller: %d\n", r);
		return r;
	}

	r = __prci_register_clocks(dev, pd, desc);
	if (r) {
		dev_err(dev, "could not register clocks: %d\n", r);
		return r;
	}

	dev_dbg(dev, "SiFive PRCI probed\n");

	return 0;
}

static const struct of_device_id sifive_prci_of_match[] = {
	{.compatible = "sifive,fu540-c000-prci", .data = &prci_clk_fu540},
	{.compatible = "sifive,fu740-c000-prci", .data = &prci_clk_fu740},
	{}
};

static struct platform_driver sifive_prci_driver = {
	.driver = {
		.name = "sifive-clk-prci",
		.of_match_table = sifive_prci_of_match,
	},
	.probe = sifive_prci_probe,
};

static int __init sifive_prci_init(void)
{
	return platform_driver_register(&sifive_prci_driver);
}
core_initcall(sifive_prci_init);