// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Maxime Ripard
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "ccu_common.h"
#include "ccu_gate.h"
#include "ccu_reset.h"

struct sunxi_ccu {
	const struct sunxi_ccu_desc *desc;
	spinlock_t lock;
	struct ccu_reset reset;
};

void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
{
	void __iomem *addr;
	u32 reg;

	if (!lock)
		return;

	if (common->features & CCU_FEATURE_LOCK_REG)
		addr = common->base + common->lock_reg;
	else
		addr = common->base + common->reg;

	WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}
EXPORT_SYMBOL_NS_GPL(ccu_helper_wait_for_lock, SUNXI_CCU);
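
/*
 * Usage sketch (hypothetical identifiers, for illustration only): a factor
 * clock's .set_rate hook would typically write the new multiplier/divider
 * values to the clock's register and then spin on the lock bit before
 * returning, e.g.:
 *
 *	writel(reg, pll->common.base + pll->common.reg);
 *	ccu_helper_wait_for_lock(&pll->common, pll->lock);
 *
 * where "pll" is the driver's clock wrapper embedding a ccu_common and
 * "lock" holds the PLL lock bit mask.
 */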

/*
 * This clock notifier is called when the frequency of a PLL clock is
 * changed. In common PLL designs, changes to the dividers take effect
 * almost immediately, while changes to the multipliers (implemented
 * as dividers in the feedback loop) take a few cycles to work into
 * the feedback loop for the PLL to stabilize.
 *
 * Sometimes when the PLL clock rate is changed, the decrease in the
 * divider is too much for the decrease in the multiplier to catch up.
 * The PLL clock rate will spike, and in some cases, might lock up
 * completely.
 *
 * This notifier callback will gate and then ungate the clock,
 * effectively resetting it, so it proceeds to work. Care must be
 * taken to reparent consumers to other temporary clocks during the
 * rate change, and this notifier callback must be the first one
 * to be registered.
 */
static int ccu_pll_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
	int ret = 0;

	if (event != POST_RATE_CHANGE)
		goto out;

	ccu_gate_helper_disable(pll->common, pll->enable);

	ret = ccu_gate_helper_enable(pll->common, pll->enable);
	if (ret)
		goto out;

	ccu_helper_wait_for_lock(pll->common, pll->lock);

out:
	return notifier_from_errno(ret);
}

int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
{
	pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;

	return clk_notifier_register(pll_nb->common->hw.clk,
				     &pll_nb->clk_nb);
}
EXPORT_SYMBOL_NS_GPL(ccu_pll_notifier_register, SUNXI_CCU);
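
/*
 * Usage sketch (hypothetical identifiers, for illustration only): a SoC
 * clock driver describes the PLL's gate and lock bits in a ccu_pll_nb
 * and registers it right after probing the CCU, before any other
 * notifier, e.g.:
 *
 *	static struct ccu_pll_nb example_pll_cpu_nb = {
 *		.common	= &pll_cpux_clk.common,
 *		.enable	= BIT(31),
 *		.lock	= BIT(28),
 *	};
 *
 *	ccu_pll_notifier_register(&example_pll_cpu_nb);
 */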

static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
			   struct device_node *node, void __iomem *reg,
			   const struct sunxi_ccu_desc *desc)
{
	struct ccu_reset *reset;
	int i, ret;

	ccu->desc = desc;

	spin_lock_init(&ccu->lock);

	for (i = 0; i < desc->num_ccu_clks; i++) {
		struct ccu_common *cclk = desc->ccu_clks[i];

		if (!cclk)
			continue;

		cclk->base = reg;
		cclk->lock = &ccu->lock;
	}

	for (i = 0; i < desc->hw_clks->num; i++) {
		struct clk_hw *hw = desc->hw_clks->hws[i];
		const char *name;

		if (!hw)
			continue;

		name = hw->init->name;
		if (dev)
			ret = clk_hw_register(dev, hw);
		else
			ret = of_clk_hw_register(node, hw);
		if (ret) {
			pr_err("Couldn't register clock %d - %s\n", i, name);
			goto err_clk_unreg;
		}
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
				     desc->hw_clks);
	if (ret)
		goto err_clk_unreg;

	reset = &ccu->reset;
	reset->rcdev.of_node = node;
	reset->rcdev.ops = &ccu_reset_ops;
	reset->rcdev.owner = dev ? dev->driver->owner : THIS_MODULE;
	reset->rcdev.nr_resets = desc->num_resets;
	reset->base = reg;
	reset->lock = &ccu->lock;
	reset->reset_map = desc->resets;

	ret = reset_controller_register(&reset->rcdev);
	if (ret)
		goto err_del_provider;

	return 0;

err_del_provider:
	of_clk_del_provider(node);
err_clk_unreg:
	while (--i >= 0) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (!hw)
			continue;
		clk_hw_unregister(hw);
	}
	return ret;
}

static void devm_sunxi_ccu_release(struct device *dev, void *res)
{
	struct sunxi_ccu *ccu = res;
	const struct sunxi_ccu_desc *desc = ccu->desc;
	int i;

	reset_controller_unregister(&ccu->reset.rcdev);
	of_clk_del_provider(dev->of_node);

	for (i = 0; i < desc->hw_clks->num; i++) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (!hw)
			continue;
		clk_hw_unregister(hw);
	}
}

int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
			 const struct sunxi_ccu_desc *desc)
{
	struct sunxi_ccu *ccu;
	int ret;

	ccu = devres_alloc(devm_sunxi_ccu_release, sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return -ENOMEM;

	ret = sunxi_ccu_probe(ccu, dev, dev->of_node, reg, desc);
	if (ret) {
		devres_free(ccu);
		return ret;
	}

	devres_add(dev, ccu);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_sunxi_ccu_probe, SUNXI_CCU);
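
/*
 * Usage sketch (hypothetical identifiers, for illustration only): a
 * platform driver's probe routine maps its registers and hands them,
 * together with its sunxi_ccu_desc, to devm_sunxi_ccu_probe(); cleanup
 * then happens automatically through devres when the device is unbound,
 * e.g.:
 *
 *	static int example_ccu_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *reg;
 *
 *		reg = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(reg))
 *			return PTR_ERR(reg);
 *
 *		return devm_sunxi_ccu_probe(&pdev->dev, reg,
 *					    &example_ccu_desc);
 *	}
 */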

void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
			const struct sunxi_ccu_desc *desc)
{
	struct sunxi_ccu *ccu;
	int ret;

	ccu = kzalloc(sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return;

	ret = sunxi_ccu_probe(ccu, NULL, node, reg, desc);
	if (ret) {
		pr_err("%pOF: probing clocks failed: %d\n", node, ret);
		kfree(ccu);
	}
}
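
/*
 * Usage sketch (hypothetical identifiers, for illustration only): drivers
 * that must set up their clocks at early boot, before a struct device is
 * available, typically call this from a CLK_OF_DECLARE() init hook after
 * mapping the controller themselves, e.g.:
 *
 *	static void __init example_ccu_setup(struct device_node *node)
 *	{
 *		void __iomem *reg = of_iomap(node, 0);
 *
 *		if (!reg) {
 *			pr_err("%pOF: could not map registers\n", node);
 *			return;
 *		}
 *
 *		of_sunxi_ccu_probe(node, reg, &example_ccu_desc);
 *	}
 *	CLK_OF_DECLARE(example_ccu, "vendor,example-ccu", example_ccu_setup);
 */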

MODULE_LICENSE("GPL");