1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Marvell PXA family clocks
4  *
5  * Copyright (C) 2014 Robert Jarzmik
6  *
7  * Common clock code for PXA clocks ("CKEN" type clocks + DT)
8  */
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/clkdev.h>
12 #include <linux/io.h>
13 #include <linux/of.h>
14 #include <linux/soc/pxa/smemc.h>
15 
16 #include <dt-bindings/clock/pxa-clock.h>
17 #include "clk-pxa.h"
18 
19 #define KHz 1000
20 #define MHz (1000 * 1000)
21 
22 #define MDREFR_K0DB4	(1 << 29)	/* SDCLK0 Divide by 4 Control/Status */
23 #define MDREFR_K2FREE	(1 << 25)	/* SDRAM Free-Running Control */
24 #define MDREFR_K1FREE	(1 << 24)	/* SDRAM Free-Running Control */
25 #define MDREFR_K0FREE	(1 << 23)	/* SDRAM Free-Running Control */
26 #define MDREFR_SLFRSH	(1 << 22)	/* SDRAM Self-Refresh Control/Status */
27 #define MDREFR_APD	(1 << 20)	/* SDRAM/SSRAM Auto-Power-Down Enable */
28 #define MDREFR_K2DB2	(1 << 19)	/* SDCLK2 Divide by 2 Control/Status */
29 #define MDREFR_K2RUN	(1 << 18)	/* SDCLK2 Run Control/Status */
30 #define MDREFR_K1DB2	(1 << 17)	/* SDCLK1 Divide by 2 Control/Status */
31 #define MDREFR_K1RUN	(1 << 16)	/* SDCLK1 Run Control/Status */
32 #define MDREFR_E1PIN	(1 << 15)	/* SDCKE1 Level Control/Status */
33 #define MDREFR_K0DB2	(1 << 14)	/* SDCLK0 Divide by 2 Control/Status */
34 #define MDREFR_K0RUN	(1 << 13)	/* SDCLK0 Run Control/Status */
35 #define MDREFR_E0PIN	(1 << 12)	/* SDCKE0 Level Control/Status */
36 #define MDREFR_DB2_MASK	(MDREFR_K2DB2 | MDREFR_K1DB2)
37 #define MDREFR_DRI_MASK	0xFFF
38 
/* Serializes read-modify-write access to the shared CKEN gate registers. */
static DEFINE_SPINLOCK(pxa_clk_lock);

/*
 * Clock lookup table for the DT provider, indexed by the ids from
 * dt-bindings/clock/pxa-clock.h; populated by clkdev_pxa_register().
 */
static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
	.clks = pxa_clocks,
	.clk_num = CLK_MAX,
};
46 
/*
 * struct pxa_clk - runtime state of one PXA "CKEN" composite clock
 * @hw: clk_hw handle shared by the rate and mux members of the composite
 * @lp: fixed-factor divider used while the system is in low-power mode
 * @hp: fixed-factor divider used while the system is in high-power mode
 * @gate: CKEN enable/disable gate (reg/lock filled in at registration)
 * @is_in_low_power: optional probe for the current power mode; when NULL,
 *                   the low-power factor @lp is used unconditionally
 */
struct pxa_clk {
	struct clk_hw hw;
	struct clk_fixed_factor lp;
	struct clk_fixed_factor hp;
	struct clk_gate gate;
	bool (*is_in_low_power)(void);
};

/* Map the composite's clk_hw back to its containing pxa_clk. */
#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)
56 
cken_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)57 static unsigned long cken_recalc_rate(struct clk_hw *hw,
58 				      unsigned long parent_rate)
59 {
60 	struct pxa_clk *pclk = to_pxa_clk(hw);
61 	struct clk_fixed_factor *fix;
62 
63 	if (!pclk->is_in_low_power || pclk->is_in_low_power())
64 		fix = &pclk->lp;
65 	else
66 		fix = &pclk->hp;
67 	__clk_hw_set_clk(&fix->hw, hw);
68 	return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
69 }
70 
/* Rate half of the composite clock: rate only, no enable/parent control. */
static const struct clk_ops cken_rate_ops = {
	.recalc_rate = cken_recalc_rate,
};
74 
cken_get_parent(struct clk_hw * hw)75 static u8 cken_get_parent(struct clk_hw *hw)
76 {
77 	struct pxa_clk *pclk = to_pxa_clk(hw);
78 
79 	if (!pclk->is_in_low_power)
80 		return 0;
81 	return pclk->is_in_low_power() ? 0 : 1;
82 }
83 
/* Mux half of the composite clock; parent selection is read-only in effect. */
static const struct clk_ops cken_mux_ops = {
	.get_parent = cken_get_parent,
	.set_parent = dummy_clk_set_parent,
};
88 
clkdev_pxa_register(int ckid,const char * con_id,const char * dev_id,struct clk * clk)89 void __init clkdev_pxa_register(int ckid, const char *con_id,
90 				const char *dev_id, struct clk *clk)
91 {
92 	if (!IS_ERR(clk) && (ckid != CLK_NONE))
93 		pxa_clocks[ckid] = clk;
94 	if (!IS_ERR(clk))
95 		clk_register_clkdev(clk, con_id, dev_id);
96 }
97 
clk_pxa_cken_init(const struct desc_clk_cken * clks,int nb_clks,void __iomem * clk_regs)98 int __init clk_pxa_cken_init(const struct desc_clk_cken *clks,
99 			     int nb_clks, void __iomem *clk_regs)
100 {
101 	int i;
102 	struct pxa_clk *pxa_clk;
103 	struct clk *clk;
104 
105 	for (i = 0; i < nb_clks; i++) {
106 		pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
107 		pxa_clk->is_in_low_power = clks[i].is_in_low_power;
108 		pxa_clk->lp = clks[i].lp;
109 		pxa_clk->hp = clks[i].hp;
110 		pxa_clk->gate = clks[i].gate;
111 		pxa_clk->gate.reg = clk_regs + clks[i].cken_reg;
112 		pxa_clk->gate.lock = &pxa_clk_lock;
113 		clk = clk_register_composite(NULL, clks[i].name,
114 					     clks[i].parent_names, 2,
115 					     &pxa_clk->hw, &cken_mux_ops,
116 					     &pxa_clk->hw, &cken_rate_ops,
117 					     &pxa_clk->gate.hw, &clk_gate_ops,
118 					     clks[i].flags);
119 		clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
120 				    clks[i].dev_id, clk);
121 	}
122 	return 0;
123 }
124 
/*
 * Hook the shared pxa_clocks table up as this node's DT clock provider,
 * resolved by index through the standard onecell getter.
 */
void __init clk_pxa_dt_common_init(struct device_node *np)
{
	of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}
129 
/*
 * Switch the core between run and turbo mode by rewriting CLKCFG
 * (coprocessor 14 register c6) with the FCS (frequency change) bit set.
 *
 * @on: true to enable turbo mode, false to drop back to run mode
 *
 * Runs with local interrupts disabled for the whole sequence. The branch
 * dance around ".align 5" places the mcr on a 32-byte boundary —
 * NOTE(review): presumably so the frequency-change instruction sits in a
 * single prefetched cache line while the core clock switches; confirm
 * against the PXA developer's manual.
 */
void pxa2xx_core_turbo_switch(bool on)
{
	unsigned long flags;
	unsigned int unused, clkcfg;

	local_irq_save(flags);

	/* Read current CLKCFG, clear both turbo bits, then set as requested. */
	asm("mrc p14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
	clkcfg &= ~CLKCFG_TURBO & ~CLKCFG_HALFTURBO;
	if (on)
		clkcfg |= CLKCFG_TURBO;
	clkcfg |= CLKCFG_FCS;

	asm volatile(
	"	b	2f\n"
	"	.align	5\n"
	"1:	mcr	p14, 0, %1, c6, c0, 0\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
		: "=&r" (unused) : "r" (clkcfg));

	local_irq_restore(flags);
}
154 
/*
 * Change the core PLL frequency, adjusting the SDRAM controller's MDREFR
 * refresh interval (DRI) and clock-divide bits around the switch so the
 * SDRAM is never over- or under-refreshed.
 *
 * @freq: target frequency entry (provides cccr, clkcfg, membus_khz, div2)
 * @mdrefr_dri: converts a memory-bus frequency in kHz to a DRI field value
 * @cccr: core clock configuration register to program with freq->cccr
 *
 * Runs with local interrupts disabled for the whole sequence.
 */
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
			u32 (*mdrefr_dri)(unsigned int),
			void __iomem *cccr)
{
	unsigned int clkcfg = freq->clkcfg;
	unsigned int unused, preset_mdrefr, postset_mdrefr;
	unsigned long flags;
	void __iomem *mdrefr = pxa_smemc_get_mdrefr();

	local_irq_save(flags);

	/* Calculate the next MDREFR.  If we're slowing down the SDRAM clock
	 * we need to preset the smaller DRI before the change.	 If we're
	 * speeding up we need to set the larger DRI value after the change.
	 */
	preset_mdrefr = postset_mdrefr = readl(mdrefr);
	if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(freq->membus_khz)) {
		preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
		preset_mdrefr |= mdrefr_dri(freq->membus_khz);
	}
	postset_mdrefr =
		(postset_mdrefr & ~MDREFR_DRI_MASK) |
		mdrefr_dri(freq->membus_khz);

	/* If we're dividing the memory clock by two for the SDRAM clock, this
	 * must be set prior to the change.  Clearing the divide must be done
	 * after the change.
	 */
	if (freq->div2) {
		preset_mdrefr  |= MDREFR_DB2_MASK;
		postset_mdrefr |= MDREFR_DB2_MASK;
	} else {
		postset_mdrefr &= ~MDREFR_DB2_MASK;
	}

	/* Set new the CCCR and prepare CLKCFG */
	writel(freq->cccr, cccr);

	/* Critical sequence: preset MDREFR, kick CLKCFG[FCS], postset MDREFR.
	 * The ldr warms the MDREFR address and the ".align 5" branch trick
	 * keeps the sequence within one 32-byte cache line —
	 * NOTE(review): presumably so no external fetch happens while the
	 * memory clock is switching; confirm against the PXA manual.
	 */
	asm volatile(
	"	ldr	r4, [%1]\n"
	"	b	2f\n"
	"	.align	5\n"
	"1:	str	%3, [%1]		/* preset the MDREFR */\n"
	"	mcr	p14, 0, %2, c6, c0, 0	/* set CLKCFG[FCS] */\n"
	"	str	%4, [%1]		/* postset the MDREFR */\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
	     : "=&r" (unused)
	     : "r" (mdrefr), "r" (clkcfg), "r" (preset_mdrefr),
	       "r" (postset_mdrefr)
	     : "r4", "r5");

	local_irq_restore(flags);
}
210 
pxa2xx_determine_rate(struct clk_rate_request * req,struct pxa2xx_freq * freqs,int nb_freqs)211 int pxa2xx_determine_rate(struct clk_rate_request *req,
212 			  struct pxa2xx_freq *freqs, int nb_freqs)
213 {
214 	int i, closest_below = -1, closest_above = -1;
215 	unsigned long rate;
216 
217 	for (i = 0; i < nb_freqs; i++) {
218 		rate = freqs[i].cpll;
219 		if (rate == req->rate)
220 			break;
221 		if (rate < req->min_rate)
222 			continue;
223 		if (rate > req->max_rate)
224 			continue;
225 		if (rate <= req->rate)
226 			closest_below = i;
227 		if ((rate >= req->rate) && (closest_above == -1))
228 			closest_above = i;
229 	}
230 
231 	req->best_parent_hw = NULL;
232 
233 	if (i < nb_freqs) {
234 		rate = req->rate;
235 	} else if (closest_below >= 0) {
236 		rate = freqs[closest_below].cpll;
237 	} else if (closest_above >= 0) {
238 		rate = freqs[closest_above].cpll;
239 	} else {
240 		pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
241 		return -EINVAL;
242 	}
243 
244 	pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
245 	req->rate = rate;
246 
247 	return 0;
248 }
249