// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>

#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_28nm_8960.xml.h"

/*
 * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
 *
 *
 *                        +------+
 *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
 *  F * byte_clk    |     +------+
 *                  | bit clock divider (F / 8)
 *                  |
 *                  |     +------+
 *                  o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
 *                  |     +------+                 | (sets parent rate)
 *                  | byte clock divider (F)       |
 *                  |                              |
 *                  |                              o---> To esc RCG
 *                  |                                (doesn't set parent rate)
 *                  |
 *                  |     +------+
 *                  o-----| DIV3 |----dsi0pll------o---> To dsi RCG
 *                        +------+                 | (sets parent rate)
 *                  dsi clock divider (F * magic)  |
 *                                                 |
 *                                                 o---> To pixel rcg
 *                                                  (doesn't set parent rate)
 */
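
/*
 * Worked example (a reading of the divider code below, not taken from
 * hardware documentation): for a 62.5 MHz byte clock the bit clock is
 * 500 MHz, get_vco_mul_factor() picks F = 16, so the VCO runs at
 * 62.5 MHz * 16 = 1 GHz; DIV1 = F / 8 = 2 recovers the 500 MHz bit clock
 * and DIV2 = F = 16 recovers the 62.5 MHz byte clock.
 */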

#define POLL_MAX_READS		8000
#define POLL_TIMEOUT_US		1

#define VCO_REF_CLK_RATE	27000000
#define VCO_MIN_RATE		600000000
#define VCO_MAX_RATE		1200000000

#define VCO_PREF_DIV_RATIO	27
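
/*
 * With the 27 MHz reference, ref / VCO_PREF_DIV_RATIO gives a 1 MHz PLL
 * comparison rate, so the feedback divider programmed in set_rate() works
 * out to roughly (VCO rate in MHz) / 2 - 1 (derived from the math in this
 * file, not from hardware documentation).
 */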

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv2;
	u8 postdiv1;
};

struct clk_bytediv {
	struct clk_hw hw;
	void __iomem *reg;
};

struct dsi_pll_28nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, clk_hw)

static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				    int nb_tries, int timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
		pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

/*
 * Clock Callbacks
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 val, temp, fb_divider;

	DBG("rate=%lu, parent's=%lu", rate, parent_rate);

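	/*
	 * Feedback divider programming, as read from this code rather than a
	 * datasheet: with the 27 MHz reference and a pre-divider of 27 the
	 * comparison rate is 1 MHz, so fb_divider ends up as
	 * (rate in MHz) / 2 - 1, e.g. a 1 GHz VCO gives 499 (0x1f3): 0xf3
	 * goes into PLL_CTRL_1 and the top bits (0x1) into PLL_CTRL_2.
	 */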
	temp = rate / 10;
	val = VCO_REF_CLK_RATE / 10;
	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
	fb_divider = fb_divider / 2 - 1;
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
			fb_divider & 0xff);

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);

	val |= (fb_divider >> 8) & 0x07;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
			val);

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);

	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
			val);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
			0xf);

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
	val |= 0x7 << 4;
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
			val);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	unsigned long vco_rate;
	u32 status, fb_divider, temp, ref_divider;

	VERB("parent_rate=%lu", parent_rate);

	status = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);

	if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
		fb_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
		fb_divider &= 0xff;
		temp = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
		fb_divider = (temp << 8) | fb_divider;
		fb_divider += 1;

		ref_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
		ref_divider &= 0x3f;
		ref_divider += 1;

		/* the feedback divider was programmed as half the ratio, hence multiply by 2 */
		vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
	} else {
		vco_rate = 0;
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	bool locked;
	unsigned int bit_div, byte_div;
	int max_reads = 1000, timeout_us = 100;
	u32 val;

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	/*
	 * Before enabling the PLL, configure the bit clock divider, since we
	 * don't expose it as a clock to the outside world:
	 * 1: read back the byte clock divider that should already be set
	 * 2: divide by 8 to get the bit clock divider
	 * 3: write it to POSTDIV1
	 */
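	/*
	 * For instance, assuming F = 16 was programmed by the byte divider:
	 * CTRL_9 reads back 15, byte_div = 16, bit_div = 2, and POSTDIV1 in
	 * CTRL_8 is written as 1.
	 */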
	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
	byte_div = val + 1;
	bit_div = byte_div / 8;

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
	val &= ~0xf;
	val |= (bit_div - 1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);

	/* enable the PLL */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked)) {
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
		return -EINVAL;
	}

	DBG("DSI PLL lock success");
	pll_28nm->phy->pll_on = true;

	return 0;
}

static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(!pll_28nm->phy->pll_on))
		return;

	dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);

	pll_28nm->phy->pll_on = false;
}

static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	if      (rate < pll_28nm->phy->cfg->min_pll_rate)
		return  pll_28nm->phy->cfg->min_pll_rate;
	else if (rate > pll_28nm->phy->cfg->max_pll_rate)
		return  pll_28nm->phy->cfg->max_pll_rate;
	else
		return rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * Custom byte clock divider clk_ops
 *
 * This clock is the entry point to configuring the PLL. The user (dsi host)
 * will set this clock's rate to the desired byte clock rate. The VCO
 * frequency is a multiple of the byte clock rate. The multiplication factor
 * (shown as F in the diagram above) is a function of the byte clock rate.
 *
 * This custom divider clock ensures that its parent (VCO) is set to the
 * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
 * accordingly.
 */
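
/*
 * For example, a sketch of the flow below assuming a 62.5 MHz byte clock
 * request: clk_bytediv_round_rate() picks F = 16 and rounds the VCO to
 * 1 GHz, CLK_SET_RATE_PARENT propagates that rate to the VCO, and
 * clk_bytediv_set_rate() then programs F - 1 = 15 as the byte clock
 * postdivider (PLL_CTRL_9).
 */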
#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)

static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
	unsigned int div;

	div = dsi_phy_read(bytediv->reg) & 0xff;

	return parent_rate / (div + 1);
}

/* find the multiplication factor (wrt byte clock) at which the VCO should be set */
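/*
 * The bands below appear to keep the resulting VCO rate roughly within the
 * 600-1200 MHz range defined above, e.g. a 200 MHz bit clock (25 MHz byte
 * clock) gets a factor of 32 and an 800 MHz VCO.
 */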
static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
{
	unsigned long bit_mhz;

	/* convert to bit clock in MHz */
	bit_mhz = (byte_clk_rate * 8) / 1000000;

	if (bit_mhz < 125)
		return 64;
	else if (bit_mhz < 250)
		return 32;
	else if (bit_mhz < 600)
		return 16;
	else
		return 8;
}

static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	unsigned long best_parent;
	unsigned int factor;

	factor = get_vco_mul_factor(rate);

	best_parent = rate * factor;
	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);

	return *prate / factor;
}

static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
	u32 val;
	unsigned int factor;

	factor = get_vco_mul_factor(rate);

	val = dsi_phy_read(bytediv->reg);
	val |= (factor - 1) & 0xff;
	dsi_phy_write(bytediv->reg, val);

	return 0;
}

/* Our special byte clock divider ops */
static const struct clk_ops clk_bytediv_ops = {
	.round_rate = clk_bytediv_round_rate,
	.set_rate = clk_bytediv_set_rate,
	.recalc_rate = clk_bytediv_recalc_rate,
};

/*
 * PLL Callbacks
 */
static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;

	cached_state->postdiv3 =
			dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
	cached_state->postdiv2 =
			dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
	cached_state->postdiv1 =
			dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

	cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
}

static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;
	int ret;

	ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
			cached_state->postdiv3);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
			cached_state->postdiv2);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
			cached_state->postdiv1);

	return 0;
}

static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
	char *clk_name, *parent_name, *vco_name;
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref",
		},
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_28nm_vco,
	};
	struct device *dev = &pll_28nm->phy->pdev->dev;
	struct clk_hw *hw;
	struct clk_bytediv *bytediv;
	struct clk_init_data bytediv_init = { };
	int ret;

	DBG("%d", pll_28nm->phy->id);

	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
	if (!bytediv)
		return -ENOMEM;

	vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!vco_name)
		return -ENOMEM;

	parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!parent_name)
		return -ENOMEM;

	clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!clk_name)
		return -ENOMEM;

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	vco_init.name = vco_name;

	pll_28nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
	if (ret)
		return ret;

	/* prepare and register bytediv */
	bytediv->hw.init = &bytediv_init;
	bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;

	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);

	bytediv_init.name = clk_name;
	bytediv_init.ops = &clk_bytediv_ops;
	bytediv_init.flags = CLK_SET_RATE_PARENT;
	bytediv_init.parent_names = (const char * const *) &parent_name;
	bytediv_init.num_parents = 1;

	/* DIV2 */
	ret = devm_clk_hw_register(dev, &bytediv->hw);
	if (ret)
		return ret;
	provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;

	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
	/* DIV3 */
	hw = devm_clk_hw_register_divider(dev, clk_name,
				parent_name, 0, pll_28nm->phy->pll_base +
				REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
				0, 8, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	return 0;
}

static int dsi_pll_28nm_8960_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_28nm *pll_28nm;
	int ret;

	if (!pdev)
		return -ENODEV;

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return -ENOMEM;

	pll_28nm->phy = phy;

	ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_28nm->clk_hw;

	return 0;
}

static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
		struct msm_dsi_dphy_timing *timing)
{
	void __iomem *base = phy->base;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
		DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
		DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
		DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
		DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
		DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
		DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
		DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
		DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
		DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
		DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
		DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}

static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
		0x100);
}

static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
}

static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;
	u32 status;
	int i = 5000;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
			0x3);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
	usleep_range(5000, 6000);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);

	do {
		status = dsi_phy_read(base +
				REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);

		if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
			break;

		udelay(1);
	} while (--i > 0);
}

static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	int i;

	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
			0x00);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
			0x01);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
			0x66);
	}

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
}

static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
				struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;

	DBG("");

	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	dsi_28nm_phy_regulator_init(phy);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);

	/* strength control */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);

	/* phy ctrl */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);

	dsi_28nm_phy_regulator_ctrl(phy);

	dsi_28nm_phy_calibration(phy);

	dsi_28nm_phy_lane_config(phy);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);

	dsi_28nm_dphy_set_timing(phy, timing);

	return 0;
}

static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);

	/*
	 * Wait for the register writes to complete in order to
	 * ensure that the phy is completely disabled
	 */
	wmb();
}

const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
	.has_phy_regulator = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 100000, 100},	/* 1.8 V */
		},
	},
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_8960_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x4700300, 0x5800300 },
	.num_dsi_phy = 2,
};