1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Freescale eSDHC controller driver.
4  *
5  * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6  * Copyright (c) 2009 MontaVista Software, Inc.
7  * Copyright 2020 NXP
8  *
9  * Authors: Xiaobo Xie <X.Xie@freescale.com>
10  *	    Anton Vorontsov <avorontsov@ru.mvista.com>
11  */
12 
13 #include <linux/err.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_address.h>
17 #include <linux/delay.h>
18 #include <linux/module.h>
19 #include <linux/sys_soc.h>
20 #include <linux/clk.h>
21 #include <linux/ktime.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/iopoll.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/mmc.h>
26 #include "sdhci-pltfm.h"
27 #include "sdhci-esdhc.h"
28 
29 #define VENDOR_V_22	0x12
30 #define VENDOR_V_23	0x13
31 
32 #define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
33 
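/*
 * Per-SoC clock fixup table: sd_dflt_max_clk caps the default clock used
 * for SD cards in legacy timing, while max_clk[] holds an optional
 * per-timing-mode ceiling indexed by MMC_TIMING_*. A zero entry means no
 * fixup for that mode (see esdhc_of_set_clock()).
 */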
34 struct esdhc_clk_fixup {
35 	const unsigned int sd_dflt_max_clk;
36 	const unsigned int max_clk[MMC_TIMING_NUM];
37 };
38 
39 static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
40 	.sd_dflt_max_clk = 25000000,
41 	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
42 	.max_clk[MMC_TIMING_SD_HS] = 46500000,
43 };
44 
45 static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
46 	.sd_dflt_max_clk = 25000000,
47 	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
48 	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
49 };
50 
51 static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
52 	.sd_dflt_max_clk = 25000000,
53 	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
54 	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
55 };
56 
57 static const struct esdhc_clk_fixup p1010_esdhc_clk = {
58 	.sd_dflt_max_clk = 20000000,
59 	.max_clk[MMC_TIMING_LEGACY] = 20000000,
60 	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
61 	.max_clk[MMC_TIMING_SD_HS] = 40000000,
62 };
63 
64 static const struct of_device_id sdhci_esdhc_of_match[] = {
65 	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
66 	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
67 	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
68 	{ .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk},
69 	{ .compatible = "fsl,mpc8379-esdhc" },
70 	{ .compatible = "fsl,mpc8536-esdhc" },
71 	{ .compatible = "fsl,esdhc" },
72 	{ }
73 };
74 MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
75 
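/*
 * Driver private data. The quirk_* flags are derived from SoC/compatible
 * matching at probe time; div_ratio caches the programmed prescaler *
 * divisor so the tuning code can compute its window pointers from it.
 */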
76 struct sdhci_esdhc {
77 	u8 vendor_ver;
78 	u8 spec_ver;
79 	bool quirk_incorrect_hostver;
80 	bool quirk_limited_clk_division;
81 	bool quirk_unreliable_pulse_detection;
82 	bool quirk_tuning_erratum_type1;
83 	bool quirk_tuning_erratum_type2;
84 	bool quirk_ignore_data_inhibit;
85 	bool quirk_delay_before_data_reset;
86 	bool quirk_trans_complete_erratum;
87 	bool in_sw_tuning;
88 	unsigned int peripheral_clock;
89 	const struct esdhc_clk_fixup *clk_fixup;
90 	u32 div_ratio;
91 };
92 
93 /**
94  * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
95  *		       to make it compatible with SD spec.
96  *
97  * @host: pointer to sdhci_host
98  * @spec_reg: SD spec register address
99  * @value: 32bit eSDHC register value on spec_reg address
100  *
 * The SD spec defines 8/16/32/64-bit registers, while all eSDHC registers
 * are 32 bits wide. Register size, register address, register function and
 * bit positions differ between the eSDHC and the SD spec.
105  *
106  * Return a fixed up register value
107  */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
110 {
111 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
112 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
113 	u32 ret;
114 
	/*
	 * The ADMA flag bit in eSDHC is not compatible with the standard
	 * SDHCI register, so set the fake SDHCI_CAN_DO_ADMA2 flag when
	 * ADMA is supported by the eSDHC.
	 * Also, on many FSL eSDHC controllers the reset value of the
	 * SDHCI_CAN_DO_ADMA1 field is 1 even though ADMA is not supported;
	 * only controllers whose vendor version is greater than 2.2/0x12
	 * support ADMA.
	 */
123 	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
124 		if (esdhc->vendor_ver > VENDOR_V_22) {
125 			ret = value | SDHCI_CAN_DO_ADMA2;
126 			return ret;
127 		}
128 	}
129 	/*
130 	 * The DAT[3:0] line signal levels and the CMD line signal level are
131 	 * not compatible with standard SDHC register. The line signal levels
132 	 * DAT[7:0] are at bits 31:24 and the command line signal level is at
133 	 * bit 23. All other bits are the same as in the standard SDHC
134 	 * register.
135 	 */
136 	if (spec_reg == SDHCI_PRESENT_STATE) {
137 		ret = value & 0x000fffff;
138 		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
139 		ret |= (value << 1) & SDHCI_CMD_LVL;
140 		return ret;
141 	}
142 
	/*
	 * DTS properties of the mmc host are used to enable each speed mode
	 * according to SoC and board capability, so clear the
	 * SDR50/SDR104/DDR50 support bits here.
	 */
148 	if (spec_reg == SDHCI_CAPABILITIES_1) {
149 		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
150 				SDHCI_SUPPORT_DDR50);
151 		return ret;
152 	}
153 
	/*
	 * Some controllers have an unreliable Data Line Active bit for
	 * commands with a busy signal, which affects the Command
	 * Inhibit (data) bit. Just ignore it, since the MMC core driver
	 * has already polled the card status with CMD13 after any
	 * command with a busy signal.
	 */
	if (spec_reg == SDHCI_PRESENT_STATE &&
	    esdhc->quirk_ignore_data_inhibit) {
163 		ret = value & ~SDHCI_DATA_INHIBIT;
164 		return ret;
165 	}
166 
167 	ret = value;
168 	return ret;
169 }
170 
static u16 esdhc_readw_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
173 {
174 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
175 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
176 	u16 ret;
177 	int shift = (spec_reg & 0x2) * 8;
178 
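	/*
	 * eSDHC only implements 32-bit registers: shift selects the
	 * addressed 16-bit half. SDHCI_TRANSFER_MODE reads return the
	 * shadow value, since the real write is deferred until the
	 * command register is written.
	 */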
179 	if (spec_reg == SDHCI_TRANSFER_MODE)
180 		return pltfm_host->xfer_mode_shadow;
181 
182 	if (spec_reg == SDHCI_HOST_VERSION)
183 		ret = value & 0xffff;
184 	else
185 		ret = (value >> shift) & 0xffff;
186 	/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
187 	 * vendor version and spec version information.
188 	 */
189 	if ((spec_reg == SDHCI_HOST_VERSION) &&
190 	    (esdhc->quirk_incorrect_hostver))
191 		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
192 	return ret;
193 }
194 
static u8 esdhc_readb_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
197 {
198 	u8 ret;
199 	u8 dma_bits;
200 	int shift = (spec_reg & 0x3) * 8;
201 
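	/* eSDHC registers are 32 bits wide: extract the addressed byte. */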
202 	ret = (value >> shift) & 0xff;
203 
	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 and P3041 it is located at 0x29.
	 */
208 	if (spec_reg == SDHCI_HOST_CONTROL) {
209 		/* DMA select is 22,23 bits in Protocol Control Register */
210 		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
211 		/* fixup the result */
212 		ret &= ~SDHCI_CTRL_DMA_MASK;
213 		ret |= dma_bits;
214 	}
215 	return ret;
216 }
217 
218 /**
219  * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
220  *			written into eSDHC register.
221  *
222  * @host: pointer to sdhci_host
223  * @spec_reg: SD spec register address
224  * @value: 8/16/32bit SD spec register value that would be written
225  * @old_value: 32bit eSDHC register value on spec_reg address
226  *
 * The SD spec defines 8/16/32/64-bit registers, while all eSDHC registers
 * are 32 bits wide. Register size, register address, register function and
 * bit positions differ between the eSDHC and the SD spec.
231  *
232  * Return a fixed up register value
233  */
static u32 esdhc_writel_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value, u32 old_value)
236 {
237 	u32 ret;
238 
	/*
	 * Enabling IRQSTATEN[BGESEN] just makes IRQSTAT[BGE] get set
	 * when SYSCTL[RSTD] is set for some special operations.
	 * It has no impact on other operations.
	 */
244 	if (spec_reg == SDHCI_INT_ENABLE)
245 		ret = value | SDHCI_INT_BLK_GAP;
246 	else
247 		ret = value;
248 
249 	return ret;
250 }
251 
static u32 esdhc_writew_fixup(struct sdhci_host *host,
				     int spec_reg, u16 value, u32 old_value)
254 {
255 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
256 	int shift = (spec_reg & 0x2) * 8;
257 	u32 ret;
258 
259 	switch (spec_reg) {
260 	case SDHCI_TRANSFER_MODE:
261 		/*
262 		 * Postpone this write, we must do it together with a
263 		 * command write that is down below. Return old value.
264 		 */
265 		pltfm_host->xfer_mode_shadow = value;
266 		return old_value;
267 	case SDHCI_COMMAND:
268 		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
269 		return ret;
270 	}
271 
272 	ret = old_value & (~(0xffff << shift));
273 	ret |= (value << shift);
274 
275 	if (spec_reg == SDHCI_BLOCK_SIZE) {
		/*
		 * The last two DMA boundary bits are reserved, and the
		 * first one is used for the non-standard blksz of 4096
		 * bytes that we don't support yet, so clear the DMA
		 * boundary bits.
		 */
281 		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
282 	}
283 	return ret;
284 }
285 
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
				     int spec_reg, u8 value, u32 old_value)
288 {
289 	u32 ret;
290 	u32 dma_bits;
291 	u8 tmp;
292 	int shift = (spec_reg & 0x3) * 8;
293 
294 	/*
295 	 * eSDHC doesn't have a standard power control register, so we do
296 	 * nothing here to avoid incorrect operation.
297 	 */
298 	if (spec_reg == SDHCI_POWER_CONTROL)
299 		return old_value;
	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 and P3041 it is located at 0x29.
	 */
304 	if (spec_reg == SDHCI_HOST_CONTROL) {
305 		/*
306 		 * If host control register is not standard, exit
307 		 * this function
308 		 */
309 		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
310 			return old_value;
311 
312 		/* DMA select is 22,23 bits in Protocol Control Register */
313 		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
314 		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
315 		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
316 		      (old_value & SDHCI_CTRL_DMA_MASK);
317 		ret = (ret & (~0xff)) | tmp;
318 
319 		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
320 		ret &= ~ESDHC_HOST_CONTROL_RES;
321 		return ret;
322 	}
323 
324 	ret = (old_value & (~(0xff << shift))) | (value << shift);
325 	return ret;
326 }
327 
static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
329 {
330 	u32 ret;
331 	u32 value;
332 
333 	if (reg == SDHCI_CAPABILITIES_1)
334 		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
335 	else
336 		value = ioread32be(host->ioaddr + reg);
337 
338 	ret = esdhc_readl_fixup(host, reg, value);
339 
340 	return ret;
341 }
342 
static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
344 {
345 	u32 ret;
346 	u32 value;
347 
348 	if (reg == SDHCI_CAPABILITIES_1)
349 		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
350 	else
351 		value = ioread32(host->ioaddr + reg);
352 
353 	ret = esdhc_readl_fixup(host, reg, value);
354 
355 	return ret;
356 }
357 
static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
359 {
360 	u16 ret;
361 	u32 value;
362 	int base = reg & ~0x3;
363 
364 	value = ioread32be(host->ioaddr + base);
365 	ret = esdhc_readw_fixup(host, reg, value);
366 	return ret;
367 }
368 
static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
370 {
371 	u16 ret;
372 	u32 value;
373 	int base = reg & ~0x3;
374 
375 	value = ioread32(host->ioaddr + base);
376 	ret = esdhc_readw_fixup(host, reg, value);
377 	return ret;
378 }
379 
static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
381 {
382 	u8 ret;
383 	u32 value;
384 	int base = reg & ~0x3;
385 
386 	value = ioread32be(host->ioaddr + base);
387 	ret = esdhc_readb_fixup(host, reg, value);
388 	return ret;
389 }
390 
static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
392 {
393 	u8 ret;
394 	u32 value;
395 	int base = reg & ~0x3;
396 
397 	value = ioread32(host->ioaddr + base);
398 	ret = esdhc_readb_fixup(host, reg, value);
399 	return ret;
400 }
401 
static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
403 {
404 	u32 value;
405 
406 	value = esdhc_writel_fixup(host, reg, val, 0);
407 	iowrite32be(value, host->ioaddr + reg);
408 }
409 
static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
411 {
412 	u32 value;
413 
414 	value = esdhc_writel_fixup(host, reg, val, 0);
415 	iowrite32(value, host->ioaddr + reg);
416 }
417 
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
419 {
420 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
421 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
422 	int base = reg & ~0x3;
423 	u32 value;
424 	u32 ret;
425 
426 	value = ioread32be(host->ioaddr + base);
427 	ret = esdhc_writew_fixup(host, reg, val, value);
428 	if (reg != SDHCI_TRANSFER_MODE)
429 		iowrite32be(ret, host->ioaddr + base);
430 
	/*
	 * Starting SW tuning requires ESDHC_SMPCLKSEL to be set 1 us
	 * after ESDHC_EXTN is set.
	 */
434 	if (base == ESDHC_SYSTEM_CONTROL_2) {
435 		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
436 		    esdhc->in_sw_tuning) {
437 			udelay(1);
438 			ret |= ESDHC_SMPCLKSEL;
439 			iowrite32be(ret, host->ioaddr + base);
440 		}
441 	}
442 }
443 
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
445 {
446 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
447 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
448 	int base = reg & ~0x3;
449 	u32 value;
450 	u32 ret;
451 
452 	value = ioread32(host->ioaddr + base);
453 	ret = esdhc_writew_fixup(host, reg, val, value);
454 	if (reg != SDHCI_TRANSFER_MODE)
455 		iowrite32(ret, host->ioaddr + base);
456 
	/*
	 * Starting SW tuning requires ESDHC_SMPCLKSEL to be set 1 us
	 * after ESDHC_EXTN is set.
	 */
460 	if (base == ESDHC_SYSTEM_CONTROL_2) {
461 		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
462 		    esdhc->in_sw_tuning) {
463 			udelay(1);
464 			ret |= ESDHC_SMPCLKSEL;
465 			iowrite32(ret, host->ioaddr + base);
466 		}
467 	}
468 }
469 
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
471 {
472 	int base = reg & ~0x3;
473 	u32 value;
474 	u32 ret;
475 
476 	value = ioread32be(host->ioaddr + base);
477 	ret = esdhc_writeb_fixup(host, reg, val, value);
478 	iowrite32be(ret, host->ioaddr + base);
479 }
480 
static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
482 {
483 	int base = reg & ~0x3;
484 	u32 value;
485 	u32 ret;
486 
487 	value = ioread32(host->ioaddr + base);
488 	ret = esdhc_writeb_fixup(host, reg, val, value);
489 	iowrite32(ret, host->ioaddr + base);
490 }
491 
492 /*
493  * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
494  * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
495  * and Block Gap Event(IRQSTAT[BGE]) are also set.
496  * For Continue, apply soft reset for data(SYSCTL[RSTD]);
497  * and re-issue the entire read transaction from beginning.
498  */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
500 {
501 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
502 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
503 	bool applicable;
504 	dma_addr_t dmastart;
505 	dma_addr_t dmanow;
506 
507 	applicable = (intmask & SDHCI_INT_DATA_END) &&
508 		     (intmask & SDHCI_INT_BLK_GAP) &&
509 		     (esdhc->vendor_ver == VENDOR_V_23);
510 	if (!applicable)
511 		return;
512 
513 	host->data->error = 0;
514 	dmastart = sg_dma_address(host->data->sg);
515 	dmanow = dmastart + host->data->bytes_xfered;
516 	/*
517 	 * Force update to the next DMA block boundary.
518 	 */
519 	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
520 		SDHCI_DEFAULT_BOUNDARY_SIZE;
521 	host->data->bytes_xfered = dmanow - dmastart;
522 	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
523 }
524 
static int esdhc_of_enable_dma(struct sdhci_host *host)
526 {
527 	int ret;
528 	u32 value;
529 	struct device *dev = mmc_dev(host->mmc);
530 
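	/*
	 * The eSDHC on LS1043A/LS1046A can only address 40 bits of DMA
	 * space, so restrict the DMA mask on those SoCs.
	 */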
531 	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
532 	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
533 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
534 		if (ret)
535 			return ret;
536 	}
537 
538 	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
539 
540 	if (of_dma_is_coherent(dev->of_node))
541 		value |= ESDHC_DMA_SNOOP;
542 	else
543 		value &= ~ESDHC_DMA_SNOOP;
544 
545 	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
546 	return 0;
547 }
548 
static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
550 {
551 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
552 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
553 
554 	if (esdhc->peripheral_clock)
555 		return esdhc->peripheral_clock;
556 	else
557 		return pltfm_host->clock;
558 }
559 
static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
561 {
562 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
563 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
564 	unsigned int clock;
565 
566 	if (esdhc->peripheral_clock)
567 		clock = esdhc->peripheral_clock;
568 	else
569 		clock = pltfm_host->clock;
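	/* Minimum SDCLK = base clock / (max prescaler 256 * max divisor 16). */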
570 	return clock / 256 / 16;
571 }
572 
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
574 {
575 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
576 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
577 	ktime_t timeout;
578 	u32 val, clk_en;
579 
580 	clk_en = ESDHC_CLOCK_SDCLKEN;
581 
582 	/*
583 	 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
584 	 * is 2.2 or lower.
585 	 */
586 	if (esdhc->vendor_ver <= VENDOR_V_22)
587 		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
588 			   ESDHC_CLOCK_PEREN);
589 
590 	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
591 
592 	if (enable)
593 		val |= clk_en;
594 	else
595 		val &= ~clk_en;
596 
597 	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
598 
	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist there.
	 */
603 	timeout = ktime_add_ms(ktime_get(), 20);
604 	while (esdhc->vendor_ver > VENDOR_V_22) {
605 		bool timedout = ktime_after(ktime_get(), timeout);
606 
607 		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
608 			break;
609 		if (timedout) {
610 			pr_err("%s: Internal clock never stabilised.\n",
611 				mmc_hostname(host->mmc));
612 			break;
613 		}
614 		usleep_range(10, 20);
615 	}
616 }
617 
static void esdhc_flush_async_fifo(struct sdhci_host *host)
619 {
620 	ktime_t timeout;
621 	u32 val;
622 
623 	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
624 	val |= ESDHC_FLUSH_ASYNC_FIFO;
625 	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
626 
627 	/* Wait max 20 ms */
628 	timeout = ktime_add_ms(ktime_get(), 20);
629 	while (1) {
630 		bool timedout = ktime_after(ktime_get(), timeout);
631 
632 		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
633 		      ESDHC_FLUSH_ASYNC_FIFO))
634 			break;
635 		if (timedout) {
636 			pr_err("%s: flushing asynchronous FIFO timeout.\n",
637 				mmc_hostname(host->mmc));
638 			break;
639 		}
640 		usleep_range(10, 20);
641 	}
642 }
643 
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
645 {
646 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
647 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
648 	unsigned int pre_div = 1, div = 1;
649 	unsigned int clock_fixup = 0;
650 	ktime_t timeout;
651 	u32 temp;
652 
653 	if (clock == 0) {
654 		host->mmc->actual_clock = 0;
655 		esdhc_clock_enable(host, false);
656 		return;
657 	}
658 
659 	/* Start pre_div at 2 for vendor version < 2.3. */
660 	if (esdhc->vendor_ver < VENDOR_V_23)
661 		pre_div = 2;
662 
663 	/* Fix clock value. */
664 	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
665 	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
666 		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
667 	else if (esdhc->clk_fixup)
668 		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
669 
670 	if (clock_fixup == 0 || clock < clock_fixup)
671 		clock_fixup = clock;
672 
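	/*
	 * SDCLK = host->max_clk / (pre_div * div), where pre_div is a power
	 * of two up to 256 and div is 1..16. For example, assuming a
	 * 400 MHz base clock and a 50 MHz target, the loops below settle on
	 * pre_div = 1, div = 8 (pre_div = 2, div = 4 on pre-2.3
	 * controllers), i.e. exactly 50 MHz.
	 */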
673 	/* Calculate pre_div and div. */
674 	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
675 		pre_div *= 2;
676 
677 	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
678 		div++;
679 
680 	esdhc->div_ratio = pre_div * div;
681 
682 	/* Limit clock division for HS400 200MHz clock for quirk. */
683 	if (esdhc->quirk_limited_clk_division &&
684 	    clock == MMC_HS200_MAX_DTR &&
685 	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
686 	     host->flags & SDHCI_HS400_TUNING)) {
687 		if (esdhc->div_ratio <= 4) {
688 			pre_div = 4;
689 			div = 1;
690 		} else if (esdhc->div_ratio <= 8) {
691 			pre_div = 4;
692 			div = 2;
693 		} else if (esdhc->div_ratio <= 12) {
694 			pre_div = 4;
695 			div = 3;
696 		} else {
697 			pr_warn("%s: using unsupported clock division.\n",
698 				mmc_hostname(host->mmc));
699 		}
700 		esdhc->div_ratio = pre_div * div;
701 	}
702 
703 	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
704 
705 	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
706 		clock, host->mmc->actual_clock);
707 
708 	/* Set clock division into register. */
709 	pre_div >>= 1;
710 	div--;
711 
712 	esdhc_clock_enable(host, false);
713 
714 	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
715 	temp &= ~ESDHC_CLOCK_MASK;
716 	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
717 		(pre_div << ESDHC_PREDIV_SHIFT));
718 	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
719 
	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist there.
	 */
724 	timeout = ktime_add_ms(ktime_get(), 20);
725 	while (esdhc->vendor_ver > VENDOR_V_22) {
726 		bool timedout = ktime_after(ktime_get(), timeout);
727 
728 		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
729 			break;
730 		if (timedout) {
731 			pr_err("%s: Internal clock never stabilised.\n",
732 				mmc_hostname(host->mmc));
733 			break;
734 		}
735 		usleep_range(10, 20);
736 	}
737 
738 	/* Additional setting for HS400. */
739 	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
740 	    clock == MMC_HS200_MAX_DTR) {
741 		temp = sdhci_readl(host, ESDHC_TBCTL);
742 		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
743 		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
744 		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
745 		esdhc_clock_enable(host, true);
746 
747 		temp = sdhci_readl(host, ESDHC_DLLCFG0);
748 		temp |= ESDHC_DLL_ENABLE;
749 		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
750 			temp |= ESDHC_DLL_FREQ_SEL;
751 		sdhci_writel(host, temp, ESDHC_DLLCFG0);
752 
753 		temp |= ESDHC_DLL_RESET;
754 		sdhci_writel(host, temp, ESDHC_DLLCFG0);
755 		udelay(1);
756 		temp &= ~ESDHC_DLL_RESET;
757 		sdhci_writel(host, temp, ESDHC_DLLCFG0);
758 
759 		/* Wait max 20 ms */
760 		if (read_poll_timeout(sdhci_readl, temp,
761 				      temp & ESDHC_DLL_STS_SLV_LOCK,
762 				      10, 20000, false,
763 				      host, ESDHC_DLLSTAT0))
764 			pr_err("%s: timeout for delay chain lock.\n",
765 			       mmc_hostname(host->mmc));
766 
767 		temp = sdhci_readl(host, ESDHC_TBCTL);
768 		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
769 
770 		esdhc_clock_enable(host, false);
771 		esdhc_flush_async_fifo(host);
772 	}
773 	esdhc_clock_enable(host, true);
774 }
775 
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
777 {
778 	u32 ctrl;
779 
780 	ctrl = sdhci_readl(host, ESDHC_PROCTL);
781 	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
782 	switch (width) {
783 	case MMC_BUS_WIDTH_8:
784 		ctrl |= ESDHC_CTRL_8BITBUS;
785 		break;
786 
787 	case MMC_BUS_WIDTH_4:
788 		ctrl |= ESDHC_CTRL_4BITBUS;
789 		break;
790 
791 	default:
792 		break;
793 	}
794 
795 	sdhci_writel(host, ctrl, ESDHC_PROCTL);
796 }
797 
static void esdhc_reset(struct sdhci_host *host, u8 mask)
799 {
800 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
801 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
802 	u32 val, bus_width = 0;
803 
804 	/*
805 	 * Add delay to make sure all the DMA transfers are finished
806 	 * for quirk.
807 	 */
808 	if (esdhc->quirk_delay_before_data_reset &&
809 	    (mask & SDHCI_RESET_DATA) &&
810 	    (host->flags & SDHCI_REQ_USE_DMA))
811 		mdelay(5);
812 
813 	/*
814 	 * Save bus-width for eSDHC whose vendor version is 2.2
815 	 * or lower for data reset.
816 	 */
817 	if ((mask & SDHCI_RESET_DATA) &&
818 	    (esdhc->vendor_ver <= VENDOR_V_22)) {
819 		val = sdhci_readl(host, ESDHC_PROCTL);
820 		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
821 	}
822 
823 	sdhci_reset(host, mask);
824 
825 	/*
826 	 * Restore bus-width setting and interrupt registers for eSDHC
827 	 * whose vendor version is 2.2 or lower for data reset.
828 	 */
829 	if ((mask & SDHCI_RESET_DATA) &&
830 	    (esdhc->vendor_ver <= VENDOR_V_22)) {
831 		val = sdhci_readl(host, ESDHC_PROCTL);
832 		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
833 		val |= bus_width;
834 		sdhci_writel(host, val, ESDHC_PROCTL);
835 
836 		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
837 		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
838 	}
839 
840 	/*
841 	 * Some bits have to be cleaned manually for eSDHC whose spec
842 	 * version is higher than 3.0 for all reset.
843 	 */
844 	if ((mask & SDHCI_RESET_ALL) &&
845 	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
846 		val = sdhci_readl(host, ESDHC_TBCTL);
847 		val &= ~ESDHC_TB_EN;
848 		sdhci_writel(host, val, ESDHC_TBCTL);
849 
850 		/*
851 		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
852 		 * 0 for quirk.
853 		 */
854 		if (esdhc->quirk_unreliable_pulse_detection) {
855 			val = sdhci_readl(host, ESDHC_DLLCFG1);
856 			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
857 			sdhci_writel(host, val, ESDHC_DLLCFG1);
858 		}
859 	}
860 }
861 
862 /* The SCFG, Supplemental Configuration Unit, provides SoC specific
863  * configuration and status registers for the device. There is a
864  * SDHC IO VSEL control register on SCFG for some platforms. It's
865  * used to support SDHC IO voltage switching.
866  */
867 static const struct of_device_id scfg_device_ids[] = {
868 	{ .compatible = "fsl,t1040-scfg", },
869 	{ .compatible = "fsl,ls1012a-scfg", },
870 	{ .compatible = "fsl,ls1046a-scfg", },
871 	{}
872 };
873 
874 /* SDHC IO VSEL control register definition */
875 #define SCFG_SDHCIOVSELCR	0x408
876 #define SDHCIOVSELCR_TGLEN	0x80000000
877 #define SDHCIOVSELCR_VSELVAL	0x60000000
878 #define SDHCIOVSELCR_SDHC_VS	0x00000001
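
/*
 * 1.8V switching sequence used below: select the SCFG override
 * (TGLEN | VSELVAL), set ESDHC_VOLT_SEL in the eSDHC, wait ~5 ms for the
 * IO rail to settle, then switch the select back to the eSDHC SDHC_VS
 * signal (TGLEN | SDHC_VS).
 */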
879 
static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
882 {
883 	struct sdhci_host *host = mmc_priv(mmc);
884 	struct device_node *scfg_node;
885 	void __iomem *scfg_base = NULL;
886 	u32 sdhciovselcr;
887 	u32 val;
888 
889 	/*
890 	 * Signal Voltage Switching is only applicable for Host Controllers
891 	 * v3.00 and above.
892 	 */
893 	if (host->version < SDHCI_SPEC_300)
894 		return 0;
895 
896 	val = sdhci_readl(host, ESDHC_PROCTL);
897 
898 	switch (ios->signal_voltage) {
899 	case MMC_SIGNAL_VOLTAGE_330:
900 		val &= ~ESDHC_VOLT_SEL;
901 		sdhci_writel(host, val, ESDHC_PROCTL);
902 		return 0;
903 	case MMC_SIGNAL_VOLTAGE_180:
904 		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
905 		if (scfg_node)
906 			scfg_base = of_iomap(scfg_node, 0);
907 		of_node_put(scfg_node);
908 		if (scfg_base) {
909 			sdhciovselcr = SDHCIOVSELCR_TGLEN |
910 				       SDHCIOVSELCR_VSELVAL;
911 			iowrite32be(sdhciovselcr,
912 				scfg_base + SCFG_SDHCIOVSELCR);
913 
914 			val |= ESDHC_VOLT_SEL;
915 			sdhci_writel(host, val, ESDHC_PROCTL);
916 			mdelay(5);
917 
918 			sdhciovselcr = SDHCIOVSELCR_TGLEN |
919 				       SDHCIOVSELCR_SDHC_VS;
920 			iowrite32be(sdhciovselcr,
921 				scfg_base + SCFG_SDHCIOVSELCR);
922 			iounmap(scfg_base);
923 		} else {
924 			val |= ESDHC_VOLT_SEL;
925 			sdhci_writel(host, val, ESDHC_PROCTL);
926 		}
927 		return 0;
928 	default:
929 		return 0;
930 	}
931 }
932 
933 static struct soc_device_attribute soc_tuning_erratum_type1[] = {
934 	{ .family = "QorIQ T1023", },
935 	{ .family = "QorIQ T1040", },
936 	{ .family = "QorIQ T2080", },
937 	{ .family = "QorIQ LS1021A", },
938 	{ /* sentinel */ }
939 };
940 
941 static struct soc_device_attribute soc_tuning_erratum_type2[] = {
942 	{ .family = "QorIQ LS1012A", },
943 	{ .family = "QorIQ LS1043A", },
944 	{ .family = "QorIQ LS1046A", },
945 	{ .family = "QorIQ LS1080A", },
946 	{ .family = "QorIQ LS2080A", },
947 	{ .family = "QorIQ LA1575A", },
948 	{ /* sentinel */ }
949 };
950 
static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
952 {
953 	u32 val;
954 
955 	esdhc_clock_enable(host, false);
956 	esdhc_flush_async_fifo(host);
957 
958 	val = sdhci_readl(host, ESDHC_TBCTL);
959 	if (enable)
960 		val |= ESDHC_TB_EN;
961 	else
962 		val &= ~ESDHC_TB_EN;
963 	sdhci_writel(host, val, ESDHC_TBCTL);
964 
965 	esdhc_clock_enable(host, true);
966 }
967 
static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
970 {
971 	u32 val;
972 
973 	/* Write TBCTL[11:8]=4'h8 */
974 	val = sdhci_readl(host, ESDHC_TBCTL);
975 	val &= ~(0xf << 8);
976 	val |= 8 << 8;
977 	sdhci_writel(host, val, ESDHC_TBCTL);
978 
979 	mdelay(1);
980 
981 	/* Read TBCTL[31:0] register and rewrite again */
982 	val = sdhci_readl(host, ESDHC_TBCTL);
983 	sdhci_writel(host, val, ESDHC_TBCTL);
984 
985 	mdelay(1);
986 
987 	/* Read the TBSTAT[31:0] register twice */
988 	val = sdhci_readl(host, ESDHC_TBSTAT);
989 	val = sdhci_readl(host, ESDHC_TBSTAT);
990 
991 	*window_end = val & 0xff;
992 	*window_start = (val >> 8) & 0xff;
993 }
994 
static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
997 {
998 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
999 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1000 	u8 start_ptr, end_ptr;
1001 
1002 	if (esdhc->quirk_tuning_erratum_type1) {
1003 		*window_start = 5 * esdhc->div_ratio;
1004 		*window_end = 3 * esdhc->div_ratio;
1005 		return;
1006 	}
1007 
1008 	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
1009 
1010 	/* Reset data lines by setting ESDHCCTL[RSTD] */
1011 	sdhci_reset(host, SDHCI_RESET_DATA);
1012 	/* Write 32'hFFFF_FFFF to IRQSTAT register */
1013 	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
1014 
1015 	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
1016 	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
1017 	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
1018 	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
1019 	 */
1020 
1021 	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
1022 		*window_start = 8 * esdhc->div_ratio;
1023 		*window_end = 4 * esdhc->div_ratio;
1024 	} else {
1025 		*window_start = 5 * esdhc->div_ratio;
1026 		*window_end = 3 * esdhc->div_ratio;
1027 	}
1028 }
1029 
static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
				   u8 window_start, u8 window_end)
1032 {
1033 	struct sdhci_host *host = mmc_priv(mmc);
1034 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1035 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1036 	u32 val;
1037 	int ret;
1038 
1039 	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
1040 	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
1041 	      ESDHC_WNDW_STRT_PTR_MASK;
1042 	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
1043 	sdhci_writel(host, val, ESDHC_TBPTR);
1044 
1045 	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
1046 	val = sdhci_readl(host, ESDHC_TBCTL);
1047 	val &= ~ESDHC_TB_MODE_MASK;
1048 	val |= ESDHC_TB_MODE_SW;
1049 	sdhci_writel(host, val, ESDHC_TBCTL);
1050 
1051 	esdhc->in_sw_tuning = true;
1052 	ret = sdhci_execute_tuning(mmc, opcode);
1053 	esdhc->in_sw_tuning = false;
1054 	return ret;
1055 }
1056 
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1058 {
1059 	struct sdhci_host *host = mmc_priv(mmc);
1060 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1061 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1062 	u8 window_start, window_end;
1063 	int ret, retries = 1;
1064 	bool hs400_tuning;
1065 	unsigned int clk;
1066 	u32 val;
1067 
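	/*
	 * Tuning flow: run HW tuning first; if the tuning erratum is hit
	 * (or the resulting window looks bogus on type2 parts), fall back
	 * to SW tuning with a pre-computed window, and finally retry once
	 * with a reduced clock.
	 */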
	/*
	 * For tuning mode, the SD clock divisor value must be larger than 3
	 * according to the reference manual.
	 */
1071 	clk = esdhc->peripheral_clock / 3;
1072 	if (host->clock > clk)
1073 		esdhc_of_set_clock(host, clk);
1074 
1075 	esdhc_tuning_block_enable(host, true);
1076 
1077 	/*
1078 	 * The eSDHC controller takes the data timeout value into account
1079 	 * during tuning. If the SD card is too slow sending the response, the
1080 	 * timer will expire and a "Buffer Read Ready" interrupt without data
1081 	 * is triggered. This leads to tuning errors.
1082 	 *
1083 	 * Just set the timeout to the maximum value because the core will
1084 	 * already take care of it in sdhci_send_tuning().
1085 	 */
1086 	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
1087 
1088 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1089 
1090 	do {
1091 		if (esdhc->quirk_limited_clk_division &&
1092 		    hs400_tuning)
1093 			esdhc_of_set_clock(host, host->clock);
1094 
1095 		/* Do HW tuning */
1096 		val = sdhci_readl(host, ESDHC_TBCTL);
1097 		val &= ~ESDHC_TB_MODE_MASK;
1098 		val |= ESDHC_TB_MODE_3;
1099 		sdhci_writel(host, val, ESDHC_TBCTL);
1100 
1101 		ret = sdhci_execute_tuning(mmc, opcode);
1102 		if (ret)
1103 			break;
1104 
		/*
		 * On platforms affected by the type2 tuning erratum,
		 * tuning may report success even though the eSDHC has not
		 * tuned properly, so check the tuning window.
		 */
1109 		if (esdhc->quirk_tuning_erratum_type2 &&
1110 		    !host->tuning_err) {
1111 			esdhc_tuning_window_ptr(host, &window_start,
1112 						&window_end);
1113 			if (abs(window_start - window_end) >
1114 			    (4 * esdhc->div_ratio + 2))
1115 				host->tuning_err = -EAGAIN;
1116 		}
1117 
1118 		/* If HW tuning fails and triggers erratum,
1119 		 * try workaround.
1120 		 */
1121 		ret = host->tuning_err;
1122 		if (ret == -EAGAIN &&
1123 		    (esdhc->quirk_tuning_erratum_type1 ||
1124 		     esdhc->quirk_tuning_erratum_type2)) {
1125 			/* Recover HS400 tuning flag */
1126 			if (hs400_tuning)
1127 				host->flags |= SDHCI_HS400_TUNING;
1128 			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
1129 				mmc_hostname(mmc));
1130 			/* Do SW tuning */
1131 			esdhc_prepare_sw_tuning(host, &window_start,
1132 						&window_end);
1133 			ret = esdhc_execute_sw_tuning(mmc, opcode,
1134 						      window_start,
1135 						      window_end);
1136 			if (ret)
1137 				break;
1138 
1139 			/* Retry both HW/SW tuning with reduced clock. */
1140 			ret = host->tuning_err;
1141 			if (ret == -EAGAIN && retries) {
1142 				/* Recover HS400 tuning flag */
1143 				if (hs400_tuning)
1144 					host->flags |= SDHCI_HS400_TUNING;
1145 
1146 				clk = host->max_clk / (esdhc->div_ratio + 1);
1147 				esdhc_of_set_clock(host, clk);
1148 				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
1149 					mmc_hostname(mmc));
1150 			} else {
1151 				break;
1152 			}
1153 		} else {
1154 			break;
1155 		}
1156 	} while (retries--);
1157 
1158 	if (ret) {
1159 		esdhc_tuning_block_enable(host, false);
1160 	} else if (hs400_tuning) {
1161 		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1162 		val |= ESDHC_FLW_CTL_BG;
1163 		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1164 	}
1165 
1166 	return ret;
1167 }
1168 
static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				   unsigned int timing)
1171 {
1172 	u32 val;
1173 
	/*
	 * There are specific register settings for HS400 mode. Clear all
	 * of them if the controller is in HS400 mode, to exit HS400 mode
	 * before setting any other speed mode.
	 */
1179 	val = sdhci_readl(host, ESDHC_TBCTL);
1180 	if (val & ESDHC_HS400_MODE) {
1181 		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1182 		val &= ~ESDHC_FLW_CTL_BG;
1183 		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1184 
1185 		val = sdhci_readl(host, ESDHC_SDCLKCTL);
1186 		val &= ~ESDHC_CMD_CLK_CTL;
1187 		sdhci_writel(host, val, ESDHC_SDCLKCTL);
1188 
1189 		esdhc_clock_enable(host, false);
1190 		val = sdhci_readl(host, ESDHC_TBCTL);
1191 		val &= ~ESDHC_HS400_MODE;
1192 		sdhci_writel(host, val, ESDHC_TBCTL);
1193 		esdhc_clock_enable(host, true);
1194 
1195 		val = sdhci_readl(host, ESDHC_DLLCFG0);
1196 		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
1197 		sdhci_writel(host, val, ESDHC_DLLCFG0);
1198 
1199 		val = sdhci_readl(host, ESDHC_TBCTL);
1200 		val &= ~ESDHC_HS400_WNDW_ADJUST;
1201 		sdhci_writel(host, val, ESDHC_TBCTL);
1202 
1203 		esdhc_tuning_block_enable(host, false);
1204 	}
1205 
1206 	if (timing == MMC_TIMING_MMC_HS400)
1207 		esdhc_tuning_block_enable(host, true);
1208 	else
1209 		sdhci_set_uhs_signaling(host, timing);
1210 }
1211 
static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1213 {
1214 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1215 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1216 	u32 command;
1217 
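	/*
	 * quirk_trans_complete_erratum (P2020): a spurious Transfer
	 * Complete can be raised during a multiple-block write while
	 * blocks are still outstanding; acknowledge it and drop it from
	 * the returned mask so the core does not end the request early.
	 */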
1218 	if (esdhc->quirk_trans_complete_erratum) {
1219 		command = SDHCI_GET_CMD(sdhci_readw(host,
1220 					SDHCI_COMMAND));
1221 		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1222 				sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1223 				intmask & SDHCI_INT_DATA_END) {
1224 			intmask &= ~SDHCI_INT_DATA_END;
1225 			sdhci_writel(host, SDHCI_INT_DATA_END,
1226 					SDHCI_INT_STATUS);
1227 		}
1228 	}
1229 	return intmask;
1230 }
1231 
1232 #ifdef CONFIG_PM_SLEEP
1233 static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
1235 {
1236 	struct sdhci_host *host = dev_get_drvdata(dev);
1237 
1238 	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
1239 
1240 	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1241 		mmc_retune_needed(host->mmc);
1242 
1243 	return sdhci_suspend_host(host);
1244 }
1245 
static int esdhc_of_resume(struct device *dev)
1247 {
1248 	struct sdhci_host *host = dev_get_drvdata(dev);
1249 	int ret = sdhci_resume_host(host);
1250 
1251 	if (ret == 0) {
1252 		/* Isn't this already done by sdhci_resume_host() ? --rmk */
1253 		esdhc_of_enable_dma(host);
1254 		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1255 	}
1256 	return ret;
1257 }
1258 #endif
1259 
1260 static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
1261 			esdhc_of_suspend,
1262 			esdhc_of_resume);
1263 
1264 static const struct sdhci_ops sdhci_esdhc_be_ops = {
1265 	.read_l = esdhc_be_readl,
1266 	.read_w = esdhc_be_readw,
1267 	.read_b = esdhc_be_readb,
1268 	.write_l = esdhc_be_writel,
1269 	.write_w = esdhc_be_writew,
1270 	.write_b = esdhc_be_writeb,
1271 	.set_clock = esdhc_of_set_clock,
1272 	.enable_dma = esdhc_of_enable_dma,
1273 	.get_max_clock = esdhc_of_get_max_clock,
1274 	.get_min_clock = esdhc_of_get_min_clock,
1275 	.adma_workaround = esdhc_of_adma_workaround,
1276 	.set_bus_width = esdhc_pltfm_set_bus_width,
1277 	.reset = esdhc_reset,
1278 	.set_uhs_signaling = esdhc_set_uhs_signaling,
1279 	.irq = esdhc_irq,
1280 };
1281 
1282 static const struct sdhci_ops sdhci_esdhc_le_ops = {
1283 	.read_l = esdhc_le_readl,
1284 	.read_w = esdhc_le_readw,
1285 	.read_b = esdhc_le_readb,
1286 	.write_l = esdhc_le_writel,
1287 	.write_w = esdhc_le_writew,
1288 	.write_b = esdhc_le_writeb,
1289 	.set_clock = esdhc_of_set_clock,
1290 	.enable_dma = esdhc_of_enable_dma,
1291 	.get_max_clock = esdhc_of_get_max_clock,
1292 	.get_min_clock = esdhc_of_get_min_clock,
1293 	.adma_workaround = esdhc_of_adma_workaround,
1294 	.set_bus_width = esdhc_pltfm_set_bus_width,
1295 	.reset = esdhc_reset,
1296 	.set_uhs_signaling = esdhc_set_uhs_signaling,
1297 	.irq = esdhc_irq,
1298 };
1299 
1300 static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
1301 	.quirks = ESDHC_DEFAULT_QUIRKS |
1302 #ifdef CONFIG_PPC
1303 		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1304 #endif
1305 		  SDHCI_QUIRK_NO_CARD_NO_RESET |
1306 		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1307 	.ops = &sdhci_esdhc_be_ops,
1308 };
1309 
1310 static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
1311 	.quirks = ESDHC_DEFAULT_QUIRKS |
1312 		  SDHCI_QUIRK_NO_CARD_NO_RESET |
1313 		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1314 	.ops = &sdhci_esdhc_le_ops,
1315 };
1316 
1317 static struct soc_device_attribute soc_incorrect_hostver[] = {
1318 	{ .family = "QorIQ T4240", .revision = "1.0", },
1319 	{ .family = "QorIQ T4240", .revision = "2.0", },
1320 	{ /* sentinel */ }
1321 };
1322 
1323 static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
1324 	{ .family = "QorIQ LX2160A", .revision = "1.0", },
1325 	{ .family = "QorIQ LX2160A", .revision = "2.0", },
1326 	{ .family = "QorIQ LS1028A", .revision = "1.0", },
1327 	{ /* sentinel */ }
1328 };
1329 
1330 static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
1331 	{ .family = "QorIQ LX2160A", .revision = "1.0", },
1332 	{ .family = "QorIQ LX2160A", .revision = "2.0", },
1333 	{ .family = "QorIQ LS1028A", .revision = "1.0", },
1334 	{ /* sentinel */ }
1335 };
1336 
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
1338 {
1339 	const struct of_device_id *match;
1340 	struct sdhci_pltfm_host *pltfm_host;
1341 	struct sdhci_esdhc *esdhc;
1342 	struct device_node *np;
1343 	struct clk *clk;
1344 	u32 val;
1345 	u16 host_ver;
1346 
1347 	pltfm_host = sdhci_priv(host);
1348 	esdhc = sdhci_pltfm_priv(pltfm_host);
1349 
1350 	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
1351 	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
1352 			     SDHCI_VENDOR_VER_SHIFT;
1353 	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
1354 	if (soc_device_match(soc_incorrect_hostver))
1355 		esdhc->quirk_incorrect_hostver = true;
1356 	else
1357 		esdhc->quirk_incorrect_hostver = false;
1358 
1359 	if (soc_device_match(soc_fixup_sdhc_clkdivs))
1360 		esdhc->quirk_limited_clk_division = true;
1361 	else
1362 		esdhc->quirk_limited_clk_division = false;
1363 
1364 	if (soc_device_match(soc_unreliable_pulse_detection))
1365 		esdhc->quirk_unreliable_pulse_detection = true;
1366 	else
1367 		esdhc->quirk_unreliable_pulse_detection = false;
1368 
1369 	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
1370 	if (match)
1371 		esdhc->clk_fixup = match->data;
1372 	np = pdev->dev.of_node;
1373 
1374 	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1375 		esdhc->quirk_delay_before_data_reset = true;
1376 		esdhc->quirk_trans_complete_erratum = true;
1377 	}
1378 
1379 	clk = of_clk_get(np, 0);
1380 	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock is assigned the eSDHC base clock
		 * rate when the peripheral clock is used. On some platforms
		 * the rate returned by the common clk API is the peripheral
		 * clock, while the eSDHC base clock is 1/2 of the
		 * peripheral clock.
		 */
1388 		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
1389 		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
1390 		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
1391 			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
1392 		else
1393 			esdhc->peripheral_clock = clk_get_rate(clk);
1394 
1395 		clk_put(clk);
1396 	}
1397 
1398 	esdhc_clock_enable(host, false);
1399 	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	/*
	 * This bit cannot be reset by SDHCI_RESET_ALL, so initialize it
	 * once to 1 or 0 to override whatever value the bootloader may
	 * have configured.
	 */
1405 	if (esdhc->peripheral_clock)
1406 		val |= ESDHC_PERIPHERAL_CLK_SEL;
1407 	else
1408 		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
1409 	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
1410 	esdhc_clock_enable(host, true);
1411 }
1412 
static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1414 {
1415 	esdhc_tuning_block_enable(mmc_priv(mmc), false);
1416 	return 0;
1417 }
1418 
static int sdhci_esdhc_probe(struct platform_device *pdev)
1420 {
1421 	struct sdhci_host *host;
1422 	struct device_node *np;
1423 	struct sdhci_pltfm_host *pltfm_host;
1424 	struct sdhci_esdhc *esdhc;
1425 	int ret;
1426 
1427 	np = pdev->dev.of_node;
1428 
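	/*
	 * The DT "little-endian" property selects the register access
	 * helpers: eSDHC register blocks are big-endian on the PowerPC
	 * parts and typically little-endian on the Layerscape parts.
	 */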
1429 	if (of_property_read_bool(np, "little-endian"))
1430 		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1431 					sizeof(struct sdhci_esdhc));
1432 	else
1433 		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1434 					sizeof(struct sdhci_esdhc));
1435 
1436 	if (IS_ERR(host))
1437 		return PTR_ERR(host);
1438 
1439 	host->mmc_host_ops.start_signal_voltage_switch =
1440 		esdhc_signal_voltage_switch;
1441 	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1442 	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1443 	host->tuning_delay = 1;
1444 
1445 	esdhc_init(pdev, host);
1446 
1447 	sdhci_get_of_property(pdev);
1448 
1449 	pltfm_host = sdhci_priv(host);
1450 	esdhc = sdhci_pltfm_priv(pltfm_host);
1451 	if (soc_device_match(soc_tuning_erratum_type1))
1452 		esdhc->quirk_tuning_erratum_type1 = true;
1453 	else
1454 		esdhc->quirk_tuning_erratum_type1 = false;
1455 
1456 	if (soc_device_match(soc_tuning_erratum_type2))
1457 		esdhc->quirk_tuning_erratum_type2 = true;
1458 	else
1459 		esdhc->quirk_tuning_erratum_type2 = false;
1460 
1461 	if (esdhc->vendor_ver == VENDOR_V_22)
1462 		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1463 
1464 	if (esdhc->vendor_ver > VENDOR_V_22)
1465 		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1466 
1467 	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
1468 		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1469 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1470 	}
1471 
1472 	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1473 	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1474 	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1475 	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1476 	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
1477 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1478 
1479 	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1480 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1481 
1482 	esdhc->quirk_ignore_data_inhibit = false;
1483 	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1484 		/*
1485 		 * Freescale messed up with P2020 as it has a non-standard
1486 		 * host control register
1487 		 */
1488 		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1489 		esdhc->quirk_ignore_data_inhibit = true;
1490 	}
1491 
1492 	/* call to generic mmc_of_parse to support additional capabilities */
1493 	ret = mmc_of_parse(host->mmc);
1494 	if (ret)
1495 		goto err;
1496 
1497 	mmc_of_parse_voltage(host->mmc, &host->ocr_mask);
1498 
1499 	ret = sdhci_add_host(host);
1500 	if (ret)
1501 		goto err;
1502 
1503 	return 0;
1504  err:
1505 	sdhci_pltfm_free(pdev);
1506 	return ret;
1507 }
1508 
1509 static struct platform_driver sdhci_esdhc_driver = {
1510 	.driver = {
1511 		.name = "sdhci-esdhc",
1512 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1513 		.of_match_table = sdhci_esdhc_of_match,
1514 		.pm = &esdhc_of_dev_pm_ops,
1515 	},
1516 	.probe = sdhci_esdhc_probe,
1517 	.remove = sdhci_pltfm_unregister,
1518 };
1519 
1520 module_platform_driver(sdhci_esdhc_driver);
1521 
1522 MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
1523 MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
1524 	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
1525 MODULE_LICENSE("GPL v2");
1526