// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))
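/*
 * Note: despite its name, this helper is also used by the 32-bit
 * descriptor path below; the OWN flag lives in bit 31 of des0 in both
 * descriptor layouts.
 */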

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000
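/*
 * (The 0x1fff masks above mean each buffer-size field is 13 bits wide,
 * i.e. up to 8191 bytes; the driver stays at an aligned 4KB per
 * descriptor and chains additional descriptors as needed.)
 */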

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
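/*
 * Example, assuming debugfs is mounted at /sys/kernel/debug and the slot
 * is registered as mmc0:
 *   cat /sys/kernel/debug/mmc0/regs
 *   cat /sys/kernel/debug/mmc0/req
 */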
#endif /* defined(CONFIG_DEBUG_FS) */

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy.  Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200 ||
	    cmdr == MMC_GEN_CMD) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
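		/*
		 * Abort via a CMD52 (IO_RW_DIRECT) write to the CCCR abort
		 * register: bit 31 is the R/W flag, bits 30:28 the function
		 * number (0 = CCCR), bits 25:9 the register address, and the
		 * low bits carry the number of the function to abort, taken
		 * from the CMD53 argument.
		 */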
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit of spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here.  Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel.  ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs.  ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
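	/*
	 * A worked instance (numbers assumed for illustration): cto_clks =
	 * 255 and CLKDIV = 1 give cto_div = 2; with bus_hz = 100 MHz the
	 * division yields 0.0051 ms, which DIV_ROUND_UP_ULL turns into 1 ms,
	 * so the timer below fires ~11 ms after the command is started.
	 */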
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the IDMAC's earlier OWN-bit clear to land,
			 * to make sure this descriptor isn't still owned by
			 * the IDMAC; its write ops and the CPU's read ops are
			 * asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}


static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the IDMAC's earlier OWN-bit clear to land,
			 * to make sure this descriptor isn't still owned by
			 * the IDMAC; its write ops and the CPU's read ops are
			 * asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
				       IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
					    unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	memset(&cfg, 0, sizeof(cfg));
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;
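	/*
	 * For illustration: FIFOTH[30:28] holds the DMA multiple-transaction
	 * size, so a value of 2 there selects mszs[2] = 8, i.e. bursts of
	 * eight transfers in each direction.
	 */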

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					"card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					"card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1' if blksz is not
	 * a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx reaches '0', the loop exits without a match;
	 * in that case the initial values are used.
	 */
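	/*
	 * Example with assumed numbers: fifo_depth = 64 gives tx_wmark = 32
	 * and tx_wmark_invers = 32; blksz = 512 on a 4-byte-wide FIFO gives
	 * blksz_depth = 128. The loop settles on mszs[4] = 32 (128 % 32 == 0
	 * and 32 % 32 == 0), so msize = 4 and rx_wmark = 31.
	 */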
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
		(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * The card write threshold was introduced in 2.80a.
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
		host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If the current block size is the same as the previous size,
	 * there is no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set the watermark to match the data size.
		 * If the next data may be transferred in DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
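		/*
		 * e.g. (numbers assumed): bus_hz = 100 MHz and a requested
		 * clock of 400 kHz give div = 250 above, so CLKDIV becomes
		 * 125 and the card clock 100 MHz / (2 * 125) = 400 kHz.
		 */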

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void dw_mci_set_data_timeout(struct dw_mci *host,
				    unsigned int timeout_ns)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 clk_div, tmout;
	u64 tmp;

	if (drv_data && drv_data->set_data_timeout)
		return drv_data->set_data_timeout(host, timeout_ns);

	clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
	if (clk_div == 0)
		clk_div = 1;

	tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
	tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
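	/*
	 * For instance (values assumed): timeout_ns = 100 ms at bus_hz =
	 * 100 MHz is 10,000,000 bus clocks; with CLKDIV = 1 (clk_div = 2)
	 * that is 5,000,000 card-clock cycles, which fits in the 24-bit
	 * field programmed below.
	 */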

	/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
	tmout = 0xFF; /* Set maximum */

	/* TMOUT[31:8] (DATA_TIMEOUT) */
	if (!tmp || tmp > 0xFFFFFF)
		tmout |= (0xFFFFFF << 8);
	else
		tmout |= (tmp & 0xFFFFFF) << 8;

	mci_writel(host, TMOUT, tmout);
	dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
		timeout_ns, tmout >> 8);
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_data_timeout(host, data->timeout_ns);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(&mmc->class_dev,
					 "Regulator set error %d - %s V\n",
					 ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try onboard write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
	u32 clk_en_a_old;
	u32 clk_en_a;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */

	clk_en_a_old = mci_readl(host, CLKENA);
	if (prepare) {
		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old & ~clken_low_pwr;
	} else {
		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old | clken_low_pwr;
	}

	if (clk_en_a != clk_en_a_old) {
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
			     0);
	}
}

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	dw_mci_prepare_sdio_irq(slot, enb);
	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}
1740 
dw_mci_reset(struct dw_mci * host)1741 static bool dw_mci_reset(struct dw_mci *host)
1742 {
1743 	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1744 	bool ret = false;
1745 	u32 status = 0;
1746 
1747 	/*
1748 	 * Resetting generates a block interrupt, hence setting
1749 	 * the scatter-gather pointer to NULL.
1750 	 */
1751 	if (host->sg) {
1752 		sg_miter_stop(&host->sg_miter);
1753 		host->sg = NULL;
1754 	}
1755 
1756 	if (host->use_dma)
1757 		flags |= SDMMC_CTRL_DMA_RESET;
1758 
1759 	if (dw_mci_ctrl_reset(host, flags)) {
1760 		/*
1761 		 * In all cases we clear the RAWINTS
1762 		 * register to clear any interrupts.
1763 		 */
1764 		mci_writel(host, RINTSTS, 0xFFFFFFFF);
1765 
1766 		if (!host->use_dma) {
1767 			ret = true;
1768 			goto ciu_out;
1769 		}
1770 
1771 		/* Wait for dma_req to be cleared */
1772 		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1773 					      status,
1774 					      !(status & SDMMC_STATUS_DMA_REQ),
1775 					      1, 500 * USEC_PER_MSEC)) {
1776 			dev_err(host->dev,
1777 				"%s: Timeout waiting for dma_req to be cleared\n",
1778 				__func__);
1779 			goto ciu_out;
1780 		}
1781 
1782 		/* when using DMA, next we reset the FIFO again */
1783 		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1784 			goto ciu_out;
1785 	} else {
1786 		/* if the controller reset bit did clear, then set clock regs */
1787 		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1788 			dev_err(host->dev,
1789 				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1790 				__func__);
1791 			goto ciu_out;
1792 		}
1793 	}
1794 
1795 	if (host->use_dma == TRANS_MODE_IDMAC)
1796 		/* It is also required that we reinit idmac */
1797 		dw_mci_idmac_init(host);
1798 
1799 	ret = true;
1800 
1801 ciu_out:
1802 	/* After a CTRL reset we need to have CIU set clock registers  */
1803 	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1804 
1805 	return ret;
1806 }
1807 
1808 static const struct mmc_host_ops dw_mci_ops = {
1809 	.request		= dw_mci_request,
1810 	.pre_req		= dw_mci_pre_req,
1811 	.post_req		= dw_mci_post_req,
1812 	.set_ios		= dw_mci_set_ios,
1813 	.get_ro			= dw_mci_get_ro,
1814 	.get_cd			= dw_mci_get_cd,
1815 	.card_hw_reset          = dw_mci_hw_reset,
1816 	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1817 	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
1818 	.execute_tuning		= dw_mci_execute_tuning,
1819 	.card_busy		= dw_mci_card_busy,
1820 	.start_signal_voltage_switch = dw_mci_switch_voltage,
1821 	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
1822 };
1823 
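/*
 * Optional fault injection: when the fail_data_crc attribute triggers,
 * an hrtimer fires at a random point (up to ~25 ms) into a multi-block
 * transfer and fakes a data CRC error (SDMMC_INT_DCRC), exercising the
 * error-recovery paths above.
 */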
1824 #ifdef CONFIG_FAULT_INJECTION
1825 static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
1826 {
1827 	struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
1828 	unsigned long flags;
1829 
1830 	spin_lock_irqsave(&host->irq_lock, flags);
1831 
1832 	/*
1833 	 * Only inject an error if we haven't already got an error or data over
1834 	 * interrupt.
1835 	 */
1836 	if (!host->data_status) {
1837 		host->data_status = SDMMC_INT_DCRC;
1838 		set_bit(EVENT_DATA_ERROR, &host->pending_events);
1839 		tasklet_schedule(&host->tasklet);
1840 	}
1841 
1842 	spin_unlock_irqrestore(&host->irq_lock, flags);
1843 
1844 	return HRTIMER_NORESTART;
1845 }
1846 
1847 static void dw_mci_start_fault_timer(struct dw_mci *host)
1848 {
1849 	struct mmc_data *data = host->data;
1850 
1851 	if (!data || data->blocks <= 1)
1852 		return;
1853 
1854 	if (!should_fail(&host->fail_data_crc, 1))
1855 		return;
1856 
1857 	/*
1858 	 * Try to inject the error at random points during the data transfer.
1859 	 */
1860 	hrtimer_start(&host->fault_timer,
1861 		      ms_to_ktime(prandom_u32() % 25),
1862 		      HRTIMER_MODE_REL);
1863 }
1864 
1865 static void dw_mci_stop_fault_timer(struct dw_mci *host)
1866 {
1867 	hrtimer_cancel(&host->fault_timer);
1868 }
1869 
1870 static void dw_mci_init_fault(struct dw_mci *host)
1871 {
1872 	host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
1873 
1874 	hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1875 	host->fault_timer.function = dw_mci_fault_timer;
1876 }
1877 #else
1878 static void dw_mci_init_fault(struct dw_mci *host)
1879 {
1880 }
1881 
1882 static void dw_mci_start_fault_timer(struct dw_mci *host)
1883 {
1884 }
1885 
1886 static void dw_mci_stop_fault_timer(struct dw_mci *host)
1887 {
1888 }
1889 #endif
1890 
1891 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1892 	__releases(&host->lock)
1893 	__acquires(&host->lock)
1894 {
1895 	struct dw_mci_slot *slot;
1896 	struct mmc_host	*prev_mmc = host->slot->mmc;
1897 
1898 	WARN_ON(host->cmd || host->data);
1899 
1900 	host->slot->mrq = NULL;
1901 	host->mrq = NULL;
1902 	if (!list_empty(&host->queue)) {
1903 		slot = list_entry(host->queue.next,
1904 				  struct dw_mci_slot, queue_node);
1905 		list_del(&slot->queue_node);
1906 		dev_vdbg(host->dev, "list not empty: %s is next\n",
1907 			 mmc_hostname(slot->mmc));
1908 		host->state = STATE_SENDING_CMD;
1909 		dw_mci_start_request(host, slot);
1910 	} else {
1911 		dev_vdbg(host->dev, "list empty\n");
1912 
1913 		if (host->state == STATE_SENDING_CMD11)
1914 			host->state = STATE_WAITING_CMD11_DONE;
1915 		else
1916 			host->state = STATE_IDLE;
1917 	}
1918 
1919 	spin_unlock(&host->lock);
1920 	mmc_request_done(prev_mmc, mrq);
1921 	spin_lock(&host->lock);
1922 }
1923 
1924 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1925 {
1926 	u32 status = host->cmd_status;
1927 
1928 	host->cmd_status = 0;
1929 
1930 	/* Read the response from the card (up to 16 bytes) */
1931 	if (cmd->flags & MMC_RSP_PRESENT) {
1932 		if (cmd->flags & MMC_RSP_136) {
1933 			cmd->resp[3] = mci_readl(host, RESP0);
1934 			cmd->resp[2] = mci_readl(host, RESP1);
1935 			cmd->resp[1] = mci_readl(host, RESP2);
1936 			cmd->resp[0] = mci_readl(host, RESP3);
1937 		} else {
1938 			cmd->resp[0] = mci_readl(host, RESP0);
1939 			cmd->resp[1] = 0;
1940 			cmd->resp[2] = 0;
1941 			cmd->resp[3] = 0;
1942 		}
1943 	}
1944 
1945 	if (status & SDMMC_INT_RTO)
1946 		cmd->error = -ETIMEDOUT;
1947 	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1948 		cmd->error = -EILSEQ;
1949 	else if (status & SDMMC_INT_RESP_ERR)
1950 		cmd->error = -EIO;
1951 	else
1952 		cmd->error = 0;
1953 
1954 	return cmd->error;
1955 }
1956 
1957 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1958 {
1959 	u32 status = host->data_status;
1960 
1961 	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1962 		if (status & SDMMC_INT_DRTO) {
1963 			data->error = -ETIMEDOUT;
1964 		} else if (status & SDMMC_INT_DCRC) {
1965 			data->error = -EILSEQ;
1966 		} else if (status & SDMMC_INT_EBE) {
1967 			if (host->dir_status ==
1968 				DW_MCI_SEND_STATUS) {
1969 				/*
1970 				 * No data CRC status was returned.
1971 				 * The number of bytes transferred
1972 				 * will be exaggerated in PIO mode.
1973 				 */
1974 				data->bytes_xfered = 0;
1975 				data->error = -ETIMEDOUT;
1976 			} else if (host->dir_status ==
1977 					DW_MCI_RECV_STATUS) {
1978 				data->error = -EILSEQ;
1979 			}
1980 		} else {
1981 			/* SDMMC_INT_SBE is included */
1982 			data->error = -EILSEQ;
1983 		}
1984 
1985 		dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1986 
1987 		/*
1988 		 * After an error, there may be data lingering
1989 		 * in the FIFO
1990 		 */
1991 		dw_mci_reset(host);
1992 	} else {
1993 		data->bytes_xfered = data->blocks * data->blksz;
1994 		data->error = 0;
1995 	}
1996 
1997 	return data->error;
1998 }
1999 
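/*
 * Convert the data read timeout from card clocks into milliseconds and
 * arm dto_timer. Illustrative numbers (not read from any hardware):
 * with the maximum TMOUT value of 0xffffff clocks, CLKDIV = 1 (so a
 * divider of 2) and bus_hz = 200 MHz, drto_ms works out to
 * DIV_ROUND_UP(1000 * 0xffffff * 2, 200000000) = 168 ms, plus the
 * 10 ms margin added below.
 */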
2000 static void dw_mci_set_drto(struct dw_mci *host)
2001 {
2002 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2003 	unsigned int drto_clks;
2004 	unsigned int drto_div;
2005 	unsigned int drto_ms;
2006 	unsigned long irqflags;
2007 
2008 	if (drv_data && drv_data->get_drto_clks)
2009 		drto_clks = drv_data->get_drto_clks(host);
2010 	else
2011 		drto_clks = mci_readl(host, TMOUT) >> 8;
2012 	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
2013 	if (drto_div == 0)
2014 		drto_div = 1;
2015 
2016 	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
2017 				   host->bus_hz);
2018 
2019 	dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);
2020 
2021 	/* add a bit of spare time */
2022 	drto_ms += 10;
2023 
2024 	spin_lock_irqsave(&host->irq_lock, irqflags);
2025 	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2026 		mod_timer(&host->dto_timer,
2027 			  jiffies + msecs_to_jiffies(drto_ms));
2028 	spin_unlock_irqrestore(&host->irq_lock, irqflags);
2029 }
2030 
2031 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
2032 {
2033 	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2034 		return false;
2035 
2036 	/*
2037 	 * Really be certain that the timer has stopped.  This is a bit of
2038 	 * paranoia and could only really happen if we had really bad
2039 	 * interrupt latency and the interrupt routine and timeout were
2040 	 * running concurrently so that the del_timer() in the interrupt
2041 	 * handler couldn't run.
2042 	 */
2043 	WARN_ON(del_timer_sync(&host->cto_timer));
2044 	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2045 
2046 	return true;
2047 }
2048 
2049 static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
2050 {
2051 	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2052 		return false;
2053 
2054 	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
2055 	WARN_ON(del_timer_sync(&host->dto_timer));
2056 	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2057 
2058 	return true;
2059 }
2060 
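/*
 * The request state machine, driven from interrupt context via this
 * tasklet:
 *
 *   SENDING_CMD(11) -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> IDLE
 *
 * STATE_DATA_ERROR is a detour that waits for the transfer to finish
 * before rejoining at DATA_BUSY. Each loop iteration consumes the
 * pending_events bits set by dw_mci_interrupt() and the timeout timers.
 */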
2061 static void dw_mci_tasklet_func(struct tasklet_struct *t)
2062 {
2063 	struct dw_mci *host = from_tasklet(host, t, tasklet);
2064 	struct mmc_data	*data;
2065 	struct mmc_command *cmd;
2066 	struct mmc_request *mrq;
2067 	enum dw_mci_state state;
2068 	enum dw_mci_state prev_state;
2069 	unsigned int err;
2070 
2071 	spin_lock(&host->lock);
2072 
2073 	state = host->state;
2074 	data = host->data;
2075 	mrq = host->mrq;
2076 
2077 	do {
2078 		prev_state = state;
2079 
2080 		switch (state) {
2081 		case STATE_IDLE:
2082 		case STATE_WAITING_CMD11_DONE:
2083 			break;
2084 
2085 		case STATE_SENDING_CMD11:
2086 		case STATE_SENDING_CMD:
2087 			if (!dw_mci_clear_pending_cmd_complete(host))
2088 				break;
2089 
2090 			cmd = host->cmd;
2091 			host->cmd = NULL;
2092 			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2093 			err = dw_mci_command_complete(host, cmd);
2094 			if (cmd == mrq->sbc && !err) {
2095 				__dw_mci_start_request(host, host->slot,
2096 						       mrq->cmd);
2097 				goto unlock;
2098 			}
2099 
2100 			if (cmd->data && err) {
2101 				/*
2102 				 * During UHS tuning sequence, sending the stop
2103 				 * command after the response CRC error would
2104 				 * throw the system into a confused state
2105 				 * causing all future tuning phases to report
2106 				 * failure.
2107 				 *
2108 				 * In such case controller will move into a data
2109 				 * transfer state after a response error or
2110 				 * response CRC error. Let's let that finish
2111 				 * before trying to send a stop, so we'll go to
2112 				 * STATE_SENDING_DATA.
2113 				 *
2114 				 * Although letting the data transfer take place
2115 				 * will waste a bit of time (we already know
2116 				 * the command was bad), it can't cause any
2117 				 * errors since it's possible it would have
2118 				 * taken place anyway if this tasklet got
2119 				 * delayed. Allowing the transfer to take place
2120 				 * avoids races and keeps things simple.
2121 				 */
2122 				if (err != -ETIMEDOUT &&
2123 				    host->dir_status == DW_MCI_RECV_STATUS) {
2124 					state = STATE_SENDING_DATA;
2125 					continue;
2126 				}
2127 
2128 				send_stop_abort(host, data);
2129 				dw_mci_stop_dma(host);
2130 				state = STATE_SENDING_STOP;
2131 				break;
2132 			}
2133 
2134 			if (!cmd->data || err) {
2135 				dw_mci_request_end(host, mrq);
2136 				goto unlock;
2137 			}
2138 
2139 			prev_state = state = STATE_SENDING_DATA;
2140 			fallthrough;
2141 
2142 		case STATE_SENDING_DATA:
2143 			/*
2144 			 * We could get a data error and never a transfer
2145 			 * complete so we'd better check for it here.
2146 			 *
2147 			 * Note that we don't really care if we also got a
2148 			 * transfer complete; stopping the DMA and sending an
2149 			 * abort won't hurt.
2150 			 */
2151 			if (test_and_clear_bit(EVENT_DATA_ERROR,
2152 					       &host->pending_events)) {
2153 				if (!(host->data_status & (SDMMC_INT_DRTO |
2154 							   SDMMC_INT_EBE)))
2155 					send_stop_abort(host, data);
2156 				dw_mci_stop_dma(host);
2157 				state = STATE_DATA_ERROR;
2158 				break;
2159 			}
2160 
2161 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2162 						&host->pending_events)) {
2163 				/*
2164 				 * Transfer complete hasn't arrived yet; when
2165 				 * reading, arm the data read timeout (DRTO).
2166 				 */
2167 				if (host->dir_status == DW_MCI_RECV_STATUS)
2168 					dw_mci_set_drto(host);
2169 				break;
2170 			}
2171 
2172 			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2173 
2174 			/*
2175 			 * Handle an EVENT_DATA_ERROR that might have shown up
2176 			 * before the transfer completed.  This might not have
2177 			 * been caught by the check above because the interrupt
2178 			 * could have gone off between the previous check and
2179 			 * the check for transfer complete.
2180 			 *
2181 			 * Technically this ought not be needed assuming we
2182 			 * get a DATA_COMPLETE eventually (we'll notice the
2183 			 * error and end the request), but it shouldn't hurt.
2184 			 *
2185 			 * This has the advantage of sending the stop command.
2186 			 */
2187 			if (test_and_clear_bit(EVENT_DATA_ERROR,
2188 					       &host->pending_events)) {
2189 				if (!(host->data_status & (SDMMC_INT_DRTO |
2190 							   SDMMC_INT_EBE)))
2191 					send_stop_abort(host, data);
2192 				dw_mci_stop_dma(host);
2193 				state = STATE_DATA_ERROR;
2194 				break;
2195 			}
2196 			prev_state = state = STATE_DATA_BUSY;
2197 
2198 			fallthrough;
2199 
2200 		case STATE_DATA_BUSY:
2201 			if (!dw_mci_clear_pending_data_complete(host)) {
2202 				/*
2203 				 * A data error interrupt arrived but data over
2204 				 * hasn't come in time; when reading, arm the
2205 				 * data read timeout (DRTO).
2206 				 */
2207 				if (host->dir_status == DW_MCI_RECV_STATUS)
2208 					dw_mci_set_drto(host);
2209 				break;
2210 			}
2211 
2212 			dw_mci_stop_fault_timer(host);
2213 			host->data = NULL;
2214 			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2215 			err = dw_mci_data_complete(host, data);
2216 
2217 			if (!err) {
2218 				if (!data->stop || mrq->sbc) {
2219 					if (mrq->sbc && data->stop)
2220 						data->stop->error = 0;
2221 					dw_mci_request_end(host, mrq);
2222 					goto unlock;
2223 				}
2224 
2225 				/* stop command for open-ended transfer */
2226 				if (data->stop)
2227 					send_stop_abort(host, data);
2228 			} else {
2229 				/*
2230 				 * If we don't have a command complete now we'll
2231 				 * never get one since we just reset everything;
2232 				 * better end the request.
2233 				 *
2234 				 * If we do have a command complete we'll fall
2235 				 * through to the SENDING_STOP command and
2236 				 * everything will be peachy keen.
2237 				 */
2238 				if (!test_bit(EVENT_CMD_COMPLETE,
2239 					      &host->pending_events)) {
2240 					host->cmd = NULL;
2241 					dw_mci_request_end(host, mrq);
2242 					goto unlock;
2243 				}
2244 			}
2245 
2246 			/*
2247 			 * If err is non-zero, a stop-abort command has
2248 			 * already been issued.
2249 			 */
2250 			prev_state = state = STATE_SENDING_STOP;
2251 
2252 			fallthrough;
2253 
2254 		case STATE_SENDING_STOP:
2255 			if (!dw_mci_clear_pending_cmd_complete(host))
2256 				break;
2257 
2258 			/* CMD error in data command */
2259 			if (mrq->cmd->error && mrq->data)
2260 				dw_mci_reset(host);
2261 
2262 			dw_mci_stop_fault_timer(host);
2263 			host->cmd = NULL;
2264 			host->data = NULL;
2265 
2266 			if (!mrq->sbc && mrq->stop)
2267 				dw_mci_command_complete(host, mrq->stop);
2268 			else
2269 				host->cmd_status = 0;
2270 
2271 			dw_mci_request_end(host, mrq);
2272 			goto unlock;
2273 
2274 		case STATE_DATA_ERROR:
2275 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2276 						&host->pending_events))
2277 				break;
2278 
2279 			state = STATE_DATA_BUSY;
2280 			break;
2281 		}
2282 	} while (state != prev_state);
2283 
2284 	host->state = state;
2285 unlock:
2286 	spin_unlock(&host->lock);
2287 
2288 }
2289 
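/*
 * PIO helpers: part_buf stages the tail of a buffer smaller than one
 * FIFO word ((1 << host->data_shift) bytes, i.e. 2, 4 or 8), so that
 * only whole words are ever pushed to or pulled from the data FIFO.
 */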
2290 /* push final bytes to part_buf, only use during push */
2291 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2292 {
2293 	memcpy((void *)&host->part_buf, buf, cnt);
2294 	host->part_buf_count = cnt;
2295 }
2296 
2297 /* append bytes to part_buf, only use during push */
2298 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2299 {
2300 	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2301 	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2302 	host->part_buf_count += cnt;
2303 	return cnt;
2304 }
2305 
2306 /* pull first bytes from part_buf, only use during pull */
2307 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2308 {
2309 	cnt = min_t(int, cnt, host->part_buf_count);
2310 	if (cnt) {
2311 		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2312 		       cnt);
2313 		host->part_buf_count -= cnt;
2314 		host->part_buf_start += cnt;
2315 	}
2316 	return cnt;
2317 }
2318 
2319 /* pull final bytes from the part_buf, assuming it's just been filled */
2320 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2321 {
2322 	memcpy(buf, &host->part_buf, cnt);
2323 	host->part_buf_start = cnt;
2324 	host->part_buf_count = (1 << host->data_shift) - cnt;
2325 }
2326 
2327 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2328 {
2329 	struct mmc_data *data = host->data;
2330 	int init_cnt = cnt;
2331 
2332 	/* try and push anything in the part_buf */
2333 	if (unlikely(host->part_buf_count)) {
2334 		int len = dw_mci_push_part_bytes(host, buf, cnt);
2335 
2336 		buf += len;
2337 		cnt -= len;
2338 		if (host->part_buf_count == 2) {
2339 			mci_fifo_writew(host->fifo_reg, host->part_buf16);
2340 			host->part_buf_count = 0;
2341 		}
2342 	}
2343 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2344 	if (unlikely((unsigned long)buf & 0x1)) {
2345 		while (cnt >= 2) {
2346 			u16 aligned_buf[64];
2347 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2348 			int items = len >> 1;
2349 			int i;
2350 			/* memcpy from input buffer into aligned buffer */
2351 			memcpy(aligned_buf, buf, len);
2352 			buf += len;
2353 			cnt -= len;
2354 			/* push data from aligned buffer into fifo */
2355 			for (i = 0; i < items; ++i)
2356 				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2357 		}
2358 	} else
2359 #endif
2360 	{
2361 		u16 *pdata = buf;
2362 
2363 		for (; cnt >= 2; cnt -= 2)
2364 			mci_fifo_writew(host->fifo_reg, *pdata++);
2365 		buf = pdata;
2366 	}
2367 	/* put anything remaining in the part_buf */
2368 	if (cnt) {
2369 		dw_mci_set_part_bytes(host, buf, cnt);
2370 		 /* Push data if we have reached the expected data length */
2371 		if ((data->bytes_xfered + init_cnt) ==
2372 		    (data->blksz * data->blocks))
2373 			mci_fifo_writew(host->fifo_reg, host->part_buf16);
2374 	}
2375 }
2376 
2377 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2378 {
2379 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2380 	if (unlikely((unsigned long)buf & 0x1)) {
2381 		while (cnt >= 2) {
2382 			/* pull data from fifo into aligned buffer */
2383 			u16 aligned_buf[64];
2384 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2385 			int items = len >> 1;
2386 			int i;
2387 
2388 			for (i = 0; i < items; ++i)
2389 				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2390 			/* memcpy from aligned buffer into output buffer */
2391 			memcpy(buf, aligned_buf, len);
2392 			buf += len;
2393 			cnt -= len;
2394 		}
2395 	} else
2396 #endif
2397 	{
2398 		u16 *pdata = buf;
2399 
2400 		for (; cnt >= 2; cnt -= 2)
2401 			*pdata++ = mci_fifo_readw(host->fifo_reg);
2402 		buf = pdata;
2403 	}
2404 	if (cnt) {
2405 		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2406 		dw_mci_pull_final_bytes(host, buf, cnt);
2407 	}
2408 }
2409 
2410 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2411 {
2412 	struct mmc_data *data = host->data;
2413 	int init_cnt = cnt;
2414 
2415 	/* try and push anything in the part_buf */
2416 	if (unlikely(host->part_buf_count)) {
2417 		int len = dw_mci_push_part_bytes(host, buf, cnt);
2418 
2419 		buf += len;
2420 		cnt -= len;
2421 		if (host->part_buf_count == 4) {
2422 			mci_fifo_writel(host->fifo_reg,	host->part_buf32);
2423 			host->part_buf_count = 0;
2424 		}
2425 	}
2426 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2427 	if (unlikely((unsigned long)buf & 0x3)) {
2428 		while (cnt >= 4) {
2429 			u32 aligned_buf[32];
2430 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2431 			int items = len >> 2;
2432 			int i;
2433 			/* memcpy from input buffer into aligned buffer */
2434 			memcpy(aligned_buf, buf, len);
2435 			buf += len;
2436 			cnt -= len;
2437 			/* push data from aligned buffer into fifo */
2438 			for (i = 0; i < items; ++i)
2439 				mci_fifo_writel(host->fifo_reg,	aligned_buf[i]);
2440 		}
2441 	} else
2442 #endif
2443 	{
2444 		u32 *pdata = buf;
2445 
2446 		for (; cnt >= 4; cnt -= 4)
2447 			mci_fifo_writel(host->fifo_reg, *pdata++);
2448 		buf = pdata;
2449 	}
2450 	/* put anything remaining in the part_buf */
2451 	if (cnt) {
2452 		dw_mci_set_part_bytes(host, buf, cnt);
2453 		 /* Push data if we have reached the expected data length */
2454 		if ((data->bytes_xfered + init_cnt) ==
2455 		    (data->blksz * data->blocks))
2456 			mci_fifo_writel(host->fifo_reg, host->part_buf32);
2457 	}
2458 }
2459 
2460 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2461 {
2462 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2463 	if (unlikely((unsigned long)buf & 0x3)) {
2464 		while (cnt >= 4) {
2465 			/* pull data from fifo into aligned buffer */
2466 			u32 aligned_buf[32];
2467 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2468 			int items = len >> 2;
2469 			int i;
2470 
2471 			for (i = 0; i < items; ++i)
2472 				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2473 			/* memcpy from aligned buffer into output buffer */
2474 			memcpy(buf, aligned_buf, len);
2475 			buf += len;
2476 			cnt -= len;
2477 		}
2478 	} else
2479 #endif
2480 	{
2481 		u32 *pdata = buf;
2482 
2483 		for (; cnt >= 4; cnt -= 4)
2484 			*pdata++ = mci_fifo_readl(host->fifo_reg);
2485 		buf = pdata;
2486 	}
2487 	if (cnt) {
2488 		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2489 		dw_mci_pull_final_bytes(host, buf, cnt);
2490 	}
2491 }
2492 
2493 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2494 {
2495 	struct mmc_data *data = host->data;
2496 	int init_cnt = cnt;
2497 
2498 	/* try and push anything in the part_buf */
2499 	if (unlikely(host->part_buf_count)) {
2500 		int len = dw_mci_push_part_bytes(host, buf, cnt);
2501 
2502 		buf += len;
2503 		cnt -= len;
2504 
2505 		if (host->part_buf_count == 8) {
2506 			mci_fifo_writeq(host->fifo_reg,	host->part_buf);
2507 			host->part_buf_count = 0;
2508 		}
2509 	}
2510 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2511 	if (unlikely((unsigned long)buf & 0x7)) {
2512 		while (cnt >= 8) {
2513 			u64 aligned_buf[16];
2514 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2515 			int items = len >> 3;
2516 			int i;
2517 			/* memcpy from input buffer into aligned buffer */
2518 			memcpy(aligned_buf, buf, len);
2519 			buf += len;
2520 			cnt -= len;
2521 			/* push data from aligned buffer into fifo */
2522 			for (i = 0; i < items; ++i)
2523 				mci_fifo_writeq(host->fifo_reg,	aligned_buf[i]);
2524 		}
2525 	} else
2526 #endif
2527 	{
2528 		u64 *pdata = buf;
2529 
2530 		for (; cnt >= 8; cnt -= 8)
2531 			mci_fifo_writeq(host->fifo_reg, *pdata++);
2532 		buf = pdata;
2533 	}
2534 	/* put anything remaining in the part_buf */
2535 	if (cnt) {
2536 		dw_mci_set_part_bytes(host, buf, cnt);
2537 		/* Push data if we have reached the expected data length */
2538 		if ((data->bytes_xfered + init_cnt) ==
2539 		    (data->blksz * data->blocks))
2540 			mci_fifo_writeq(host->fifo_reg, host->part_buf);
2541 	}
2542 }
2543 
2544 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2545 {
2546 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2547 	if (unlikely((unsigned long)buf & 0x7)) {
2548 		while (cnt >= 8) {
2549 			/* pull data from fifo into aligned buffer */
2550 			u64 aligned_buf[16];
2551 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2552 			int items = len >> 3;
2553 			int i;
2554 
2555 			for (i = 0; i < items; ++i)
2556 				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2557 
2558 			/* memcpy from aligned buffer into output buffer */
2559 			memcpy(buf, aligned_buf, len);
2560 			buf += len;
2561 			cnt -= len;
2562 		}
2563 	} else
2564 #endif
2565 	{
2566 		u64 *pdata = buf;
2567 
2568 		for (; cnt >= 8; cnt -= 8)
2569 			*pdata++ = mci_fifo_readq(host->fifo_reg);
2570 		buf = pdata;
2571 	}
2572 	if (cnt) {
2573 		host->part_buf = mci_fifo_readq(host->fifo_reg);
2574 		dw_mci_pull_final_bytes(host, buf, cnt);
2575 	}
2576 }
2577 
2578 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2579 {
2580 	int len;
2581 
2582 	/* get remaining partial bytes */
2583 	len = dw_mci_pull_part_bytes(host, buf, cnt);
2584 	if (unlikely(len == cnt))
2585 		return;
2586 	buf += len;
2587 	cnt -= len;
2588 
2589 	/* get the rest of the data */
2590 	host->pull_data(host, buf, cnt);
2591 }
2592 
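/*
 * Drain the receive FIFO into the scatterlist. The available byte count
 * is the FIFO fill level (SDMMC_GET_FCNT) scaled by the bus data width,
 * plus whatever is staged in part_buf; keep looping while RXDR stays
 * asserted, or on the final DTO pass until the FIFO is empty.
 */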
2593 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2594 {
2595 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2596 	void *buf;
2597 	unsigned int offset;
2598 	struct mmc_data	*data = host->data;
2599 	int shift = host->data_shift;
2600 	u32 status;
2601 	unsigned int len;
2602 	unsigned int remain, fcnt;
2603 
2604 	do {
2605 		if (!sg_miter_next(sg_miter))
2606 			goto done;
2607 
2608 		host->sg = sg_miter->piter.sg;
2609 		buf = sg_miter->addr;
2610 		remain = sg_miter->length;
2611 		offset = 0;
2612 
2613 		do {
2614 			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2615 					<< shift) + host->part_buf_count;
2616 			len = min(remain, fcnt);
2617 			if (!len)
2618 				break;
2619 			dw_mci_pull_data(host, (void *)(buf + offset), len);
2620 			data->bytes_xfered += len;
2621 			offset += len;
2622 			remain -= len;
2623 		} while (remain);
2624 
2625 		sg_miter->consumed = offset;
2626 		status = mci_readl(host, MINTSTS);
2627 		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2628 	/* if RXDR is ready, read again */
2629 	} while ((status & SDMMC_INT_RXDR) ||
2630 		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2631 
2632 	if (!remain) {
2633 		if (!sg_miter_next(sg_miter))
2634 			goto done;
2635 		sg_miter->consumed = 0;
2636 	}
2637 	sg_miter_stop(sg_miter);
2638 	return;
2639 
2640 done:
2641 	sg_miter_stop(sg_miter);
2642 	host->sg = NULL;
2643 	smp_wmb(); /* drain writebuffer */
2644 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2645 }
2646 
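/*
 * Fill the transmit FIFO from the scatterlist. Free space is the FIFO
 * depth minus the current fill level, scaled by the bus data width,
 * less any bytes already staged in part_buf.
 */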
2647 static void dw_mci_write_data_pio(struct dw_mci *host)
2648 {
2649 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2650 	void *buf;
2651 	unsigned int offset;
2652 	struct mmc_data	*data = host->data;
2653 	int shift = host->data_shift;
2654 	u32 status;
2655 	unsigned int len;
2656 	unsigned int fifo_depth = host->fifo_depth;
2657 	unsigned int remain, fcnt;
2658 
2659 	do {
2660 		if (!sg_miter_next(sg_miter))
2661 			goto done;
2662 
2663 		host->sg = sg_miter->piter.sg;
2664 		buf = sg_miter->addr;
2665 		remain = sg_miter->length;
2666 		offset = 0;
2667 
2668 		do {
2669 			fcnt = ((fifo_depth -
2670 				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2671 					<< shift) - host->part_buf_count;
2672 			len = min(remain, fcnt);
2673 			if (!len)
2674 				break;
2675 			host->push_data(host, (void *)(buf + offset), len);
2676 			data->bytes_xfered += len;
2677 			offset += len;
2678 			remain -= len;
2679 		} while (remain);
2680 
2681 		sg_miter->consumed = offset;
2682 		status = mci_readl(host, MINTSTS);
2683 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2684 	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2685 
2686 	if (!remain) {
2687 		if (!sg_miter_next(sg_miter))
2688 			goto done;
2689 		sg_miter->consumed = 0;
2690 	}
2691 	sg_miter_stop(sg_miter);
2692 	return;
2693 
2694 done:
2695 	sg_miter_stop(sg_miter);
2696 	host->sg = NULL;
2697 	smp_wmb(); /* drain writebuffer */
2698 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2699 }
2700 
2701 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2702 {
2703 	del_timer(&host->cto_timer);
2704 
2705 	if (!host->cmd_status)
2706 		host->cmd_status = status;
2707 
2708 	smp_wmb(); /* drain writebuffer */
2709 
2710 	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2711 	tasklet_schedule(&host->tasklet);
2712 
2713 	dw_mci_start_fault_timer(host);
2714 }
2715 
2716 static void dw_mci_handle_cd(struct dw_mci *host)
2717 {
2718 	struct dw_mci_slot *slot = host->slot;
2719 
2720 	mmc_detect_change(slot->mmc,
2721 		msecs_to_jiffies(host->pdata->detect_delay_ms));
2722 }
2723 
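/*
 * Top-level interrupt handler. MINTSTS is sampled once and each pending
 * source is acked in RINTSTS as it is handled: voltage switch first
 * (it can masquerade as a command error), then command and data errors,
 * DATA_OVER, PIO RXDR/TXDR, CMD_DONE, card detect and the per-slot SDIO
 * interrupt. IDMAC interrupts are handled separately via IDSTS/IDSTS64.
 */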
2724 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2725 {
2726 	struct dw_mci *host = dev_id;
2727 	u32 pending;
2728 	struct dw_mci_slot *slot = host->slot;
2729 
2730 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2731 
2732 	if (pending) {
2733 		/* Check volt switch first, since it can look like an error */
2734 		if ((host->state == STATE_SENDING_CMD11) &&
2735 		    (pending & SDMMC_INT_VOLT_SWITCH)) {
2736 			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2737 			pending &= ~SDMMC_INT_VOLT_SWITCH;
2738 
2739 			/*
2740 			 * Hold the lock; we know cmd11_timer can't be kicked
2741 			 * off after the lock is released, so safe to delete.
2742 			 */
2743 			spin_lock(&host->irq_lock);
2744 			dw_mci_cmd_interrupt(host, pending);
2745 			spin_unlock(&host->irq_lock);
2746 
2747 			del_timer(&host->cmd11_timer);
2748 		}
2749 
2750 		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2751 			spin_lock(&host->irq_lock);
2752 
2753 			del_timer(&host->cto_timer);
2754 			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2755 			host->cmd_status = pending;
2756 			smp_wmb(); /* drain writebuffer */
2757 			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2758 
2759 			spin_unlock(&host->irq_lock);
2760 		}
2761 
2762 		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2763 			spin_lock(&host->irq_lock);
2764 
2765 			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2766 				del_timer(&host->dto_timer);
2767 
2768 			/* if there is an error, report DATA_ERROR */
2769 			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2770 			host->data_status = pending;
2771 			smp_wmb(); /* drain writebuffer */
2772 			set_bit(EVENT_DATA_ERROR, &host->pending_events);
2773 
2774 			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2775 				/* In case of error, we cannot expect a DTO */
2776 				set_bit(EVENT_DATA_COMPLETE,
2777 					&host->pending_events);
2778 
2779 			tasklet_schedule(&host->tasklet);
2780 
2781 			spin_unlock(&host->irq_lock);
2782 		}
2783 
2784 		if (pending & SDMMC_INT_DATA_OVER) {
2785 			spin_lock(&host->irq_lock);
2786 
2787 			del_timer(&host->dto_timer);
2788 
2789 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2790 			if (!host->data_status)
2791 				host->data_status = pending;
2792 			smp_wmb(); /* drain writebuffer */
2793 			if (host->dir_status == DW_MCI_RECV_STATUS) {
2794 				if (host->sg != NULL)
2795 					dw_mci_read_data_pio(host, true);
2796 			}
2797 			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2798 			tasklet_schedule(&host->tasklet);
2799 
2800 			spin_unlock(&host->irq_lock);
2801 		}
2802 
2803 		if (pending & SDMMC_INT_RXDR) {
2804 			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2805 			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2806 				dw_mci_read_data_pio(host, false);
2807 		}
2808 
2809 		if (pending & SDMMC_INT_TXDR) {
2810 			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2811 			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2812 				dw_mci_write_data_pio(host);
2813 		}
2814 
2815 		if (pending & SDMMC_INT_CMD_DONE) {
2816 			spin_lock(&host->irq_lock);
2817 
2818 			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2819 			dw_mci_cmd_interrupt(host, pending);
2820 
2821 			spin_unlock(&host->irq_lock);
2822 		}
2823 
2824 		if (pending & SDMMC_INT_CD) {
2825 			mci_writel(host, RINTSTS, SDMMC_INT_CD);
2826 			dw_mci_handle_cd(host);
2827 		}
2828 
2829 		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2830 			mci_writel(host, RINTSTS,
2831 				   SDMMC_INT_SDIO(slot->sdio_id));
2832 			__dw_mci_enable_sdio_irq(slot, 0);
2833 			sdio_signal_irq(slot->mmc);
2834 		}
2835 
2836 	}
2837 
2838 	if (host->use_dma != TRANS_MODE_IDMAC)
2839 		return IRQ_HANDLED;
2840 
2841 	/* Handle IDMA interrupts */
2842 	if (host->dma_64bit_address == 1) {
2843 		pending = mci_readl(host, IDSTS64);
2844 		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2845 			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2846 							SDMMC_IDMAC_INT_RI);
2847 			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2848 			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2849 				host->dma_ops->complete((void *)host);
2850 		}
2851 	} else {
2852 		pending = mci_readl(host, IDSTS);
2853 		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2854 			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2855 							SDMMC_IDMAC_INT_RI);
2856 			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2857 			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2858 				host->dma_ops->complete((void *)host);
2859 		}
2860 	}
2861 
2862 	return IRQ_HANDLED;
2863 }
2864 
2865 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2866 {
2867 	struct dw_mci *host = slot->host;
2868 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2869 	struct mmc_host *mmc = slot->mmc;
2870 	int ctrl_id;
2871 
2872 	if (host->pdata->caps)
2873 		mmc->caps = host->pdata->caps;
2874 
2875 	if (host->pdata->pm_caps)
2876 		mmc->pm_caps = host->pdata->pm_caps;
2877 
2878 	if (drv_data)
2879 		mmc->caps |= drv_data->common_caps;
2880 
2881 	if (host->dev->of_node) {
2882 		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2883 		if (ctrl_id < 0)
2884 			ctrl_id = 0;
2885 	} else {
2886 		ctrl_id = to_platform_device(host->dev)->id;
2887 	}
2888 
2889 	if (drv_data && drv_data->caps) {
2890 		if (ctrl_id >= drv_data->num_caps) {
2891 			dev_err(host->dev, "invalid controller id %d\n",
2892 				ctrl_id);
2893 			return -EINVAL;
2894 		}
2895 		mmc->caps |= drv_data->caps[ctrl_id];
2896 	}
2897 
2898 	if (host->pdata->caps2)
2899 		mmc->caps2 = host->pdata->caps2;
2900 
2901 	/* if host has set a minimum_freq, we should respect it */
2902 	if (host->minimum_speed)
2903 		mmc->f_min = host->minimum_speed;
2904 	else
2905 		mmc->f_min = DW_MCI_FREQ_MIN;
2906 
2907 	if (!mmc->f_max)
2908 		mmc->f_max = DW_MCI_FREQ_MAX;
2909 
2910 	/* Process SDIO IRQs through the sdio_irq_work. */
2911 	if (mmc->caps & MMC_CAP_SDIO_IRQ)
2912 		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2913 
2914 	return 0;
2915 }
2916 
2917 static int dw_mci_init_slot(struct dw_mci *host)
2918 {
2919 	struct mmc_host *mmc;
2920 	struct dw_mci_slot *slot;
2921 	int ret;
2922 
2923 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2924 	if (!mmc)
2925 		return -ENOMEM;
2926 
2927 	slot = mmc_priv(mmc);
2928 	slot->id = 0;
2929 	slot->sdio_id = host->sdio_id0 + slot->id;
2930 	slot->mmc = mmc;
2931 	slot->host = host;
2932 	host->slot = slot;
2933 
2934 	mmc->ops = &dw_mci_ops;
2935 
2936 	/* if there are external regulators, get them */
2937 	ret = mmc_regulator_get_supply(mmc);
2938 	if (ret)
2939 		goto err_host_allocated;
2940 
2941 	if (!mmc->ocr_avail)
2942 		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2943 
2944 	ret = mmc_of_parse(mmc);
2945 	if (ret)
2946 		goto err_host_allocated;
2947 
2948 	ret = dw_mci_init_slot_caps(slot);
2949 	if (ret)
2950 		goto err_host_allocated;
2951 
2952 	/* Useful defaults if platform data is unset. */
2953 	if (host->use_dma == TRANS_MODE_IDMAC) {
2954 		mmc->max_segs = host->ring_size;
2955 		mmc->max_blk_size = 65535;
2956 		mmc->max_seg_size = 0x1000;
2957 		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2958 		mmc->max_blk_count = mmc->max_req_size / 512;
2959 	} else if (host->use_dma == TRANS_MODE_EDMAC) {
2960 		mmc->max_segs = 64;
2961 		mmc->max_blk_size = 65535;
2962 		mmc->max_blk_count = 65535;
2963 		mmc->max_req_size =
2964 				mmc->max_blk_size * mmc->max_blk_count;
2965 		mmc->max_seg_size = mmc->max_req_size;
2966 	} else {
2967 		/* TRANS_MODE_PIO */
2968 		mmc->max_segs = 64;
2969 		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2970 		mmc->max_blk_count = 512;
2971 		mmc->max_req_size = mmc->max_blk_size *
2972 				    mmc->max_blk_count;
2973 		mmc->max_seg_size = mmc->max_req_size;
2974 	}
2975 
2976 	dw_mci_get_cd(mmc);
2977 
2978 	ret = mmc_add_host(mmc);
2979 	if (ret)
2980 		goto err_host_allocated;
2981 
2982 #if defined(CONFIG_DEBUG_FS)
2983 	dw_mci_init_debugfs(slot);
2984 #endif
2985 
2986 	return 0;
2987 
2988 err_host_allocated:
2989 	mmc_free_host(mmc);
2990 	return ret;
2991 }
2992 
2993 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2994 {
2995 	/* Debugfs stuff is cleaned up by mmc core */
2996 	mmc_remove_host(slot->mmc);
2997 	slot->host->slot = NULL;
2998 	mmc_free_host(slot->mmc);
2999 }
3000 
3001 static void dw_mci_init_dma(struct dw_mci *host)
3002 {
3003 	int addr_config;
3004 	struct device *dev = host->dev;
3005 
3006 	/*
3007 	 * Check the transfer mode from HCON[17:16] and clarify the
3008 	 * ambiguous description in the dw_mmc databook:
3009 	 * 2b'00: No DMA Interface -> actually means using the internal DMA block
3010 	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
3011 	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
3012 	 * 2b'11: Non DW DMA Interface -> PIO only
3013 	 * Compared to the DesignWare DMA Interface, the Generic DMA Interface
3014 	 * has a simpler request/acknowledge handshake mechanism and both are
3015 	 * regarded as external DMA masters by dw_mmc.
3016 	 */
3017 	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
3018 	if (host->use_dma == DMA_INTERFACE_IDMA) {
3019 		host->use_dma = TRANS_MODE_IDMAC;
3020 	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
3021 		   host->use_dma == DMA_INTERFACE_GDMA) {
3022 		host->use_dma = TRANS_MODE_EDMAC;
3023 	} else {
3024 		goto no_dma;
3025 	}
3026 
3027 	/* Determine which DMA interface to use */
3028 	if (host->use_dma == TRANS_MODE_IDMAC) {
3029 		/*
3030 		* Check ADDR_CONFIG bit in HCON to find
3031 		* IDMAC address bus width
3032 		*/
3033 		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
3034 
3035 		if (addr_config == 1) {
3036 			/* host supports IDMAC in 64-bit address mode */
3037 			host->dma_64bit_address = 1;
3038 			dev_info(host->dev,
3039 				 "IDMAC supports 64-bit address mode.\n");
3040 			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
3041 				dma_set_coherent_mask(host->dev,
3042 						      DMA_BIT_MASK(64));
3043 		} else {
3044 			/* host supports IDMAC in 32-bit address mode */
3045 			host->dma_64bit_address = 0;
3046 			dev_info(host->dev,
3047 				 "IDMAC supports 32-bit address mode.\n");
3048 		}
3049 
3050 		/* Alloc memory for sg translation */
3051 		host->sg_cpu = dmam_alloc_coherent(host->dev,
3052 						   DESC_RING_BUF_SZ,
3053 						   &host->sg_dma, GFP_KERNEL);
3054 		if (!host->sg_cpu) {
3055 			dev_err(host->dev,
3056 				"%s: could not alloc DMA memory\n",
3057 				__func__);
3058 			goto no_dma;
3059 		}
3060 
3061 		host->dma_ops = &dw_mci_idmac_ops;
3062 		dev_info(host->dev, "Using internal DMA controller.\n");
3063 	} else {
3064 		/* TRANS_MODE_EDMAC: check dma bindings again */
3065 		if ((device_property_string_array_count(dev, "dma-names") < 0) ||
3066 		    !device_property_present(dev, "dmas")) {
3067 			goto no_dma;
3068 		}
3069 		host->dma_ops = &dw_mci_edmac_ops;
3070 		dev_info(host->dev, "Using external DMA controller.\n");
3071 	}
3072 
3073 	if (host->dma_ops->init && host->dma_ops->start &&
3074 	    host->dma_ops->stop && host->dma_ops->cleanup) {
3075 		if (host->dma_ops->init(host)) {
3076 			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
3077 				__func__);
3078 			goto no_dma;
3079 		}
3080 	} else {
3081 		dev_err(host->dev, "DMA initialization not found.\n");
3082 		goto no_dma;
3083 	}
3084 
3085 	return;
3086 
3087 no_dma:
3088 	dev_info(host->dev, "Using PIO mode.\n");
3089 	host->use_dma = TRANS_MODE_PIO;
3090 }
3091 
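/*
 * CMD11 (voltage switch) watchdog: if the expected interrupt never
 * arrives, fake a response timeout so the tasklet fails the request.
 */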
3092 static void dw_mci_cmd11_timer(struct timer_list *t)
3093 {
3094 	struct dw_mci *host = from_timer(host, t, cmd11_timer);
3095 
3096 	if (host->state != STATE_SENDING_CMD11) {
3097 		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
3098 		return;
3099 	}
3100 
3101 	host->cmd_status = SDMMC_INT_RTO;
3102 	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3103 	tasklet_schedule(&host->tasklet);
3104 }
3105 
3106 static void dw_mci_cto_timer(struct timer_list *t)
3107 {
3108 	struct dw_mci *host = from_timer(host, t, cto_timer);
3109 	unsigned long irqflags;
3110 	u32 pending;
3111 
3112 	spin_lock_irqsave(&host->irq_lock, irqflags);
3113 
3114 	/*
3115 	 * If somehow we have very bad interrupt latency it's remotely possible
3116 	 * that the timer could fire while the interrupt is still pending or
3117 	 * while the interrupt is midway through running.  Let's be paranoid
3118 	 * and detect those two cases.  Note that this paranoia is somewhat
3119 	 * justified because in this function we don't actually cancel the
3120 	 * pending command in the controller--we just assume it will never come.
3121 	 */
3122 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3123 	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3124 		/* The interrupt should fire; no need to act but we can warn */
3125 		dev_warn(host->dev, "Unexpected interrupt latency\n");
3126 		goto exit;
3127 	}
3128 	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3129 		/* Presumably interrupt handler couldn't delete the timer */
3130 		dev_warn(host->dev, "CTO timeout when already completed\n");
3131 		goto exit;
3132 	}
3133 
3134 	/*
3135 	 * Continued paranoia to make sure we're in the state we expect.
3136 	 * This paranoia isn't really justified but it seems good to be safe.
3137 	 */
3138 	switch (host->state) {
3139 	case STATE_SENDING_CMD11:
3140 	case STATE_SENDING_CMD:
3141 	case STATE_SENDING_STOP:
3142 		/*
3143 		 * If CMD_DONE interrupt does NOT come in sending command
3144 		 * state, we should notify the driver to terminate current
3145 		 * transfer and report a command timeout to the core.
3146 		 */
3147 		host->cmd_status = SDMMC_INT_RTO;
3148 		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3149 		tasklet_schedule(&host->tasklet);
3150 		break;
3151 	default:
3152 		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3153 			 host->state);
3154 		break;
3155 	}
3156 
3157 exit:
3158 	spin_unlock_irqrestore(&host->irq_lock, irqflags);
3159 }
3160 
3161 static void dw_mci_dto_timer(struct timer_list *t)
3162 {
3163 	struct dw_mci *host = from_timer(host, t, dto_timer);
3164 	unsigned long irqflags;
3165 	u32 pending;
3166 
3167 	spin_lock_irqsave(&host->irq_lock, irqflags);
3168 
3169 	/*
3170 	 * The DTO timer is much longer than the CTO timer, so it's even less
3171 	 * likely that we'll hit these cases, but it pays to be paranoid.
3172 	 */
3173 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3174 	if (pending & SDMMC_INT_DATA_OVER) {
3175 		/* The interrupt should fire; no need to act but we can warn */
3176 		dev_warn(host->dev, "Unexpected data interrupt latency\n");
3177 		goto exit;
3178 	}
3179 	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3180 		/* Presumably interrupt handler couldn't delete the timer */
3181 		dev_warn(host->dev, "DTO timeout when already completed\n");
3182 		goto exit;
3183 	}
3184 
3185 	/*
3186 	 * Continued paranoia to make sure we're in the state we expect.
3187 	 * This paranoia isn't really justified but it seems good to be safe.
3188 	 */
3189 	switch (host->state) {
3190 	case STATE_SENDING_DATA:
3191 	case STATE_DATA_BUSY:
3192 		/*
3193 		 * If DTO interrupt does NOT come in sending data state,
3194 		 * we should notify the driver to terminate current transfer
3195 		 * and report a data timeout to the core.
3196 		 */
3197 		host->data_status = SDMMC_INT_DRTO;
3198 		set_bit(EVENT_DATA_ERROR, &host->pending_events);
3199 		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3200 		tasklet_schedule(&host->tasklet);
3201 		break;
3202 	default:
3203 		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3204 			 host->state);
3205 		break;
3206 	}
3207 
3208 exit:
3209 	spin_unlock_irqrestore(&host->irq_lock, irqflags);
3210 }
3211 
3212 #ifdef CONFIG_OF
3213 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3214 {
3215 	struct dw_mci_board *pdata;
3216 	struct device *dev = host->dev;
3217 	const struct dw_mci_drv_data *drv_data = host->drv_data;
3218 	int ret;
3219 	u32 clock_frequency;
3220 
3221 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3222 	if (!pdata)
3223 		return ERR_PTR(-ENOMEM);
3224 
3225 	/* find the reset controller, if one exists */
3226 	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3227 	if (IS_ERR(pdata->rstc))
3228 		return ERR_CAST(pdata->rstc);
3229 
3230 	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3231 		dev_info(dev,
3232 			 "fifo-depth property not found, using value of FIFOTH register as default\n");
3233 
3234 	device_property_read_u32(dev, "card-detect-delay",
3235 				 &pdata->detect_delay_ms);
3236 
3237 	device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3238 
3239 	if (device_property_present(dev, "fifo-watermark-aligned"))
3240 		host->wm_aligned = true;
3241 
3242 	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3243 		pdata->bus_hz = clock_frequency;
3244 
3245 	if (drv_data && drv_data->parse_dt) {
3246 		ret = drv_data->parse_dt(host);
3247 		if (ret)
3248 			return ERR_PTR(ret);
3249 	}
3250 
3251 	return pdata;
3252 }
3253 
3254 #else /* CONFIG_OF */
3255 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3256 {
3257 	return ERR_PTR(-EINVAL);
3258 }
3259 #endif /* CONFIG_OF */
3260 
3261 static void dw_mci_enable_cd(struct dw_mci *host)
3262 {
3263 	unsigned long irqflags;
3264 	u32 temp;
3265 
3266 	/*
3267 	 * No need for the CD interrupt if the slot polls for the card
3268 	 * (MMC_CAP_NEEDS_POLL) or has a working card-detect GPIO.
3269 	 */
3270 	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3271 		return;
3272 
3273 	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3274 		spin_lock_irqsave(&host->irq_lock, irqflags);
3275 		temp = mci_readl(host, INTMASK);
3276 		temp  |= SDMMC_INT_CD;
3277 		mci_writel(host, INTMASK, temp);
3278 		spin_unlock_irqrestore(&host->irq_lock, irqflags);
3279 	}
3280 }
3281 
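/*
 * Common probe path shared by the platform glue drivers: obtain
 * platform data (from DT if necessary), enable the biu/ciu clocks,
 * pulse the optional reset line, detect the host data width from HCON,
 * reset all blocks, choose DMA vs. PIO, program the FIFO thresholds,
 * request the IRQ and register the single slot.
 */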
3282 int dw_mci_probe(struct dw_mci *host)
3283 {
3284 	const struct dw_mci_drv_data *drv_data = host->drv_data;
3285 	int width, i, ret = 0;
3286 	u32 fifo_size;
3287 
3288 	if (!host->pdata) {
3289 		host->pdata = dw_mci_parse_dt(host);
3290 		if (IS_ERR(host->pdata))
3291 			return dev_err_probe(host->dev, PTR_ERR(host->pdata),
3292 					     "platform data not available\n");
3293 	}
3294 
3295 	host->biu_clk = devm_clk_get(host->dev, "biu");
3296 	if (IS_ERR(host->biu_clk)) {
3297 		dev_dbg(host->dev, "biu clock not available\n");
3298 	} else {
3299 		ret = clk_prepare_enable(host->biu_clk);
3300 		if (ret) {
3301 			dev_err(host->dev, "failed to enable biu clock\n");
3302 			return ret;
3303 		}
3304 	}
3305 
3306 	host->ciu_clk = devm_clk_get(host->dev, "ciu");
3307 	if (IS_ERR(host->ciu_clk)) {
3308 		dev_dbg(host->dev, "ciu clock not available\n");
3309 		host->bus_hz = host->pdata->bus_hz;
3310 	} else {
3311 		ret = clk_prepare_enable(host->ciu_clk);
3312 		if (ret) {
3313 			dev_err(host->dev, "failed to enable ciu clock\n");
3314 			goto err_clk_biu;
3315 		}
3316 
3317 		if (host->pdata->bus_hz) {
3318 			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3319 			if (ret)
3320 				dev_warn(host->dev,
3321 					 "Unable to set bus rate to %uHz\n",
3322 					 host->pdata->bus_hz);
3323 		}
3324 		host->bus_hz = clk_get_rate(host->ciu_clk);
3325 	}
3326 
3327 	if (!host->bus_hz) {
3328 		dev_err(host->dev,
3329 			"Platform data must supply bus speed\n");
3330 		ret = -ENODEV;
3331 		goto err_clk_ciu;
3332 	}
3333 
3334 	if (host->pdata->rstc) {
3335 		reset_control_assert(host->pdata->rstc);
3336 		usleep_range(10, 50);
3337 		reset_control_deassert(host->pdata->rstc);
3338 	}
3339 
3340 	if (drv_data && drv_data->init) {
3341 		ret = drv_data->init(host);
3342 		if (ret) {
3343 			dev_err(host->dev,
3344 				"implementation specific init failed\n");
3345 			goto err_clk_ciu;
3346 		}
3347 	}
3348 
3349 	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3350 	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3351 	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3352 
3353 	spin_lock_init(&host->lock);
3354 	spin_lock_init(&host->irq_lock);
3355 	INIT_LIST_HEAD(&host->queue);
3356 
3357 	dw_mci_init_fault(host);
3358 
3359 	/*
3360 	 * Get the host data width - this assumes that HCON has been set with
3361 	 * the correct values.
3362 	 */
3363 	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3364 	if (!i) {
3365 		host->push_data = dw_mci_push_data16;
3366 		host->pull_data = dw_mci_pull_data16;
3367 		width = 16;
3368 		host->data_shift = 1;
3369 	} else if (i == 2) {
3370 		host->push_data = dw_mci_push_data64;
3371 		host->pull_data = dw_mci_pull_data64;
3372 		width = 64;
3373 		host->data_shift = 3;
3374 	} else {
3375 		/* Check for a reserved value, and warn if it is */
3376 		WARN((i != 1),
3377 		     "HCON reports a reserved host data width!\n"
3378 		     "Defaulting to 32-bit access.\n");
3379 		host->push_data = dw_mci_push_data32;
3380 		host->pull_data = dw_mci_pull_data32;
3381 		width = 32;
3382 		host->data_shift = 2;
3383 	}
3384 
3385 	/* Reset all blocks */
3386 	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3387 		ret = -ENODEV;
3388 		goto err_clk_ciu;
3389 	}
3390 
3391 	host->dma_ops = host->pdata->dma_ops;
3392 	dw_mci_init_dma(host);
3393 
3394 	/* Clear the interrupts for the host controller */
3395 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3396 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3397 
3398 	/* Put in max timeout */
3399 	mci_writel(host, TMOUT, 0xFFFFFFFF);
3400 
3401 	/*
3402 	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
3403 	 * TX_WMark = fifo_size / 2, DMA multiple transaction size = 8
3404 	 */
3405 	if (!host->pdata->fifo_depth) {
3406 		/*
3407 		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3408 		 * have been overwritten by the bootloader, just like we're
3409 		 * about to do, so if you know the value for your hardware, you
3410 		 * should put it in the platform data.
3411 		 */
3412 		fifo_size = mci_readl(host, FIFOTH);
3413 		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3414 	} else {
3415 		fifo_size = host->pdata->fifo_depth;
3416 	}
3417 	host->fifo_depth = fifo_size;
3418 	host->fifoth_val =
3419 		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3420 	mci_writel(host, FIFOTH, host->fifoth_val);
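	/*
	 * Worked example (illustrative, not read from hardware): a
	 * 64-word FIFO gives RX_WMark = 31, TX_WMark = 32 and an MSize
	 * code of 0x2 (multiple transaction size of 8) in FIFOTH.
	 */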
3421 
3422 	/* disable clock to CIU */
3423 	mci_writel(host, CLKENA, 0);
3424 	mci_writel(host, CLKSRC, 0);
3425 
3426 	/*
3427 	 * In 2.40a spec, Data offset is changed.
3428 	 * Need to check the version-id and set data-offset for DATA register.
3429 	 */
3430 	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3431 	dev_info(host->dev, "Version ID is %04x\n", host->verid);
3432 
3433 	if (host->data_addr_override)
3434 		host->fifo_reg = host->regs + host->data_addr_override;
3435 	else if (host->verid < DW_MMC_240A)
3436 		host->fifo_reg = host->regs + DATA_OFFSET;
3437 	else
3438 		host->fifo_reg = host->regs + DATA_240A_OFFSET;
3439 
3440 	tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
3441 	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3442 			       host->irq_flags, "dw-mci", host);
3443 	if (ret)
3444 		goto err_dmaunmap;
3445 
3446 	/*
3447 	 * Enable interrupts for command done, data over, data empty,
3448 	 * receive ready and error such as transmit, receive timeout, crc error
3449 	 */
3450 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3451 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3452 		   DW_MCI_ERROR_FLAGS);
3453 	/* Enable mci interrupt */
3454 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3455 
3456 	dev_info(host->dev,
3457 		 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
3458 		 host->irq, width, fifo_size);
3459 
3460 	/* We need at least one slot to succeed */
3461 	ret = dw_mci_init_slot(host);
3462 	if (ret) {
3463 		dev_dbg(host->dev, "slot %d init failed\n", i);
3464 		goto err_dmaunmap;
3465 	}
3466 
3467 	/* Now that slots are all setup, we can enable card detect */
3468 	dw_mci_enable_cd(host);
3469 
3470 	return 0;
3471 
3472 err_dmaunmap:
3473 	if (host->use_dma && host->dma_ops->exit)
3474 		host->dma_ops->exit(host);
3475 
3476 	reset_control_assert(host->pdata->rstc);
3477 
3478 err_clk_ciu:
3479 	clk_disable_unprepare(host->ciu_clk);
3480 
3481 err_clk_biu:
3482 	clk_disable_unprepare(host->biu_clk);
3483 
3484 	return ret;
3485 }
3486 EXPORT_SYMBOL(dw_mci_probe);
3487 
3488 void dw_mci_remove(struct dw_mci *host)
3489 {
3490 	dev_dbg(host->dev, "remove slot\n");
3491 	if (host->slot)
3492 		dw_mci_cleanup_slot(host->slot);
3493 
3494 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3495 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3496 
3497 	/* disable clock to CIU */
3498 	mci_writel(host, CLKENA, 0);
3499 	mci_writel(host, CLKSRC, 0);
3500 
3501 	if (host->use_dma && host->dma_ops->exit)
3502 		host->dma_ops->exit(host);
3503 
3504 	reset_control_assert(host->pdata->rstc);
3505 
3506 	clk_disable_unprepare(host->ciu_clk);
3507 	clk_disable_unprepare(host->biu_clk);
3508 }
3509 EXPORT_SYMBOL(dw_mci_remove);
3510 
3511 
3512 
3513 #ifdef CONFIG_PM
3514 int dw_mci_runtime_suspend(struct device *dev)
3515 {
3516 	struct dw_mci *host = dev_get_drvdata(dev);
3517 
3518 	if (host->use_dma && host->dma_ops->exit)
3519 		host->dma_ops->exit(host);
3520 
3521 	clk_disable_unprepare(host->ciu_clk);
3522 
3523 	if (host->slot &&
3524 	    (mmc_can_gpio_cd(host->slot->mmc) ||
3525 	     !mmc_card_is_removable(host->slot->mmc)))
3526 		clk_disable_unprepare(host->biu_clk);
3527 
3528 	return 0;
3529 }
3530 EXPORT_SYMBOL(dw_mci_runtime_suspend);
3531 
3532 int dw_mci_runtime_resume(struct device *dev)
3533 {
3534 	int ret = 0;
3535 	struct dw_mci *host = dev_get_drvdata(dev);
3536 
3537 	if (host->slot &&
3538 	    (mmc_can_gpio_cd(host->slot->mmc) ||
3539 	     !mmc_card_is_removable(host->slot->mmc))) {
3540 		ret = clk_prepare_enable(host->biu_clk);
3541 		if (ret)
3542 			return ret;
3543 	}
3544 
3545 	ret = clk_prepare_enable(host->ciu_clk);
3546 	if (ret)
3547 		goto err;
3548 
3549 	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3550 		clk_disable_unprepare(host->ciu_clk);
3551 		ret = -ENODEV;
3552 		goto err;
3553 	}
3554 
3555 	if (host->use_dma && host->dma_ops->init)
3556 		host->dma_ops->init(host);
3557 
3558 	/*
3559 	 * Restore the initial value of the FIFOTH register
3560 	 * and invalidate prev_blksz with zero.
3561 	 */
3562 	mci_writel(host, FIFOTH, host->fifoth_val);
3563 	host->prev_blksz = 0;
3564 
3565 	/* Put in max timeout */
3566 	mci_writel(host, TMOUT, 0xFFFFFFFF);
3567 
3568 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3569 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3570 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3571 		   DW_MCI_ERROR_FLAGS);
3572 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3573 
3574 
3575 	if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3576 		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3577 
3578 	/* Force setup bus to guarantee available clock output */
3579 	dw_mci_setup_bus(host->slot, true);
3580 
3581 	/* Re-enable SDIO interrupts. */
3582 	if (sdio_irq_claimed(host->slot->mmc))
3583 		__dw_mci_enable_sdio_irq(host->slot, 1);
3584 
3585 	/* Now that slots are all setup, we can enable card detect */
3586 	dw_mci_enable_cd(host);
3587 
3588 	return 0;
3589 
3590 err:
3591 	if (host->slot &&
3592 	    (mmc_can_gpio_cd(host->slot->mmc) ||
3593 	     !mmc_card_is_removable(host->slot->mmc)))
3594 		clk_disable_unprepare(host->biu_clk);
3595 
3596 	return ret;
3597 }
3598 EXPORT_SYMBOL(dw_mci_runtime_resume);
3599 #endif /* CONFIG_PM */
3600 
3601 static int __init dw_mci_init(void)
3602 {
3603 	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3604 	return 0;
3605 }
3606 
3607 static void __exit dw_mci_exit(void)
3608 {
3609 }
3610 
3611 module_init(dw_mci_init);
3612 module_exit(dw_mci_exit);
3613 
3614 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3615 MODULE_AUTHOR("NXP Semiconductor VietNam");
3616 MODULE_AUTHOR("Imagination Technologies Ltd");
3617 MODULE_LICENSE("GPL v2");
3618