1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 *
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 *
7 * Thanks to the following companies for their support:
8 *
9 * - JMicron (hardware and technical support)
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/delay.h>
14 #include <linux/dmaengine.h>
15 #include <linux/ktime.h>
16 #include <linux/highmem.h>
17 #include <linux/io.h>
18 #include <linux/module.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <linux/sizes.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/of.h>
26
27 #include <linux/leds.h>
28
29 #include <linux/mmc/mmc.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/card.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/slot-gpio.h>
34
35 #include "sdhci.h"
36
37 #define DRIVER_NAME "sdhci"
38
39 #define DBG(f, x...) \
40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
41
42 #define SDHCI_DUMP(f, x...) \
43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
44
45 #define MAX_TUNING_LOOP 40
46
47 static unsigned int debug_quirks = 0;
48 static unsigned int debug_quirks2;
49
50 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
51
52 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
53
54 void sdhci_dumpregs(struct sdhci_host *host)
55 {
56 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
57
58 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
59 sdhci_readl(host, SDHCI_DMA_ADDRESS),
60 sdhci_readw(host, SDHCI_HOST_VERSION));
61 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
62 sdhci_readw(host, SDHCI_BLOCK_SIZE),
63 sdhci_readw(host, SDHCI_BLOCK_COUNT));
64 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
65 sdhci_readl(host, SDHCI_ARGUMENT),
66 sdhci_readw(host, SDHCI_TRANSFER_MODE));
67 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
68 sdhci_readl(host, SDHCI_PRESENT_STATE),
69 sdhci_readb(host, SDHCI_HOST_CONTROL));
70 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
71 sdhci_readb(host, SDHCI_POWER_CONTROL),
72 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
73 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
74 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
75 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
76 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
77 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
78 sdhci_readl(host, SDHCI_INT_STATUS));
79 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
80 sdhci_readl(host, SDHCI_INT_ENABLE),
81 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
82 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
83 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
84 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
85 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
86 sdhci_readl(host, SDHCI_CAPABILITIES),
87 sdhci_readl(host, SDHCI_CAPABILITIES_1));
88 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
89 sdhci_readw(host, SDHCI_COMMAND),
90 sdhci_readl(host, SDHCI_MAX_CURRENT));
91 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
92 sdhci_readl(host, SDHCI_RESPONSE),
93 sdhci_readl(host, SDHCI_RESPONSE + 4));
94 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
95 sdhci_readl(host, SDHCI_RESPONSE + 8),
96 sdhci_readl(host, SDHCI_RESPONSE + 12));
97 SDHCI_DUMP("Host ctl2: 0x%08x\n",
98 sdhci_readw(host, SDHCI_HOST_CONTROL2));
99
100 if (host->flags & SDHCI_USE_ADMA) {
101 if (host->flags & SDHCI_USE_64_BIT_DMA) {
102 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
103 sdhci_readl(host, SDHCI_ADMA_ERROR),
104 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
105 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
106 } else {
107 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
108 sdhci_readl(host, SDHCI_ADMA_ERROR),
109 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
110 }
111 }
112
113 if (host->ops->dump_vendor_regs)
114 host->ops->dump_vendor_regs(host);
115
116 SDHCI_DUMP("============================================\n");
117 }
118 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
119
120 /*****************************************************************************\
121 * *
122 * Low level functions *
123 * *
124 \*****************************************************************************/
125
126 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
127 {
128 u16 ctrl2;
129
130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
131 if (ctrl2 & SDHCI_CTRL_V4_MODE)
132 return;
133
134 ctrl2 |= SDHCI_CTRL_V4_MODE;
135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
136 }
137
138 /*
139 * This can be called before sdhci_add_host() by a vendor's host controller
140 * driver to enable v4 mode if supported.
141 */
142 void sdhci_enable_v4_mode(struct sdhci_host *host)
143 {
144 host->v4_mode = true;
145 sdhci_do_enable_v4_mode(host);
146 }
147 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
148
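/*
 * A command occupies the DATA lines if it transfers data or uses busy
 * signalling on DAT0 (R1b responses); such commands share the data-path
 * state and the data timer with real data transfers.
 */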
149 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
150 {
151 return cmd->data || cmd->flags & MMC_RSP_BUSY;
152 }
153
154 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
155 {
156 u32 present;
157
158 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
159 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
160 return;
161
162 if (enable) {
163 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
164 SDHCI_CARD_PRESENT;
165
166 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
167 SDHCI_INT_CARD_INSERT;
168 } else {
169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
170 }
171
172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
174 }
175
176 static void sdhci_enable_card_detection(struct sdhci_host *host)
177 {
178 sdhci_set_card_detection(host, true);
179 }
180
181 static void sdhci_disable_card_detection(struct sdhci_host *host)
182 {
183 sdhci_set_card_detection(host, false);
184 }
185
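/*
 * Take a runtime PM usage reference while SD bus power is on so the
 * controller is not runtime suspended; dropped again in
 * sdhci_runtime_pm_bus_off(). See the SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
 * handling in sdhci_reset().
 */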
186 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
187 {
188 if (host->bus_on)
189 return;
190 host->bus_on = true;
191 pm_runtime_get_noresume(mmc_dev(host->mmc));
192 }
193
194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
195 {
196 if (!host->bus_on)
197 return;
198 host->bus_on = false;
199 pm_runtime_put_noidle(mmc_dev(host->mmc));
200 }
201
202 void sdhci_reset(struct sdhci_host *host, u8 mask)
203 {
204 ktime_t timeout;
205
206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
207
208 if (mask & SDHCI_RESET_ALL) {
209 host->clock = 0;
210 /* Reset-all turns off SD Bus Power */
211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
212 sdhci_runtime_pm_bus_off(host);
213 }
214
215 /* Wait max 100 ms */
216 timeout = ktime_add_ms(ktime_get(), 100);
217
218 /* hw clears the bit when it's done */
219 while (1) {
220 bool timedout = ktime_after(ktime_get(), timeout);
221
222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
223 break;
224 if (timedout) {
225 pr_err("%s: Reset 0x%x never completed.\n",
226 mmc_hostname(host->mmc), (int)mask);
227 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
228 sdhci_dumpregs(host);
229 return;
230 }
231 udelay(10);
232 }
233 }
234 EXPORT_SYMBOL_GPL(sdhci_reset);
235
236 static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
237 {
238 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
239 struct mmc_host *mmc = host->mmc;
240
241 if (!mmc->ops->get_cd(mmc))
242 return false;
243 }
244
245 host->ops->reset(host, mask);
246
247 return true;
248 }
249
250 static void sdhci_reset_for_all(struct sdhci_host *host)
251 {
252 if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
253 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
254 if (host->ops->enable_dma)
255 host->ops->enable_dma(host);
256 }
257 /* Resetting the controller clears many settings; invalidate the cached ones */
258 host->preset_enabled = false;
259 }
260 }
261
262 enum sdhci_reset_reason {
263 SDHCI_RESET_FOR_INIT,
264 SDHCI_RESET_FOR_REQUEST_ERROR,
265 SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
266 SDHCI_RESET_FOR_TUNING_ABORT,
267 SDHCI_RESET_FOR_CARD_REMOVED,
268 SDHCI_RESET_FOR_CQE_RECOVERY,
269 };
270
271 static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
272 {
273 if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) {
274 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
275 return;
276 }
277
278 switch (reason) {
279 case SDHCI_RESET_FOR_INIT:
280 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
281 break;
282 case SDHCI_RESET_FOR_REQUEST_ERROR:
283 case SDHCI_RESET_FOR_TUNING_ABORT:
284 case SDHCI_RESET_FOR_CARD_REMOVED:
285 case SDHCI_RESET_FOR_CQE_RECOVERY:
286 sdhci_do_reset(host, SDHCI_RESET_CMD);
287 sdhci_do_reset(host, SDHCI_RESET_DATA);
288 break;
289 case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
290 sdhci_do_reset(host, SDHCI_RESET_DATA);
291 break;
292 }
293 }
294
295 #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)
296
297 static void sdhci_set_default_irqs(struct sdhci_host *host)
298 {
299 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
300 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
301 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
302 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
303 SDHCI_INT_RESPONSE;
304
305 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
306 host->tuning_mode == SDHCI_TUNING_MODE_3)
307 host->ier |= SDHCI_INT_RETUNE;
308
309 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
310 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
311 }
312
313 static void sdhci_config_dma(struct sdhci_host *host)
314 {
315 u8 ctrl;
316 u16 ctrl2;
317
318 if (host->version < SDHCI_SPEC_200)
319 return;
320
321 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
322
323 /*
324 * Always adjust the DMA selection as some controllers
325 * (e.g. JMicron) can't do PIO properly when the selection
326 * is ADMA.
327 */
328 ctrl &= ~SDHCI_CTRL_DMA_MASK;
329 if (!(host->flags & SDHCI_REQ_USE_DMA))
330 goto out;
331
332 /* Note if DMA Select is zero then SDMA is selected */
333 if (host->flags & SDHCI_USE_ADMA)
334 ctrl |= SDHCI_CTRL_ADMA32;
335
336 if (host->flags & SDHCI_USE_64_BIT_DMA) {
337 /*
338 * If v4 mode, all supported DMA can be 64-bit addressing if
339 * controller supports 64-bit system address, otherwise only
340 * ADMA can support 64-bit addressing.
341 */
342 if (host->v4_mode) {
343 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
344 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
345 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
346 } else if (host->flags & SDHCI_USE_ADMA) {
347 /*
348 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
349 * set SDHCI_CTRL_ADMA64.
350 */
351 ctrl |= SDHCI_CTRL_ADMA64;
352 }
353 }
354
355 out:
356 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
357 }
358
359 static void sdhci_init(struct sdhci_host *host, int soft)
360 {
361 struct mmc_host *mmc = host->mmc;
362 unsigned long flags;
363
364 if (soft)
365 sdhci_reset_for(host, INIT);
366 else
367 sdhci_reset_for_all(host);
368
369 if (host->v4_mode)
370 sdhci_do_enable_v4_mode(host);
371
372 spin_lock_irqsave(&host->lock, flags);
373 sdhci_set_default_irqs(host);
374 spin_unlock_irqrestore(&host->lock, flags);
375
376 host->cqe_on = false;
377
378 if (soft) {
379 /* force clock reconfiguration */
380 host->clock = 0;
381 host->reinit_uhs = true;
382 mmc->ops->set_ios(mmc, &mmc->ios);
383 }
384 }
385
386 static void sdhci_reinit(struct sdhci_host *host)
387 {
388 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
389
390 sdhci_init(host, 0);
391 sdhci_enable_card_detection(host);
392
393 /*
394 * A change to the card detect bits indicates a change in present state,
395 * refer sdhci_set_card_detection(). A card detect interrupt might have
396 * been missed while the host controller was being reset, so trigger a
397 * rescan to check.
398 */
399 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
400 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
401 }
402
403 static void __sdhci_led_activate(struct sdhci_host *host)
404 {
405 u8 ctrl;
406
407 if (host->quirks & SDHCI_QUIRK_NO_LED)
408 return;
409
410 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
411 ctrl |= SDHCI_CTRL_LED;
412 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
413 }
414
415 static void __sdhci_led_deactivate(struct sdhci_host *host)
416 {
417 u8 ctrl;
418
419 if (host->quirks & SDHCI_QUIRK_NO_LED)
420 return;
421
422 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
423 ctrl &= ~SDHCI_CTRL_LED;
424 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
425 }
426
427 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
428 static void sdhci_led_control(struct led_classdev *led,
429 enum led_brightness brightness)
430 {
431 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
432 unsigned long flags;
433
434 spin_lock_irqsave(&host->lock, flags);
435
436 if (host->runtime_suspended)
437 goto out;
438
439 if (brightness == LED_OFF)
440 __sdhci_led_deactivate(host);
441 else
442 __sdhci_led_activate(host);
443 out:
444 spin_unlock_irqrestore(&host->lock, flags);
445 }
446
447 static int sdhci_led_register(struct sdhci_host *host)
448 {
449 struct mmc_host *mmc = host->mmc;
450
451 if (host->quirks & SDHCI_QUIRK_NO_LED)
452 return 0;
453
454 snprintf(host->led_name, sizeof(host->led_name),
455 "%s::", mmc_hostname(mmc));
456
457 host->led.name = host->led_name;
458 host->led.brightness = LED_OFF;
459 host->led.default_trigger = mmc_hostname(mmc);
460 host->led.brightness_set = sdhci_led_control;
461
462 return led_classdev_register(mmc_dev(mmc), &host->led);
463 }
464
465 static void sdhci_led_unregister(struct sdhci_host *host)
466 {
467 if (host->quirks & SDHCI_QUIRK_NO_LED)
468 return;
469
470 led_classdev_unregister(&host->led);
471 }
472
473 static inline void sdhci_led_activate(struct sdhci_host *host)
474 {
475 }
476
477 static inline void sdhci_led_deactivate(struct sdhci_host *host)
478 {
479 }
480
481 #else
482
483 static inline int sdhci_led_register(struct sdhci_host *host)
484 {
485 return 0;
486 }
487
488 static inline void sdhci_led_unregister(struct sdhci_host *host)
489 {
490 }
491
492 static inline void sdhci_led_activate(struct sdhci_host *host)
493 {
494 __sdhci_led_activate(host);
495 }
496
497 static inline void sdhci_led_deactivate(struct sdhci_host *host)
498 {
499 __sdhci_led_deactivate(host);
500 }
501
502 #endif
503
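/*
 * Two request timers are used: host->data_timer for commands that occupy
 * the DATA lines (data transfers or busy signalling) and host->timer for
 * everything else, so command and data phases can time out independently.
 */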
504 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
505 unsigned long timeout)
506 {
507 if (sdhci_data_line_cmd(mrq->cmd))
508 mod_timer(&host->data_timer, timeout);
509 else
510 mod_timer(&host->timer, timeout);
511 }
512
513 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
514 {
515 if (sdhci_data_line_cmd(mrq->cmd))
516 del_timer(&host->data_timer);
517 else
518 del_timer(&host->timer);
519 }
520
521 static inline bool sdhci_has_requests(struct sdhci_host *host)
522 {
523 return host->cmd || host->data_cmd;
524 }
525
526 /*****************************************************************************\
527 * *
528 * Core functions *
529 * *
530 \*****************************************************************************/
531
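/*
 * PIO transfers go through the 32-bit Buffer Data Port register
 * (SDHCI_BUFFER): reads pull one word at a time and unpack it byte by byte
 * into the sg_miter buffer; sdhci_write_block_pio() below does the reverse.
 */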
532 static void sdhci_read_block_pio(struct sdhci_host *host)
533 {
534 unsigned long flags;
535 size_t blksize, len, chunk;
536 u32 scratch;
537 u8 *buf;
538
539 DBG("PIO reading\n");
540
541 blksize = host->data->blksz;
542 chunk = 0;
543
544 local_irq_save(flags);
545
546 while (blksize) {
547 BUG_ON(!sg_miter_next(&host->sg_miter));
548
549 len = min(host->sg_miter.length, blksize);
550
551 blksize -= len;
552 host->sg_miter.consumed = len;
553
554 buf = host->sg_miter.addr;
555
556 while (len) {
557 if (chunk == 0) {
558 scratch = sdhci_readl(host, SDHCI_BUFFER);
559 chunk = 4;
560 }
561
562 *buf = scratch & 0xFF;
563
564 buf++;
565 scratch >>= 8;
566 chunk--;
567 len--;
568 }
569 }
570
571 sg_miter_stop(&host->sg_miter);
572
573 local_irq_restore(flags);
574 }
575
576 static void sdhci_write_block_pio(struct sdhci_host *host)
577 {
578 unsigned long flags;
579 size_t blksize, len, chunk;
580 u32 scratch;
581 u8 *buf;
582
583 DBG("PIO writing\n");
584
585 blksize = host->data->blksz;
586 chunk = 0;
587 scratch = 0;
588
589 local_irq_save(flags);
590
591 while (blksize) {
592 BUG_ON(!sg_miter_next(&host->sg_miter));
593
594 len = min(host->sg_miter.length, blksize);
595
596 blksize -= len;
597 host->sg_miter.consumed = len;
598
599 buf = host->sg_miter.addr;
600
601 while (len) {
602 scratch |= (u32)*buf << (chunk * 8);
603
604 buf++;
605 chunk++;
606 len--;
607
608 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
609 sdhci_writel(host, scratch, SDHCI_BUFFER);
610 chunk = 0;
611 scratch = 0;
612 }
613 }
614 }
615
616 sg_miter_stop(&host->sg_miter);
617
618 local_irq_restore(flags);
619 }
620
621 static void sdhci_transfer_pio(struct sdhci_host *host)
622 {
623 u32 mask;
624
625 if (host->blocks == 0)
626 return;
627
628 if (host->data->flags & MMC_DATA_READ)
629 mask = SDHCI_DATA_AVAILABLE;
630 else
631 mask = SDHCI_SPACE_AVAILABLE;
632
633 /*
634 * Some controllers (JMicron JMB38x) mess up the buffer bits
635 * for transfers < 4 bytes. As long as it is just one block,
636 * we can ignore the bits.
637 */
638 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
639 (host->data->blocks == 1))
640 mask = ~0;
641
642 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
643 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
644 udelay(100);
645
646 if (host->data->flags & MMC_DATA_READ)
647 sdhci_read_block_pio(host);
648 else
649 sdhci_write_block_pio(host);
650
651 host->blocks--;
652 if (host->blocks == 0)
653 break;
654 }
655
656 DBG("PIO transfer complete.\n");
657 }
658
659 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
660 struct mmc_data *data, int cookie)
661 {
662 int sg_count;
663
664 /*
665 * If the data buffers are already mapped, return the previous
666 * dma_map_sg() result.
667 */
668 if (data->host_cookie == COOKIE_PRE_MAPPED)
669 return data->sg_count;
670
671 /* Bounce write requests to the bounce buffer */
672 if (host->bounce_buffer) {
673 unsigned int length = data->blksz * data->blocks;
674
675 if (length > host->bounce_buffer_size) {
676 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
677 mmc_hostname(host->mmc), length,
678 host->bounce_buffer_size);
679 return -EIO;
680 }
681 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
682 /* Copy the data to the bounce buffer */
683 if (host->ops->copy_to_bounce_buffer) {
684 host->ops->copy_to_bounce_buffer(host,
685 data, length);
686 } else {
687 sg_copy_to_buffer(data->sg, data->sg_len,
688 host->bounce_buffer, length);
689 }
690 }
691 /* Switch ownership to the DMA */
692 dma_sync_single_for_device(mmc_dev(host->mmc),
693 host->bounce_addr,
694 host->bounce_buffer_size,
695 mmc_get_dma_dir(data));
696 /* Just a dummy value */
697 sg_count = 1;
698 } else {
699 /* Just access the data directly from memory */
700 sg_count = dma_map_sg(mmc_dev(host->mmc),
701 data->sg, data->sg_len,
702 mmc_get_dma_dir(data));
703 }
704
705 if (sg_count == 0)
706 return -ENOSPC;
707
708 data->sg_count = sg_count;
709 data->host_cookie = cookie;
710
711 return sg_count;
712 }
713
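/*
 * Helpers that map an sg entry with local interrupts disabled so the ADMA
 * alignment bounce data can be copied atomically in
 * sdhci_adma_table_pre()/sdhci_adma_table_post().
 */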
714 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
715 {
716 local_irq_save(*flags);
717 return kmap_atomic(sg_page(sg)) + sg->offset;
718 }
719
720 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
721 {
722 kunmap_atomic(buffer);
723 local_irq_restore(*flags);
724 }
725
726 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
727 dma_addr_t addr, int len, unsigned int cmd)
728 {
729 struct sdhci_adma2_64_desc *dma_desc = *desc;
730
731 /* 32-bit and 64-bit descriptors have these members in same position */
732 dma_desc->cmd = cpu_to_le16(cmd);
733 dma_desc->len = cpu_to_le16(len);
734 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
735
736 if (host->flags & SDHCI_USE_64_BIT_DMA)
737 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
738
739 *desc += host->desc_sz;
740 }
741 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
742
743 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
744 void **desc, dma_addr_t addr,
745 int len, unsigned int cmd)
746 {
747 if (host->ops->adma_write_desc)
748 host->ops->adma_write_desc(host, desc, addr, len, cmd);
749 else
750 sdhci_adma_write_desc(host, desc, addr, len, cmd);
751 }
752
753 static void sdhci_adma_mark_end(void *desc)
754 {
755 struct sdhci_adma2_64_desc *dma_desc = desc;
756
757 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
758 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
759 }
760
761 static void sdhci_adma_table_pre(struct sdhci_host *host,
762 struct mmc_data *data, int sg_count)
763 {
764 struct scatterlist *sg;
765 unsigned long flags;
766 dma_addr_t addr, align_addr;
767 void *desc, *align;
768 char *buffer;
769 int len, offset, i;
770
771 /*
772 * The spec does not specify endianness of descriptor table.
773 * We currently guess that it is LE.
774 */
775
776 host->sg_count = sg_count;
777
778 desc = host->adma_table;
779 align = host->align_buffer;
780
781 align_addr = host->align_addr;
782
783 for_each_sg(data->sg, sg, host->sg_count, i) {
784 addr = sg_dma_address(sg);
785 len = sg_dma_len(sg);
786
787 /*
788 * The SDHCI specification states that ADMA addresses must
789 * be 32-bit aligned. If they aren't, then we use a bounce
790 * buffer for the (up to three) bytes that screw up the
791 * alignment.
792 */
793 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
794 SDHCI_ADMA2_MASK;
795 if (offset) {
796 if (data->flags & MMC_DATA_WRITE) {
797 buffer = sdhci_kmap_atomic(sg, &flags);
798 memcpy(align, buffer, offset);
799 sdhci_kunmap_atomic(buffer, &flags);
800 }
801
802 /* tran, valid */
803 __sdhci_adma_write_desc(host, &desc, align_addr,
804 offset, ADMA2_TRAN_VALID);
805
806 BUG_ON(offset > 65536);
807
808 align += SDHCI_ADMA2_ALIGN;
809 align_addr += SDHCI_ADMA2_ALIGN;
810
811 addr += offset;
812 len -= offset;
813 }
814
815 /*
816 * The block layer forces a minimum segment size of PAGE_SIZE,
817 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
818 * multiple descriptors, noting that the ADMA table is sized
819 * for 4KiB chunks anyway, so it will be big enough.
820 */
821 while (len > host->max_adma) {
822 int n = 32 * 1024; /* 32 KiB */
823
824 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
825 addr += n;
826 len -= n;
827 }
828
829 /* tran, valid */
830 if (len)
831 __sdhci_adma_write_desc(host, &desc, addr, len,
832 ADMA2_TRAN_VALID);
833
834 /*
835 * If this triggers then we have a calculation bug
836 * somewhere. :/
837 */
838 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
839 }
840
841 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
842 /* Mark the last descriptor as the terminating descriptor */
843 if (desc != host->adma_table) {
844 desc -= host->desc_sz;
845 sdhci_adma_mark_end(desc);
846 }
847 } else {
848 /* Add a terminating entry - nop, end, valid */
849 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
850 }
851 }
852
853 static void sdhci_adma_table_post(struct sdhci_host *host,
854 struct mmc_data *data)
855 {
856 struct scatterlist *sg;
857 int i, size;
858 void *align;
859 char *buffer;
860 unsigned long flags;
861
862 if (data->flags & MMC_DATA_READ) {
863 bool has_unaligned = false;
864
865 /* Do a quick scan of the SG list for any unaligned mappings */
866 for_each_sg(data->sg, sg, host->sg_count, i)
867 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
868 has_unaligned = true;
869 break;
870 }
871
872 if (has_unaligned) {
873 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
874 data->sg_len, DMA_FROM_DEVICE);
875
876 align = host->align_buffer;
877
878 for_each_sg(data->sg, sg, host->sg_count, i) {
879 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
880 size = SDHCI_ADMA2_ALIGN -
881 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
882
883 buffer = sdhci_kmap_atomic(sg, &flags);
884 memcpy(buffer, align, size);
885 sdhci_kunmap_atomic(buffer, &flags);
886
887 align += SDHCI_ADMA2_ALIGN;
888 }
889 }
890 }
891 }
892 }
893
894 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
895 {
896 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
897 if (host->flags & SDHCI_USE_64_BIT_DMA)
898 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
899 }
900
901 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
902 {
903 if (host->bounce_buffer)
904 return host->bounce_addr;
905 else
906 return sg_dma_address(host->data->sg);
907 }
908
909 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
910 {
911 if (host->v4_mode)
912 sdhci_set_adma_addr(host, addr);
913 else
914 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
915 }
916
917 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
918 struct mmc_command *cmd,
919 struct mmc_data *data)
920 {
921 unsigned int target_timeout;
922
923 /* timeout in us */
924 if (!data) {
925 target_timeout = cmd->busy_timeout * 1000;
926 } else {
927 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
928 if (host->clock && data->timeout_clks) {
929 unsigned long long val;
930
931 /*
932 * data->timeout_clks is in units of clock cycles.
933 * host->clock is in Hz. target_timeout is in us.
934 * Hence, us = 1000000 * cycles / Hz. Round up.
935 */
936 val = 1000000ULL * data->timeout_clks;
937 if (do_div(val, host->clock))
938 target_timeout++;
939 target_timeout += val;
940 }
941 }
942
943 return target_timeout;
944 }
945
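/*
 * Compute a software data timeout (in ns) for the whole transfer; used from
 * __sdhci_set_timeout() when the hardware data timeout has to be disabled
 * because the requested timeout is too large for the hardware counter.
 */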
946 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
947 struct mmc_command *cmd)
948 {
949 struct mmc_data *data = cmd->data;
950 struct mmc_host *mmc = host->mmc;
951 struct mmc_ios *ios = &mmc->ios;
952 unsigned char bus_width = 1 << ios->bus_width;
953 unsigned int blksz;
954 unsigned int freq;
955 u64 target_timeout;
956 u64 transfer_time;
957
958 target_timeout = sdhci_target_timeout(host, cmd, data);
959 target_timeout *= NSEC_PER_USEC;
960
961 if (data) {
962 blksz = data->blksz;
963 freq = mmc->actual_clock ? : host->clock;
964 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
965 do_div(transfer_time, freq);
966 /* multiply by '2' to account for any unknowns */
967 transfer_time = transfer_time * 2;
968 /* calculate timeout for the entire data */
969 host->data_timeout = data->blocks * target_timeout +
970 transfer_time;
971 } else {
972 host->data_timeout = target_timeout;
973 }
974
975 if (host->data_timeout)
976 host->data_timeout += MMC_CMD_TRANSFER_TIME;
977 }
978
979 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
980 bool *too_big)
981 {
982 u8 count;
983 struct mmc_data *data;
984 unsigned target_timeout, current_timeout;
985
986 *too_big = false;
987
988 /*
989 * If the host controller provides us with an incorrect timeout
990 * value, just skip the check and use the maximum. The hardware may take
991 * longer to time out, but that's much better than having a too-short
992 * timeout value.
993 */
994 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
995 return host->max_timeout_count;
996
997 /* Unspecified command, assume max */
998 if (cmd == NULL)
999 return host->max_timeout_count;
1000
1001 data = cmd->data;
1002 /* Unspecified timeout, assume max */
1003 if (!data && !cmd->busy_timeout)
1004 return host->max_timeout_count;
1005
1006 /* timeout in us */
1007 target_timeout = sdhci_target_timeout(host, cmd, data);
1008
1009 /*
1010 * Figure out needed cycles.
1011 * We do this in steps in order to fit inside a 32 bit int.
1012 * The first step is the minimum timeout, which will have a
1013 * minimum resolution of 6 bits:
1014 * (1) 2^13*1000 > 2^22,
1015 * (2) host->timeout_clk < 2^16
1016 * =>
1017 * (1) / (2) > 2^6
1018 */
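/*
 * In other words, the hardware timeout for a given 'count' is
 * 2^(13 + count) timeout-clock cycles. As a rough illustration, with a
 * 50 MHz timeout clock, count 0 is about 164 us and count 14 about 2.7 s.
 */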
1019 count = 0;
1020 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
1021 while (current_timeout < target_timeout) {
1022 count++;
1023 current_timeout <<= 1;
1024 if (count > host->max_timeout_count) {
1025 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
1026 DBG("Too large timeout 0x%x requested for CMD%d!\n",
1027 count, cmd->opcode);
1028 count = host->max_timeout_count;
1029 *too_big = true;
1030 break;
1031 }
1032 }
1033
1034 return count;
1035 }
1036
1037 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
1038 {
1039 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1040 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1041
1042 if (host->flags & SDHCI_REQ_USE_DMA)
1043 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
1044 else
1045 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
1046
1047 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
1048 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
1049 else
1050 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1051
1052 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1053 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1054 }
1055
1056 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
1057 {
1058 if (enable)
1059 host->ier |= SDHCI_INT_DATA_TIMEOUT;
1060 else
1061 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1062 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1063 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1064 }
1065 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1066
1067 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1068 {
1069 bool too_big = false;
1070 u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1071
1072 if (too_big &&
1073 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1074 sdhci_calc_sw_timeout(host, cmd);
1075 sdhci_set_data_timeout_irq(host, false);
1076 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1077 sdhci_set_data_timeout_irq(host, true);
1078 }
1079
1080 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1081 }
1082 EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1083
1084 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1085 {
1086 if (host->ops->set_timeout)
1087 host->ops->set_timeout(host, cmd);
1088 else
1089 __sdhci_set_timeout(host, cmd);
1090 }
1091
1092 static void sdhci_initialize_data(struct sdhci_host *host,
1093 struct mmc_data *data)
1094 {
1095 WARN_ON(host->data);
1096
1097 /* Sanity checks */
1098 BUG_ON(data->blksz * data->blocks > 524288);
1099 BUG_ON(data->blksz > host->mmc->max_blk_size);
1100 BUG_ON(data->blocks > 65535);
1101
1102 host->data = data;
1103 host->data_early = 0;
1104 host->data->bytes_xfered = 0;
1105 }
1106
1107 static inline void sdhci_set_block_info(struct sdhci_host *host,
1108 struct mmc_data *data)
1109 {
1110 /* Set the DMA boundary value and block size */
1111 sdhci_writew(host,
1112 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1113 SDHCI_BLOCK_SIZE);
1114 /*
1115 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
1116 * can be supported, in which case the 16-bit block count register must be 0.
1117 */
1118 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1119 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1120 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1121 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1122 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1123 } else {
1124 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1125 }
1126 }
1127
1128 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1129 {
1130 struct mmc_data *data = cmd->data;
1131
1132 sdhci_initialize_data(host, data);
1133
1134 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1135 struct scatterlist *sg;
1136 unsigned int length_mask, offset_mask;
1137 int i;
1138
1139 host->flags |= SDHCI_REQ_USE_DMA;
1140
1141 /*
1142 * FIXME: This doesn't account for merging when mapping the
1143 * scatterlist.
1144 *
1145 * The assumption here being that alignment and lengths are
1146 * the same after DMA mapping to device address space.
1147 */
1148 length_mask = 0;
1149 offset_mask = 0;
1150 if (host->flags & SDHCI_USE_ADMA) {
1151 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1152 length_mask = 3;
1153 /*
1154 * As we use up to 3 byte chunks to work
1155 * around alignment problems, we need to
1156 * check the offset as well.
1157 */
1158 offset_mask = 3;
1159 }
1160 } else {
1161 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1162 length_mask = 3;
1163 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1164 offset_mask = 3;
1165 }
1166
1167 if (unlikely(length_mask | offset_mask)) {
1168 for_each_sg(data->sg, sg, data->sg_len, i) {
1169 if (sg->length & length_mask) {
1170 DBG("Reverting to PIO because of transfer size (%d)\n",
1171 sg->length);
1172 host->flags &= ~SDHCI_REQ_USE_DMA;
1173 break;
1174 }
1175 if (sg->offset & offset_mask) {
1176 DBG("Reverting to PIO because of bad alignment\n");
1177 host->flags &= ~SDHCI_REQ_USE_DMA;
1178 break;
1179 }
1180 }
1181 }
1182 }
1183
1184 if (host->flags & SDHCI_REQ_USE_DMA) {
1185 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1186
1187 if (sg_cnt <= 0) {
1188 /*
1189 * This only happens when someone fed
1190 * us an invalid request.
1191 */
1192 WARN_ON(1);
1193 host->flags &= ~SDHCI_REQ_USE_DMA;
1194 } else if (host->flags & SDHCI_USE_ADMA) {
1195 sdhci_adma_table_pre(host, data, sg_cnt);
1196 sdhci_set_adma_addr(host, host->adma_addr);
1197 } else {
1198 WARN_ON(sg_cnt != 1);
1199 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1200 }
1201 }
1202
1203 sdhci_config_dma(host);
1204
1205 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1206 int flags;
1207
1208 flags = SG_MITER_ATOMIC;
1209 if (host->data->flags & MMC_DATA_READ)
1210 flags |= SG_MITER_TO_SG;
1211 else
1212 flags |= SG_MITER_FROM_SG;
1213 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1214 host->blocks = data->blocks;
1215 }
1216
1217 sdhci_set_transfer_irqs(host);
1218
1219 sdhci_set_block_info(host, data);
1220 }
1221
1222 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1223
1224 static int sdhci_external_dma_init(struct sdhci_host *host)
1225 {
1226 int ret = 0;
1227 struct mmc_host *mmc = host->mmc;
1228
1229 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
1230 if (IS_ERR(host->tx_chan)) {
1231 ret = PTR_ERR(host->tx_chan);
1232 if (ret != -EPROBE_DEFER)
1233 pr_warn("Failed to request TX DMA channel.\n");
1234 host->tx_chan = NULL;
1235 return ret;
1236 }
1237
1238 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
1239 if (IS_ERR(host->rx_chan)) {
1240 if (host->tx_chan) {
1241 dma_release_channel(host->tx_chan);
1242 host->tx_chan = NULL;
1243 }
1244
1245 ret = PTR_ERR(host->rx_chan);
1246 if (ret != -EPROBE_DEFER)
1247 pr_warn("Failed to request RX DMA channel.\n");
1248 host->rx_chan = NULL;
1249 }
1250
1251 return ret;
1252 }
1253
1254 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1255 struct mmc_data *data)
1256 {
1257 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1258 }
1259
1260 static int sdhci_external_dma_setup(struct sdhci_host *host,
1261 struct mmc_command *cmd)
1262 {
1263 int ret, i;
1264 enum dma_transfer_direction dir;
1265 struct dma_async_tx_descriptor *desc;
1266 struct mmc_data *data = cmd->data;
1267 struct dma_chan *chan;
1268 struct dma_slave_config cfg;
1269 dma_cookie_t cookie;
1270 int sg_cnt;
1271
1272 if (!host->mapbase)
1273 return -EINVAL;
1274
1275 memset(&cfg, 0, sizeof(cfg));
1276 cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1277 cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1278 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1279 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1280 cfg.src_maxburst = data->blksz / 4;
1281 cfg.dst_maxburst = data->blksz / 4;
1282
1283 /* Sanity check: all the SG entries must be aligned by block size. */
1284 for (i = 0; i < data->sg_len; i++) {
1285 if ((data->sg + i)->length % data->blksz)
1286 return -EINVAL;
1287 }
1288
1289 chan = sdhci_external_dma_channel(host, data);
1290
1291 ret = dmaengine_slave_config(chan, &cfg);
1292 if (ret)
1293 return ret;
1294
1295 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1296 if (sg_cnt <= 0)
1297 return -EINVAL;
1298
1299 dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1300 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1301 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1302 if (!desc)
1303 return -EINVAL;
1304
1305 desc->callback = NULL;
1306 desc->callback_param = NULL;
1307
1308 cookie = dmaengine_submit(desc);
1309 if (dma_submit_error(cookie))
1310 ret = cookie;
1311
1312 return ret;
1313 }
1314
1315 static void sdhci_external_dma_release(struct sdhci_host *host)
1316 {
1317 if (host->tx_chan) {
1318 dma_release_channel(host->tx_chan);
1319 host->tx_chan = NULL;
1320 }
1321
1322 if (host->rx_chan) {
1323 dma_release_channel(host->rx_chan);
1324 host->rx_chan = NULL;
1325 }
1326
1327 sdhci_switch_external_dma(host, false);
1328 }
1329
1330 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
1331 struct mmc_command *cmd)
1332 {
1333 struct mmc_data *data = cmd->data;
1334
1335 sdhci_initialize_data(host, data);
1336
1337 host->flags |= SDHCI_REQ_USE_DMA;
1338 sdhci_set_transfer_irqs(host);
1339
1340 sdhci_set_block_info(host, data);
1341 }
1342
1343 static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1344 struct mmc_command *cmd)
1345 {
1346 if (!sdhci_external_dma_setup(host, cmd)) {
1347 __sdhci_external_dma_prepare_data(host, cmd);
1348 } else {
1349 sdhci_external_dma_release(host);
1350 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1351 mmc_hostname(host->mmc));
1352 sdhci_prepare_data(host, cmd);
1353 }
1354 }
1355
1356 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1357 struct mmc_command *cmd)
1358 {
1359 struct dma_chan *chan;
1360
1361 if (!cmd->data)
1362 return;
1363
1364 chan = sdhci_external_dma_channel(host, cmd->data);
1365 if (chan)
1366 dma_async_issue_pending(chan);
1367 }
1368
1369 #else
1370
1371 static inline int sdhci_external_dma_init(struct sdhci_host *host)
1372 {
1373 return -EOPNOTSUPP;
1374 }
1375
1376 static inline void sdhci_external_dma_release(struct sdhci_host *host)
1377 {
1378 }
1379
1380 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1381 struct mmc_command *cmd)
1382 {
1383 /* This should never happen */
1384 WARN_ON_ONCE(1);
1385 }
1386
1387 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1388 struct mmc_command *cmd)
1389 {
1390 }
1391
1392 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1393 struct mmc_data *data)
1394 {
1395 return NULL;
1396 }
1397
1398 #endif
1399
1400 void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1401 {
1402 host->use_external_dma = en;
1403 }
1404 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
1405
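/*
 * Helpers deciding how a multi-block transfer is bounded and terminated:
 * Auto-CMD12 (no CMD23 in the request and the host supports it),
 * Auto-CMD23 (the controller issues CMD23 itself), or a manual CMD23 sent
 * by the driver as a separate command.
 */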
1406 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1407 struct mmc_request *mrq)
1408 {
1409 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1410 !mrq->cap_cmd_during_tfr;
1411 }
1412
1413 static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
1414 struct mmc_request *mrq)
1415 {
1416 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1417 }
1418
1419 static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
1420 struct mmc_request *mrq)
1421 {
1422 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
1423 }
1424
1425 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1426 struct mmc_command *cmd,
1427 u16 *mode)
1428 {
1429 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1430 (cmd->opcode != SD_IO_RW_EXTENDED);
1431 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
1432 u16 ctrl2;
1433
1434 /*
1435 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1436 * Select' is recommended rather than use of 'Auto CMD12
1437 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
1438 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
1439 */
1440 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1441 (use_cmd12 || use_cmd23)) {
1442 *mode |= SDHCI_TRNS_AUTO_SEL;
1443
1444 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1445 if (use_cmd23)
1446 ctrl2 |= SDHCI_CMD23_ENABLE;
1447 else
1448 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1449 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1450
1451 return;
1452 }
1453
1454 /*
1455 * If we are sending CMD23, CMD12 never gets sent
1456 * on successful completion (so no Auto-CMD12).
1457 */
1458 if (use_cmd12)
1459 *mode |= SDHCI_TRNS_AUTO_CMD12;
1460 else if (use_cmd23)
1461 *mode |= SDHCI_TRNS_AUTO_CMD23;
1462 }
1463
1464 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1465 struct mmc_command *cmd)
1466 {
1467 u16 mode = 0;
1468 struct mmc_data *data = cmd->data;
1469
1470 if (data == NULL) {
1471 if (host->quirks2 &
1472 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1473 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1474 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1475 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1476 } else {
1477 /* clear Auto CMD settings for no data CMDs */
1478 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1479 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1480 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1481 }
1482 return;
1483 }
1484
1485 WARN_ON(!host->data);
1486
1487 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1488 mode = SDHCI_TRNS_BLK_CNT_EN;
1489
1490 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1491 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1492 sdhci_auto_cmd_select(host, cmd, &mode);
1493 if (sdhci_auto_cmd23(host, cmd->mrq))
1494 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1495 }
1496
1497 if (data->flags & MMC_DATA_READ)
1498 mode |= SDHCI_TRNS_READ;
1499 if (host->flags & SDHCI_REQ_USE_DMA)
1500 mode |= SDHCI_TRNS_DMA;
1501
1502 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1503 }
1504
1505 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1506 {
1507 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1508 ((mrq->cmd && mrq->cmd->error) ||
1509 (mrq->sbc && mrq->sbc->error) ||
1510 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1511 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1512 }
1513
1514 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
1515 {
1516 int i;
1517
1518 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1519 if (host->mrqs_done[i] == mrq) {
1520 WARN_ON(1);
1521 return;
1522 }
1523 }
1524
1525 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1526 if (!host->mrqs_done[i]) {
1527 host->mrqs_done[i] = mrq;
1528 break;
1529 }
1530 }
1531
1532 WARN_ON(i >= SDHCI_MAX_MRQS);
1533 }
1534
1535 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1536 {
1537 if (host->cmd && host->cmd->mrq == mrq)
1538 host->cmd = NULL;
1539
1540 if (host->data_cmd && host->data_cmd->mrq == mrq)
1541 host->data_cmd = NULL;
1542
1543 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
1544 host->deferred_cmd = NULL;
1545
1546 if (host->data && host->data->mrq == mrq)
1547 host->data = NULL;
1548
1549 if (sdhci_needs_reset(host, mrq))
1550 host->pending_reset = true;
1551
1552 sdhci_set_mrq_done(host, mrq);
1553
1554 sdhci_del_timer(host, mrq);
1555
1556 if (!sdhci_has_requests(host))
1557 sdhci_led_deactivate(host);
1558 }
1559
1560 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1561 {
1562 __sdhci_finish_mrq(host, mrq);
1563
1564 queue_work(host->complete_wq, &host->complete_work);
1565 }
1566
1567 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
1568 {
1569 struct mmc_command *data_cmd = host->data_cmd;
1570 struct mmc_data *data = host->data;
1571
1572 host->data = NULL;
1573 host->data_cmd = NULL;
1574
1575 /*
1576 * The controller needs a reset of internal state machines upon error
1577 * conditions.
1578 */
1579 if (data->error) {
1580 if (!host->cmd || host->cmd == data_cmd)
1581 sdhci_reset_for(host, REQUEST_ERROR);
1582 else
1583 sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
1584 }
1585
1586 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1587 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1588 sdhci_adma_table_post(host, data);
1589
1590 /*
1591 * The specification states that the block count register must
1592 * be updated, but it does not specify at what point in the
1593 * data flow. That makes the register entirely useless to read
1594 * back so we have to assume that nothing made it to the card
1595 * in the event of an error.
1596 */
1597 if (data->error)
1598 data->bytes_xfered = 0;
1599 else
1600 data->bytes_xfered = data->blksz * data->blocks;
1601
1602 /*
1603 * Need to send CMD12 if -
1604 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1605 * b) error in multiblock transfer
1606 */
1607 if (data->stop &&
1608 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1609 data->error)) {
1610 /*
1611 * 'cap_cmd_during_tfr' request must not use the command line
1612 * after mmc_command_done() has been called. It is upper layer's
1613 * responsibility to send the stop command if required.
1614 */
1615 if (data->mrq->cap_cmd_during_tfr) {
1616 __sdhci_finish_mrq(host, data->mrq);
1617 } else {
1618 /* Avoid triggering warning in sdhci_send_command() */
1619 host->cmd = NULL;
1620 if (!sdhci_send_command(host, data->stop)) {
1621 if (sw_data_timeout) {
1622 /*
1623 * This is anyway a sw data timeout, so
1624 * give up now.
1625 */
1626 data->stop->error = -EIO;
1627 __sdhci_finish_mrq(host, data->mrq);
1628 } else {
1629 WARN_ON(host->deferred_cmd);
1630 host->deferred_cmd = data->stop;
1631 }
1632 }
1633 }
1634 } else {
1635 __sdhci_finish_mrq(host, data->mrq);
1636 }
1637 }
1638
1639 static void sdhci_finish_data(struct sdhci_host *host)
1640 {
1641 __sdhci_finish_data(host, false);
1642 }
1643
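/*
 * Write the command (and any data setup) to the controller. Returns false
 * without issuing anything if the CMD/DAT inhibit bits are still set, in
 * which case the caller retries or defers the command (see
 * sdhci_send_command_retry() and host->deferred_cmd).
 */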
1644 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1645 {
1646 int flags;
1647 u32 mask;
1648 unsigned long timeout;
1649
1650 WARN_ON(host->cmd);
1651
1652 /* Initially, a command has no error */
1653 cmd->error = 0;
1654
1655 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1656 cmd->opcode == MMC_STOP_TRANSMISSION)
1657 cmd->flags |= MMC_RSP_BUSY;
1658
1659 mask = SDHCI_CMD_INHIBIT;
1660 if (sdhci_data_line_cmd(cmd))
1661 mask |= SDHCI_DATA_INHIBIT;
1662
1663 /* We shouldn't wait for data inhibit for stop commands, even
1664 though they might use busy signaling */
1665 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1666 mask &= ~SDHCI_DATA_INHIBIT;
1667
1668 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
1669 return false;
1670
1671 host->cmd = cmd;
1672 host->data_timeout = 0;
1673 if (sdhci_data_line_cmd(cmd)) {
1674 WARN_ON(host->data_cmd);
1675 host->data_cmd = cmd;
1676 sdhci_set_timeout(host, cmd);
1677 }
1678
1679 if (cmd->data) {
1680 if (host->use_external_dma)
1681 sdhci_external_dma_prepare_data(host, cmd);
1682 else
1683 sdhci_prepare_data(host, cmd);
1684 }
1685
1686 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1687
1688 sdhci_set_transfer_mode(host, cmd);
1689
1690 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1691 WARN_ONCE(1, "Unsupported response type!\n");
1692 /*
1693 * This does not happen in practice because 136-bit response
1694 * commands never have busy waiting, so rather than complicate
1695 * the error path, just remove busy waiting and continue.
1696 */
1697 cmd->flags &= ~MMC_RSP_BUSY;
1698 }
1699
1700 if (!(cmd->flags & MMC_RSP_PRESENT))
1701 flags = SDHCI_CMD_RESP_NONE;
1702 else if (cmd->flags & MMC_RSP_136)
1703 flags = SDHCI_CMD_RESP_LONG;
1704 else if (cmd->flags & MMC_RSP_BUSY)
1705 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1706 else
1707 flags = SDHCI_CMD_RESP_SHORT;
1708
1709 if (cmd->flags & MMC_RSP_CRC)
1710 flags |= SDHCI_CMD_CRC;
1711 if (cmd->flags & MMC_RSP_OPCODE)
1712 flags |= SDHCI_CMD_INDEX;
1713
1714 /* CMD19 is special in that the Data Present Select should be set */
1715 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1716 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1717 flags |= SDHCI_CMD_DATA;
1718
1719 timeout = jiffies;
1720 if (host->data_timeout)
1721 timeout += nsecs_to_jiffies(host->data_timeout);
1722 else if (!cmd->data && cmd->busy_timeout > 9000)
1723 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1724 else
1725 timeout += 10 * HZ;
1726 sdhci_mod_timer(host, cmd->mrq, timeout);
1727
1728 if (host->use_external_dma)
1729 sdhci_external_dma_pre_transfer(host, cmd);
1730
1731 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1732
1733 return true;
1734 }
1735
1736 static bool sdhci_present_error(struct sdhci_host *host,
1737 struct mmc_command *cmd, bool present)
1738 {
1739 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1740 cmd->error = -ENOMEDIUM;
1741 return true;
1742 }
1743
1744 return false;
1745 }
1746
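/*
 * Retry sending a command while the inhibit bits are set, polling for
 * roughly 10 ms with the host lock dropped around each sleep, and
 * re-checking card presence in case the card was removed meanwhile.
 */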
1747 static bool sdhci_send_command_retry(struct sdhci_host *host,
1748 struct mmc_command *cmd,
1749 unsigned long flags)
1750 __releases(host->lock)
1751 __acquires(host->lock)
1752 {
1753 struct mmc_command *deferred_cmd = host->deferred_cmd;
1754 int timeout = 10; /* Approx. 10 ms */
1755 bool present;
1756
1757 while (!sdhci_send_command(host, cmd)) {
1758 if (!timeout--) {
1759 pr_err("%s: Controller never released inhibit bit(s).\n",
1760 mmc_hostname(host->mmc));
1761 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
1762 sdhci_dumpregs(host);
1763 cmd->error = -EIO;
1764 return false;
1765 }
1766
1767 spin_unlock_irqrestore(&host->lock, flags);
1768
1769 usleep_range(1000, 1250);
1770
1771 present = host->mmc->ops->get_cd(host->mmc);
1772
1773 spin_lock_irqsave(&host->lock, flags);
1774
1775 /* A deferred command might disappear, handle that */
1776 if (cmd == deferred_cmd && cmd != host->deferred_cmd)
1777 return true;
1778
1779 if (sdhci_present_error(host, cmd, present))
1780 return false;
1781 }
1782
1783 if (cmd == host->deferred_cmd)
1784 host->deferred_cmd = NULL;
1785
1786 return true;
1787 }
1788
1789 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1790 {
1791 int i, reg;
1792
1793 for (i = 0; i < 4; i++) {
1794 reg = SDHCI_RESPONSE + (3 - i) * 4;
1795 cmd->resp[i] = sdhci_readl(host, reg);
1796 }
1797
1798 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1799 return;
1800
1801 /* CRC is stripped so we need to do some shifting */
1802 for (i = 0; i < 4; i++) {
1803 cmd->resp[i] <<= 8;
1804 if (i != 3)
1805 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1806 }
1807 }
1808
1809 static void sdhci_finish_command(struct sdhci_host *host)
1810 {
1811 struct mmc_command *cmd = host->cmd;
1812
1813 host->cmd = NULL;
1814
1815 if (cmd->flags & MMC_RSP_PRESENT) {
1816 if (cmd->flags & MMC_RSP_136) {
1817 sdhci_read_rsp_136(host, cmd);
1818 } else {
1819 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1820 }
1821 }
1822
1823 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1824 mmc_command_done(host->mmc, cmd->mrq);
1825
1826 /*
1827 * The host can send an interrupt when the busy state has
1828 * ended, allowing us to wait without wasting CPU cycles.
1829 * The busy signal uses DAT0 so this is similar to waiting
1830 * for data to complete.
1831 *
1832 * Note: The 1.0 specification is a bit ambiguous about this
1833 * feature so there might be some problems with older
1834 * controllers.
1835 */
1836 if (cmd->flags & MMC_RSP_BUSY) {
1837 if (cmd->data) {
1838 DBG("Cannot wait for busy signal when also doing a data transfer");
1839 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1840 cmd == host->data_cmd) {
1841 /* Command complete before busy is ended */
1842 return;
1843 }
1844 }
1845
1846 /* Finished CMD23, now send actual command. */
1847 if (cmd == cmd->mrq->sbc) {
1848 if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1849 WARN_ON(host->deferred_cmd);
1850 host->deferred_cmd = cmd->mrq->cmd;
1851 }
1852 } else {
1853
1854 /* Processed actual command. */
1855 if (host->data && host->data_early)
1856 sdhci_finish_data(host);
1857
1858 if (!cmd->data)
1859 __sdhci_finish_mrq(host, cmd->mrq);
1860 }
1861 }
1862
1863 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1864 {
1865 u16 preset = 0;
1866
1867 switch (host->timing) {
1868 case MMC_TIMING_MMC_HS:
1869 case MMC_TIMING_SD_HS:
1870 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1871 break;
1872 case MMC_TIMING_UHS_SDR12:
1873 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1874 break;
1875 case MMC_TIMING_UHS_SDR25:
1876 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1877 break;
1878 case MMC_TIMING_UHS_SDR50:
1879 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1880 break;
1881 case MMC_TIMING_UHS_SDR104:
1882 case MMC_TIMING_MMC_HS200:
1883 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1884 break;
1885 case MMC_TIMING_UHS_DDR50:
1886 case MMC_TIMING_MMC_DDR52:
1887 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1888 break;
1889 case MMC_TIMING_MMC_HS400:
1890 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1891 break;
1892 default:
1893 pr_warn("%s: Invalid UHS-I mode selected\n",
1894 mmc_hostname(host->mmc));
1895 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1896 break;
1897 }
1898 return preset;
1899 }
1900
1901 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1902 unsigned int *actual_clock)
1903 {
1904 int div = 0; /* Initialized for compiler warning */
1905 int real_div = div, clk_mul = 1;
1906 u16 clk = 0;
1907 bool switch_base_clk = false;
1908
1909 if (host->version >= SDHCI_SPEC_300) {
1910 if (host->preset_enabled) {
1911 u16 pre_val;
1912
1913 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1914 pre_val = sdhci_get_preset_value(host);
1915 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1916 if (host->clk_mul &&
1917 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1918 clk = SDHCI_PROG_CLOCK_MODE;
1919 real_div = div + 1;
1920 clk_mul = host->clk_mul;
1921 } else {
1922 real_div = max_t(int, 1, div << 1);
1923 }
1924 goto clock_set;
1925 }
1926
1927 /*
1928 * Check if the Host Controller supports Programmable Clock
1929 * Mode.
1930 */
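/*
 * In programmable clock mode, as computed below, SDCLK is
 * (base clock * clk_mul) / div and the register field stores div - 1.
 * In the divided clock mode used further down, SDCLK is
 * base clock / div and the field stores div / 2.
 */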
1931 if (host->clk_mul) {
1932 for (div = 1; div <= 1024; div++) {
1933 if ((host->max_clk * host->clk_mul / div)
1934 <= clock)
1935 break;
1936 }
1937 if ((host->max_clk * host->clk_mul / div) <= clock) {
1938 /*
1939 * Set Programmable Clock Mode in the Clock
1940 * Control register.
1941 */
1942 clk = SDHCI_PROG_CLOCK_MODE;
1943 real_div = div;
1944 clk_mul = host->clk_mul;
1945 div--;
1946 } else {
1947 /*
1948 * Divisor can be too small to reach clock
1949 * speed requirement. Then use the base clock.
1950 */
1951 switch_base_clk = true;
1952 }
1953 }
1954
1955 if (!host->clk_mul || switch_base_clk) {
1956 /* Version 3.00 divisors must be a multiple of 2. */
1957 if (host->max_clk <= clock)
1958 div = 1;
1959 else {
1960 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1961 div += 2) {
1962 if ((host->max_clk / div) <= clock)
1963 break;
1964 }
1965 }
1966 real_div = div;
1967 div >>= 1;
1968 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1969 && !div && host->max_clk <= 25000000)
1970 div = 1;
1971 }
1972 } else {
1973 /* Version 2.00 divisors must be a power of 2. */
1974 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1975 if ((host->max_clk / div) <= clock)
1976 break;
1977 }
1978 real_div = div;
1979 div >>= 1;
1980 }
1981
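/*
 * The divisor is split across the Clock Control register: the low
 * 8 bits go to bits 15:8 and, for v3.00 hosts, the upper 2 bits go
 * to bits 7:6. Example, assuming a 200 MHz base clock in divided
 * mode: a 50 MHz request selects div = 4, which is stored as 2 and
 * gives SDCLK = 200 MHz / 4 = 50 MHz.
 */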
1982 clock_set:
1983 if (real_div)
1984 *actual_clock = (host->max_clk * clk_mul) / real_div;
1985 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1986 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1987 << SDHCI_DIVIDER_HI_SHIFT;
1988
1989 return clk;
1990 }
1991 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1992
1993 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1994 {
1995 ktime_t timeout;
1996
1997 clk |= SDHCI_CLOCK_INT_EN;
1998 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1999
2000 /* Wait max 150 ms */
2001 timeout = ktime_add_ms(ktime_get(), 150);
2002 while (1) {
2003 bool timedout = ktime_after(ktime_get(), timeout);
2004
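/*
 * Sample the timeout before reading the register so that a clock
 * that became stable is still accepted even if this loop was
 * delayed (e.g. by preemption) past the deadline.
 */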
2005 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2006 if (clk & SDHCI_CLOCK_INT_STABLE)
2007 break;
2008 if (timedout) {
2009 pr_err("%s: Internal clock never stabilised.\n",
2010 mmc_hostname(host->mmc));
2011 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
2012 sdhci_dumpregs(host);
2013 return;
2014 }
2015 udelay(10);
2016 }
2017
2018 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
2019 clk |= SDHCI_CLOCK_PLL_EN;
2020 clk &= ~SDHCI_CLOCK_INT_STABLE;
2021 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2022
2023 /* Wait max 150 ms */
2024 timeout = ktime_add_ms(ktime_get(), 150);
2025 while (1) {
2026 bool timedout = ktime_after(ktime_get(), timeout);
2027
2028 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2029 if (clk & SDHCI_CLOCK_INT_STABLE)
2030 break;
2031 if (timedout) {
2032 pr_err("%s: PLL clock never stabilised.\n",
2033 mmc_hostname(host->mmc));
2034 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
2035 sdhci_dumpregs(host);
2036 return;
2037 }
2038 udelay(10);
2039 }
2040 }
2041
2042 clk |= SDHCI_CLOCK_CARD_EN;
2043 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2044 }
2045 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
2046
2047 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
2048 {
2049 u16 clk;
2050
2051 host->mmc->actual_clock = 0;
2052
2053 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2054
2055 if (clock == 0)
2056 return;
2057
2058 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
2059 sdhci_enable_clk(host, clk);
2060 }
2061 EXPORT_SYMBOL_GPL(sdhci_set_clock);
2062
2063 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2064 unsigned short vdd)
2065 {
2066 struct mmc_host *mmc = host->mmc;
2067
2068 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2069
2070 if (mode != MMC_POWER_OFF)
2071 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2072 else
2073 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2074 }
2075
2076 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2077 unsigned short vdd)
2078 {
2079 u8 pwr = 0;
2080
2081 if (mode != MMC_POWER_OFF) {
2082 switch (1 << vdd) {
2083 case MMC_VDD_165_195:
2084 /*
2085 * Without a regulator, SDHCI does not support 2.0v
2086 * so we only get here if the driver deliberately
2087 * added the 2.0v range to ocr_avail. Map it to 1.8v
2088 * for the purpose of turning on the power.
2089 */
2090 case MMC_VDD_20_21:
2091 pwr = SDHCI_POWER_180;
2092 break;
2093 case MMC_VDD_29_30:
2094 case MMC_VDD_30_31:
2095 pwr = SDHCI_POWER_300;
2096 break;
2097 case MMC_VDD_32_33:
2098 case MMC_VDD_33_34:
2099 /*
2100 * 3.4 ~ 3.6V are valid only for those platforms where it's
2101 * known that the voltage range is supported by hardware.
2102 */
2103 case MMC_VDD_34_35:
2104 case MMC_VDD_35_36:
2105 pwr = SDHCI_POWER_330;
2106 break;
2107 default:
2108 WARN(1, "%s: Invalid vdd %#x\n",
2109 mmc_hostname(host->mmc), vdd);
2110 break;
2111 }
2112 }
2113
2114 if (host->pwr == pwr)
2115 return;
2116
2117 host->pwr = pwr;
2118
2119 if (pwr == 0) {
2120 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2121 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2122 sdhci_runtime_pm_bus_off(host);
2123 } else {
2124 /*
2125 * Spec says that we should clear the power reg before setting
2126 * a new value. Some controllers don't seem to like this though.
2127 */
2128 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2129 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2130
2131 /*
2132 * At least the Marvell CaFe chip gets confused if we set the
2133 * voltage and turn on the power at the same time, so set the
2134 * voltage first.
2135 */
2136 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2137 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2138
2139 pwr |= SDHCI_POWER_ON;
2140
2141 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2142
2143 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2144 sdhci_runtime_pm_bus_on(host);
2145
2146 /*
2147 * Some controllers need an extra 10 ms delay before they can
2148 * apply the clock after applying power.
2149 */
2150 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2151 mdelay(10);
2152 }
2153 }
2154 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
2155
2156 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2157 unsigned short vdd)
2158 {
2159 if (IS_ERR(host->mmc->supply.vmmc))
2160 sdhci_set_power_noreg(host, mode, vdd);
2161 else
2162 sdhci_set_power_reg(host, mode, vdd);
2163 }
2164 EXPORT_SYMBOL_GPL(sdhci_set_power);
2165
2166 /*
2167 * Some controllers need to configure a valid bus voltage on their power
2168 * register regardless of whether an external regulator is taking care of power
2169 * supply. This helper function takes care of it if set as the controller's
2170 * sdhci_ops.set_power callback.
2171 */
2172 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2173 unsigned char mode,
2174 unsigned short vdd)
2175 {
2176 if (!IS_ERR(host->mmc->supply.vmmc)) {
2177 struct mmc_host *mmc = host->mmc;
2178
2179 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2180 }
2181 sdhci_set_power_noreg(host, mode, vdd);
2182 }
2183 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
2184
2185 /*****************************************************************************\
2186 * *
2187 * MMC callbacks *
2188 * *
2189 \*****************************************************************************/
2190
2191 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2192 {
2193 struct sdhci_host *host = mmc_priv(mmc);
2194 struct mmc_command *cmd;
2195 unsigned long flags;
2196 bool present;
2197
2198 /* First, check card presence */
2199 present = mmc->ops->get_cd(mmc);
2200
2201 spin_lock_irqsave(&host->lock, flags);
2202
2203 sdhci_led_activate(host);
2204
2205 if (sdhci_present_error(host, mrq->cmd, present))
2206 goto out_finish;
2207
2208 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2209
2210 if (!sdhci_send_command_retry(host, cmd, flags))
2211 goto out_finish;
2212
2213 spin_unlock_irqrestore(&host->lock, flags);
2214
2215 return;
2216
2217 out_finish:
2218 sdhci_finish_mrq(host, mrq);
2219 spin_unlock_irqrestore(&host->lock, flags);
2220 }
2221 EXPORT_SYMBOL_GPL(sdhci_request);
2222
2223 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2224 {
2225 struct sdhci_host *host = mmc_priv(mmc);
2226 struct mmc_command *cmd;
2227 unsigned long flags;
2228 int ret = 0;
2229
2230 spin_lock_irqsave(&host->lock, flags);
2231
2232 if (sdhci_present_error(host, mrq->cmd, true)) {
2233 sdhci_finish_mrq(host, mrq);
2234 goto out_finish;
2235 }
2236
2237 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2238
2239 /*
2240 * The HSQ may send a command in interrupt context without polling
2241 * the busy signaling, so return -EBUSY if the controller has not
2242 * released the inhibit bits. That lets the HSQ retry the request
2243 * in non-atomic context, which is why the request must not be
2244 * finished here.
2245 */
2246 if (!sdhci_send_command(host, cmd))
2247 ret = -EBUSY;
2248 else
2249 sdhci_led_activate(host);
2250
2251 out_finish:
2252 spin_unlock_irqrestore(&host->lock, flags);
2253 return ret;
2254 }
2255 EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2256
2257 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2258 {
2259 u8 ctrl;
2260
2261 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2262 if (width == MMC_BUS_WIDTH_8) {
2263 ctrl &= ~SDHCI_CTRL_4BITBUS;
2264 ctrl |= SDHCI_CTRL_8BITBUS;
2265 } else {
2266 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2267 ctrl &= ~SDHCI_CTRL_8BITBUS;
2268 if (width == MMC_BUS_WIDTH_4)
2269 ctrl |= SDHCI_CTRL_4BITBUS;
2270 else
2271 ctrl &= ~SDHCI_CTRL_4BITBUS;
2272 }
2273 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2274 }
2275 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2276
2277 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2278 {
2279 u16 ctrl_2;
2280
2281 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2282 /* Select Bus Speed Mode for host */
2283 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2284 if ((timing == MMC_TIMING_MMC_HS200) ||
2285 (timing == MMC_TIMING_UHS_SDR104))
2286 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2287 else if (timing == MMC_TIMING_UHS_SDR12)
2288 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2289 else if (timing == MMC_TIMING_UHS_SDR25)
2290 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2291 else if (timing == MMC_TIMING_UHS_SDR50)
2292 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2293 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2294 (timing == MMC_TIMING_MMC_DDR52))
2295 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2296 else if (timing == MMC_TIMING_MMC_HS400)
2297 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2298 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2299 }
2300 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2301
2302 static bool sdhci_timing_has_preset(unsigned char timing)
2303 {
2304 switch (timing) {
2305 case MMC_TIMING_UHS_SDR12:
2306 case MMC_TIMING_UHS_SDR25:
2307 case MMC_TIMING_UHS_SDR50:
2308 case MMC_TIMING_UHS_SDR104:
2309 case MMC_TIMING_UHS_DDR50:
2310 case MMC_TIMING_MMC_DDR52:
2311 return true;
2312 }
2313 return false;
2314 }
2315
2316 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
2317 {
2318 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2319 sdhci_timing_has_preset(timing);
2320 }
2321
2322 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
2323 {
2324 /*
2325 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
2326 * Frequency. Check if preset values need to be enabled, or the Driver
2327 * Strength needs updating. Note, clock changes are handled separately.
2328 */
2329 return !host->preset_enabled &&
2330 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
2331 }
2332
2333 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2334 {
2335 struct sdhci_host *host = mmc_priv(mmc);
2336 bool reinit_uhs = host->reinit_uhs;
2337 bool turning_on_clk = false;
2338 u8 ctrl;
2339
2340 host->reinit_uhs = false;
2341
2342 if (ios->power_mode == MMC_POWER_UNDEFINED)
2343 return;
2344
2345 if (host->flags & SDHCI_DEVICE_DEAD) {
2346 if (!IS_ERR(mmc->supply.vmmc) &&
2347 ios->power_mode == MMC_POWER_OFF)
2348 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2349 return;
2350 }
2351
2352 /*
2353 * Reset the chip on each power off.
2354 * Should clear out any weird states.
2355 */
2356 if (ios->power_mode == MMC_POWER_OFF) {
2357 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2358 sdhci_reinit(host);
2359 }
2360
2361 if (host->version >= SDHCI_SPEC_300 &&
2362 (ios->power_mode == MMC_POWER_UP) &&
2363 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2364 sdhci_enable_preset_value(host, false);
2365
2366 if (!ios->clock || ios->clock != host->clock) {
2367 turning_on_clk = ios->clock && !host->clock;
2368
2369 host->ops->set_clock(host, ios->clock);
2370 host->clock = ios->clock;
2371
2372 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2373 host->clock) {
2374 host->timeout_clk = mmc->actual_clock ?
2375 mmc->actual_clock / 1000 :
2376 host->clock / 1000;
2377 mmc->max_busy_timeout =
2378 host->ops->get_max_timeout_count ?
2379 host->ops->get_max_timeout_count(host) :
2380 1 << 27;
2381 mmc->max_busy_timeout /= host->timeout_clk;
2382 }
2383 }
2384
2385 if (host->ops->set_power)
2386 host->ops->set_power(host, ios->power_mode, ios->vdd);
2387 else
2388 sdhci_set_power(host, ios->power_mode, ios->vdd);
2389
2390 if (host->ops->platform_send_init_74_clocks)
2391 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2392
2393 host->ops->set_bus_width(host, ios->bus_width);
2394
2395 /*
2396 * Special case to avoid multiple clock changes during voltage
2397 * switching.
2398 */
2399 if (!reinit_uhs &&
2400 turning_on_clk &&
2401 host->timing == ios->timing &&
2402 host->version >= SDHCI_SPEC_300 &&
2403 !sdhci_presetable_values_change(host, ios))
2404 return;
2405
2406 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2407
2408 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2409 if (ios->timing == MMC_TIMING_SD_HS ||
2410 ios->timing == MMC_TIMING_MMC_HS ||
2411 ios->timing == MMC_TIMING_MMC_HS400 ||
2412 ios->timing == MMC_TIMING_MMC_HS200 ||
2413 ios->timing == MMC_TIMING_MMC_DDR52 ||
2414 ios->timing == MMC_TIMING_UHS_SDR50 ||
2415 ios->timing == MMC_TIMING_UHS_SDR104 ||
2416 ios->timing == MMC_TIMING_UHS_DDR50 ||
2417 ios->timing == MMC_TIMING_UHS_SDR25)
2418 ctrl |= SDHCI_CTRL_HISPD;
2419 else
2420 ctrl &= ~SDHCI_CTRL_HISPD;
2421 }
2422
2423 if (host->version >= SDHCI_SPEC_300) {
2424 u16 clk, ctrl_2;
2425
2426 if (!host->preset_enabled) {
2427 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2428 /*
2429 * We only need to set Driver Strength if the
2430 * preset value enable is not set.
2431 */
2432 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2433 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2434 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2435 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2436 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2437 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2438 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2439 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2440 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2441 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2442 else {
2443 pr_warn("%s: invalid driver type, default to driver type B\n",
2444 mmc_hostname(mmc));
2445 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2446 }
2447
2448 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2449 host->drv_type = ios->drv_type;
2450 } else {
2451 /*
2452 * According to SDHC Spec v3.00, if the Preset Value
2453 * Enable in the Host Control 2 register is set, we
2454 * need to reset SD Clock Enable before changing High
2455 * Speed Enable to avoid generating clock glitches.
2456 */
2457
2458 /* Reset SD Clock Enable */
2459 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2460 clk &= ~SDHCI_CLOCK_CARD_EN;
2461 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2462
2463 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2464
2465 /* Re-enable SD Clock */
2466 host->ops->set_clock(host, host->clock);
2467 }
2468
2469 /* Reset SD Clock Enable */
2470 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2471 clk &= ~SDHCI_CLOCK_CARD_EN;
2472 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2473
2474 host->ops->set_uhs_signaling(host, ios->timing);
2475 host->timing = ios->timing;
2476
2477 if (sdhci_preset_needed(host, ios->timing)) {
2478 u16 preset;
2479
2480 sdhci_enable_preset_value(host, true);
2481 preset = sdhci_get_preset_value(host);
2482 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2483 preset);
2484 host->drv_type = ios->drv_type;
2485 }
2486
2487 /* Re-enable SD Clock */
2488 host->ops->set_clock(host, host->clock);
2489 } else
2490 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2491 }
2492 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2493
2494 static int sdhci_get_cd(struct mmc_host *mmc)
2495 {
2496 struct sdhci_host *host = mmc_priv(mmc);
2497 int gpio_cd = mmc_gpio_get_cd(mmc);
2498
2499 if (host->flags & SDHCI_DEVICE_DEAD)
2500 return 0;
2501
2502 /* If nonremovable, assume that the card is always present. */
2503 if (!mmc_card_is_removable(mmc))
2504 return 1;
2505
2506 /*
2507 * Try slot GPIO detect; if defined, it takes precedence
2508 * over the built-in controller functionality.
2509 */
2510 if (gpio_cd >= 0)
2511 return !!gpio_cd;
2512
2513 /* If polling, assume that the card is always present. */
2514 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2515 return 1;
2516
2517 /* Host native card detect */
2518 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2519 }
2520
2521 int sdhci_get_cd_nogpio(struct mmc_host *mmc)
2522 {
2523 struct sdhci_host *host = mmc_priv(mmc);
2524 unsigned long flags;
2525 int ret = 0;
2526
2527 spin_lock_irqsave(&host->lock, flags);
2528
2529 if (host->flags & SDHCI_DEVICE_DEAD)
2530 goto out;
2531
2532 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2533 out:
2534 spin_unlock_irqrestore(&host->lock, flags);
2535
2536 return ret;
2537 }
2538 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
2539
2540 static int sdhci_check_ro(struct sdhci_host *host)
2541 {
2542 unsigned long flags;
2543 int is_readonly;
2544
2545 spin_lock_irqsave(&host->lock, flags);
2546
2547 if (host->flags & SDHCI_DEVICE_DEAD)
2548 is_readonly = 0;
2549 else if (host->ops->get_ro)
2550 is_readonly = host->ops->get_ro(host);
2551 else if (mmc_can_gpio_ro(host->mmc))
2552 is_readonly = mmc_gpio_get_ro(host->mmc);
2553 else
2554 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2555 & SDHCI_WRITE_PROTECT);
2556
2557 spin_unlock_irqrestore(&host->lock, flags);
2558
2559 /* This quirk needs to be replaced by a callback-function later */
2560 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2561 !is_readonly : is_readonly;
2562 }
2563
2564 #define SAMPLE_COUNT 5
2565
2566 static int sdhci_get_ro(struct mmc_host *mmc)
2567 {
2568 struct sdhci_host *host = mmc_priv(mmc);
2569 int i, ro_count;
2570
2571 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2572 return sdhci_check_ro(host);
2573
2574 ro_count = 0;
2575 for (i = 0; i < SAMPLE_COUNT; i++) {
2576 if (sdhci_check_ro(host)) {
2577 if (++ro_count > SAMPLE_COUNT / 2)
2578 return 1;
2579 }
2580 msleep(30);
2581 }
2582 return 0;
2583 }
2584
2585 static void sdhci_hw_reset(struct mmc_host *mmc)
2586 {
2587 struct sdhci_host *host = mmc_priv(mmc);
2588
2589 if (host->ops && host->ops->hw_reset)
2590 host->ops->hw_reset(host);
2591 }
2592
2593 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2594 {
2595 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2596 if (enable)
2597 host->ier |= SDHCI_INT_CARD_INT;
2598 else
2599 host->ier &= ~SDHCI_INT_CARD_INT;
2600
2601 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2602 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2603 }
2604 }
2605
2606 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2607 {
2608 struct sdhci_host *host = mmc_priv(mmc);
2609 unsigned long flags;
2610
2611 if (enable)
2612 pm_runtime_get_noresume(mmc_dev(mmc));
2613
2614 spin_lock_irqsave(&host->lock, flags);
2615 sdhci_enable_sdio_irq_nolock(host, enable);
2616 spin_unlock_irqrestore(&host->lock, flags);
2617
2618 if (!enable)
2619 pm_runtime_put_noidle(mmc_dev(mmc));
2620 }
2621 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2622
2623 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2624 {
2625 struct sdhci_host *host = mmc_priv(mmc);
2626 unsigned long flags;
2627
2628 spin_lock_irqsave(&host->lock, flags);
2629 sdhci_enable_sdio_irq_nolock(host, true);
2630 spin_unlock_irqrestore(&host->lock, flags);
2631 }
2632
2633 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2634 struct mmc_ios *ios)
2635 {
2636 struct sdhci_host *host = mmc_priv(mmc);
2637 u16 ctrl;
2638 int ret;
2639
2640 /*
2641 * Signal Voltage Switching is only applicable for Host Controllers
2642 * v3.00 and above.
2643 */
2644 if (host->version < SDHCI_SPEC_300)
2645 return 0;
2646
2647 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2648
2649 switch (ios->signal_voltage) {
2650 case MMC_SIGNAL_VOLTAGE_330:
2651 if (!(host->flags & SDHCI_SIGNALING_330))
2652 return -EINVAL;
2653 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2654 ctrl &= ~SDHCI_CTRL_VDD_180;
2655 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2656
2657 if (!IS_ERR(mmc->supply.vqmmc)) {
2658 ret = mmc_regulator_set_vqmmc(mmc, ios);
2659 if (ret < 0) {
2660 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2661 mmc_hostname(mmc));
2662 return -EIO;
2663 }
2664 }
2665 /* Wait for 5ms */
2666 usleep_range(5000, 5500);
2667
2668 /* 3.3V regulator output should be stable within 5 ms */
2669 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2670 if (!(ctrl & SDHCI_CTRL_VDD_180))
2671 return 0;
2672
2673 pr_warn("%s: 3.3V regulator output did not become stable\n",
2674 mmc_hostname(mmc));
2675
2676 return -EAGAIN;
2677 case MMC_SIGNAL_VOLTAGE_180:
2678 if (!(host->flags & SDHCI_SIGNALING_180))
2679 return -EINVAL;
2680 if (!IS_ERR(mmc->supply.vqmmc)) {
2681 ret = mmc_regulator_set_vqmmc(mmc, ios);
2682 if (ret < 0) {
2683 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2684 mmc_hostname(mmc));
2685 return -EIO;
2686 }
2687 }
2688
2689 /*
2690 * Enable 1.8V Signal Enable in the Host Control2
2691 * register
2692 */
2693 ctrl |= SDHCI_CTRL_VDD_180;
2694 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2695
2696 /* Some controllers need to do more when switching */
2697 if (host->ops->voltage_switch)
2698 host->ops->voltage_switch(host);
2699
2700 /* 1.8V regulator output should be stable within 5 ms */
2701 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2702 if (ctrl & SDHCI_CTRL_VDD_180)
2703 return 0;
2704
2705 pr_warn("%s: 1.8V regulator output did not become stable\n",
2706 mmc_hostname(mmc));
2707
2708 return -EAGAIN;
2709 case MMC_SIGNAL_VOLTAGE_120:
2710 if (!(host->flags & SDHCI_SIGNALING_120))
2711 return -EINVAL;
2712 if (!IS_ERR(mmc->supply.vqmmc)) {
2713 ret = mmc_regulator_set_vqmmc(mmc, ios);
2714 if (ret < 0) {
2715 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2716 mmc_hostname(mmc));
2717 return -EIO;
2718 }
2719 }
2720 return 0;
2721 default:
2722 /* No signal voltage switch required */
2723 return 0;
2724 }
2725 }
2726 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2727
2728 static int sdhci_card_busy(struct mmc_host *mmc)
2729 {
2730 struct sdhci_host *host = mmc_priv(mmc);
2731 u32 present_state;
2732
2733 /* Check whether DAT[0] is 0 */
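/* The card holds DAT[0] low for as long as it is signalling busy */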
2734 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2735
2736 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2737 }
2738
2739 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2740 {
2741 struct sdhci_host *host = mmc_priv(mmc);
2742 unsigned long flags;
2743
2744 spin_lock_irqsave(&host->lock, flags);
2745 host->flags |= SDHCI_HS400_TUNING;
2746 spin_unlock_irqrestore(&host->lock, flags);
2747
2748 return 0;
2749 }
2750
2751 void sdhci_start_tuning(struct sdhci_host *host)
2752 {
2753 u16 ctrl;
2754
2755 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2756 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2757 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2758 ctrl |= SDHCI_CTRL_TUNED_CLK;
2759 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2760
2761 /*
2762 * As per the Host Controller spec v3.00, tuning command
2763 * generates Buffer Read Ready interrupt, so enable that.
2764 *
2765 * Note: The spec clearly says that when tuning sequence
2766 * is being performed, the controller does not generate
2767 * interrupts other than Buffer Read Ready interrupt. But
2768 * to make sure we don't hit a controller bug, we _only_
2769 * enable Buffer Read Ready interrupt here.
2770 */
2771 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2772 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2773 }
2774 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2775
2776 void sdhci_end_tuning(struct sdhci_host *host)
2777 {
2778 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2779 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2780 }
2781 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2782
2783 void sdhci_reset_tuning(struct sdhci_host *host)
2784 {
2785 u16 ctrl;
2786
2787 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2788 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2789 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2790 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2791 }
2792 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2793
2794 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2795 {
2796 sdhci_reset_tuning(host);
2797
2798 sdhci_reset_for(host, TUNING_ABORT);
2799
2800 sdhci_end_tuning(host);
2801
2802 mmc_send_abort_tuning(host->mmc, opcode);
2803 }
2804 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2805
2806 /*
2807 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2808 * tuning command does not have a data payload (or rather the hardware does it
2809 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2810 * interrupt setup is different to other commands and there is no timeout
2811 * interrupt so special handling is needed.
2812 */
2813 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2814 {
2815 struct mmc_host *mmc = host->mmc;
2816 struct mmc_command cmd = {};
2817 struct mmc_request mrq = {};
2818 unsigned long flags;
2819 u32 b = host->sdma_boundary;
2820
2821 spin_lock_irqsave(&host->lock, flags);
2822
2823 cmd.opcode = opcode;
2824 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2825 cmd.mrq = &mrq;
2826
2827 mrq.cmd = &cmd;
2828 /*
2829 * In response to CMD19 the card sends a 64-byte tuning block, so
2830 * the block size is set to 64 here; the HS200 tuning command
2831 * (CMD21) on an 8-bit bus uses a 128-byte tuning block instead.
2832 */
2833 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2834 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2835 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2836 else
2837 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2838
2839 /*
2840 * The tuning block is sent by the card to the host controller.
2841 * So we set the TRNS_READ bit in the Transfer Mode register.
2842 * This also takes care of setting DMA Enable and Multi Block
2843 * Select in the same register to 0.
2844 */
2845 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2846
2847 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2848 spin_unlock_irqrestore(&host->lock, flags);
2849 host->tuning_done = 0;
2850 return;
2851 }
2852
2853 host->cmd = NULL;
2854
2855 sdhci_del_timer(host, &mrq);
2856
2857 host->tuning_done = 0;
2858
2859 spin_unlock_irqrestore(&host->lock, flags);
2860
2861 /* Wait for Buffer Read Ready interrupt */
2862 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2863 msecs_to_jiffies(50));
2864
2865 }
2866 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2867
2868 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2869 {
2870 int i;
2871
2872 /*
2873 * Issue the tuning opcode repeatedly until Execute Tuning is cleared
2874 * or the number of loops reaches the tuning loop count.
2875 */
2876 for (i = 0; i < host->tuning_loop_count; i++) {
2877 u16 ctrl;
2878
2879 sdhci_send_tuning(host, opcode);
2880
2881 if (!host->tuning_done) {
2882 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2883 mmc_hostname(host->mmc));
2884 sdhci_abort_tuning(host, opcode);
2885 return -ETIMEDOUT;
2886 }
2887
2888 /* Spec does not require a delay between tuning cycles */
2889 if (host->tuning_delay > 0)
2890 mdelay(host->tuning_delay);
2891
2892 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2893 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2894 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2895 return 0; /* Success! */
2896 break;
2897 }
2898
2899 }
2900
2901 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2902 mmc_hostname(host->mmc));
2903 sdhci_reset_tuning(host);
2904 return -EAGAIN;
2905 }
2906
2907 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2908 {
2909 struct sdhci_host *host = mmc_priv(mmc);
2910 int err = 0;
2911 unsigned int tuning_count = 0;
2912 bool hs400_tuning;
2913
2914 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2915
2916 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2917 tuning_count = host->tuning_count;
2918
2919 /*
2920 * The Host Controller needs tuning in case of SDR104 and DDR50
2921 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2922 * the Capabilities register.
2923 * If the Host Controller supports the HS200 mode then the
2924 * tuning function has to be executed.
2925 */
2926 switch (host->timing) {
2927 /* HS400 tuning is done in HS200 mode */
2928 case MMC_TIMING_MMC_HS400:
2929 err = -EINVAL;
2930 goto out;
2931
2932 case MMC_TIMING_MMC_HS200:
2933 /*
2934 * Periodic re-tuning for HS400 is not expected to be needed, so
2935 * disable it here.
2936 */
2937 if (hs400_tuning)
2938 tuning_count = 0;
2939 break;
2940
2941 case MMC_TIMING_UHS_SDR104:
2942 case MMC_TIMING_UHS_DDR50:
2943 break;
2944
2945 case MMC_TIMING_UHS_SDR50:
2946 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2947 break;
2948 fallthrough;
2949
2950 default:
2951 goto out;
2952 }
2953
2954 if (host->ops->platform_execute_tuning) {
2955 err = host->ops->platform_execute_tuning(host, opcode);
2956 goto out;
2957 }
2958
2959 mmc->retune_period = tuning_count;
2960
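/*
 * If the driver has not set a tuning delay, default to 1 ms between
 * tuning commands for SD (CMD19) and to no delay for eMMC (CMD21).
 */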
2961 if (host->tuning_delay < 0)
2962 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2963
2964 sdhci_start_tuning(host);
2965
2966 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2967
2968 sdhci_end_tuning(host);
2969 out:
2970 host->flags &= ~SDHCI_HS400_TUNING;
2971
2972 return err;
2973 }
2974 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2975
2976 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2977 {
2978 /* Host Controller v3.00 defines preset value registers */
2979 if (host->version < SDHCI_SPEC_300)
2980 return;
2981
2982 /*
2983 * Only enable or disable Preset Value if it is not already in
2984 * the requested state. Otherwise, bail out.
2985 */
2986 if (host->preset_enabled != enable) {
2987 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2988
2989 if (enable)
2990 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2991 else
2992 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2993
2994 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2995
2996 if (enable)
2997 host->flags |= SDHCI_PV_ENABLED;
2998 else
2999 host->flags &= ~SDHCI_PV_ENABLED;
3000
3001 host->preset_enabled = enable;
3002 }
3003 }
3004
3005 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
3006 int err)
3007 {
3008 struct mmc_data *data = mrq->data;
3009
3010 if (data->host_cookie != COOKIE_UNMAPPED)
3011 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
3012 mmc_get_dma_dir(data));
3013
3014 data->host_cookie = COOKIE_UNMAPPED;
3015 }
3016
3017 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
3018 {
3019 struct sdhci_host *host = mmc_priv(mmc);
3020
3021 mrq->data->host_cookie = COOKIE_UNMAPPED;
3022
3023 /*
3024 * No pre-mapping in the pre hook if we're using the bounce buffer,
3025 * for that we would need two bounce buffers since one buffer is
3026 * in flight when this is getting called.
3027 */
3028 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
3029 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
3030 }
3031
3032 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
3033 {
3034 if (host->data_cmd) {
3035 host->data_cmd->error = err;
3036 sdhci_finish_mrq(host, host->data_cmd->mrq);
3037 }
3038
3039 if (host->cmd) {
3040 host->cmd->error = err;
3041 sdhci_finish_mrq(host, host->cmd->mrq);
3042 }
3043 }
3044
3045 static void sdhci_card_event(struct mmc_host *mmc)
3046 {
3047 struct sdhci_host *host = mmc_priv(mmc);
3048 unsigned long flags;
3049 int present;
3050
3051 /* First check if client has provided their own card event */
3052 if (host->ops->card_event)
3053 host->ops->card_event(host);
3054
3055 present = mmc->ops->get_cd(mmc);
3056
3057 spin_lock_irqsave(&host->lock, flags);
3058
3059 /* Check sdhci_has_requests() first in case we are runtime suspended */
3060 if (sdhci_has_requests(host) && !present) {
3061 pr_err("%s: Card removed during transfer!\n",
3062 mmc_hostname(mmc));
3063 pr_err("%s: Resetting controller.\n",
3064 mmc_hostname(mmc));
3065
3066 sdhci_reset_for(host, CARD_REMOVED);
3067
3068 sdhci_error_out_mrqs(host, -ENOMEDIUM);
3069 }
3070
3071 spin_unlock_irqrestore(&host->lock, flags);
3072 }
3073
3074 static const struct mmc_host_ops sdhci_ops = {
3075 .request = sdhci_request,
3076 .post_req = sdhci_post_req,
3077 .pre_req = sdhci_pre_req,
3078 .set_ios = sdhci_set_ios,
3079 .get_cd = sdhci_get_cd,
3080 .get_ro = sdhci_get_ro,
3081 .card_hw_reset = sdhci_hw_reset,
3082 .enable_sdio_irq = sdhci_enable_sdio_irq,
3083 .ack_sdio_irq = sdhci_ack_sdio_irq,
3084 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
3085 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
3086 .execute_tuning = sdhci_execute_tuning,
3087 .card_event = sdhci_card_event,
3088 .card_busy = sdhci_card_busy,
3089 };
3090
3091 /*****************************************************************************\
3092 * *
3093 * Request done *
3094 * *
3095 \*****************************************************************************/
3096
3097 static bool sdhci_request_done(struct sdhci_host *host)
3098 {
3099 unsigned long flags;
3100 struct mmc_request *mrq;
3101 int i;
3102
3103 spin_lock_irqsave(&host->lock, flags);
3104
3105 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3106 mrq = host->mrqs_done[i];
3107 if (mrq)
3108 break;
3109 }
3110
3111 if (!mrq) {
3112 spin_unlock_irqrestore(&host->lock, flags);
3113 return true;
3114 }
3115
3116 /*
3117 * The controller needs a reset of internal state machines
3118 * upon error conditions.
3119 */
3120 if (sdhci_needs_reset(host, mrq)) {
3121 /*
3122 * Do not finish until command and data lines are available for
3123 * reset. Note there can only be one other mrq, so it cannot
3124 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3125 * would both be null.
3126 */
3127 if (host->cmd || host->data_cmd) {
3128 spin_unlock_irqrestore(&host->lock, flags);
3129 return true;
3130 }
3131
3132 /* Some controllers need this kick or reset won't work here */
3133 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3134 /* This is to force an update */
3135 host->ops->set_clock(host, host->clock);
3136
3137 sdhci_reset_for(host, REQUEST_ERROR);
3138
3139 host->pending_reset = false;
3140 }
3141
3142 /*
3143 * Always unmap the data buffers if they were mapped by
3144 * sdhci_prepare_data() whenever we finish with a request.
3145 * This avoids leaking DMA mappings on error.
3146 */
3147 if (host->flags & SDHCI_REQ_USE_DMA) {
3148 struct mmc_data *data = mrq->data;
3149
3150 if (host->use_external_dma && data &&
3151 (mrq->cmd->error || data->error)) {
3152 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3153
3154 host->mrqs_done[i] = NULL;
3155 spin_unlock_irqrestore(&host->lock, flags);
3156 dmaengine_terminate_sync(chan);
3157 spin_lock_irqsave(&host->lock, flags);
3158 sdhci_set_mrq_done(host, mrq);
3159 }
3160
3161 if (data && data->host_cookie == COOKIE_MAPPED) {
3162 if (host->bounce_buffer) {
3163 /*
3164 * On reads, copy the bounced data into the
3165 * sglist
3166 */
3167 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3168 unsigned int length = data->bytes_xfered;
3169
3170 if (length > host->bounce_buffer_size) {
3171 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3172 mmc_hostname(host->mmc),
3173 host->bounce_buffer_size,
3174 data->bytes_xfered);
3175 /* Cap it down and continue */
3176 length = host->bounce_buffer_size;
3177 }
3178 dma_sync_single_for_cpu(
3179 mmc_dev(host->mmc),
3180 host->bounce_addr,
3181 host->bounce_buffer_size,
3182 DMA_FROM_DEVICE);
3183 sg_copy_from_buffer(data->sg,
3184 data->sg_len,
3185 host->bounce_buffer,
3186 length);
3187 } else {
3188 /* No copying, just switch ownership */
3189 dma_sync_single_for_cpu(
3190 mmc_dev(host->mmc),
3191 host->bounce_addr,
3192 host->bounce_buffer_size,
3193 mmc_get_dma_dir(data));
3194 }
3195 } else {
3196 /* Unmap the raw data */
3197 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3198 data->sg_len,
3199 mmc_get_dma_dir(data));
3200 }
3201 data->host_cookie = COOKIE_UNMAPPED;
3202 }
3203 }
3204
3205 host->mrqs_done[i] = NULL;
3206
3207 spin_unlock_irqrestore(&host->lock, flags);
3208
3209 if (host->ops->request_done)
3210 host->ops->request_done(host, mrq);
3211 else
3212 mmc_request_done(host->mmc, mrq);
3213
3214 return false;
3215 }
3216
3217 static void sdhci_complete_work(struct work_struct *work)
3218 {
3219 struct sdhci_host *host = container_of(work, struct sdhci_host,
3220 complete_work);
3221
3222 while (!sdhci_request_done(host))
3223 ;
3224 }
3225
3226 static void sdhci_timeout_timer(struct timer_list *t)
3227 {
3228 struct sdhci_host *host;
3229 unsigned long flags;
3230
3231 host = from_timer(host, t, timer);
3232
3233 spin_lock_irqsave(&host->lock, flags);
3234
3235 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3236 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3237 mmc_hostname(host->mmc));
3238 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3239 sdhci_dumpregs(host);
3240
3241 host->cmd->error = -ETIMEDOUT;
3242 sdhci_finish_mrq(host, host->cmd->mrq);
3243 }
3244
3245 spin_unlock_irqrestore(&host->lock, flags);
3246 }
3247
3248 static void sdhci_timeout_data_timer(struct timer_list *t)
3249 {
3250 struct sdhci_host *host;
3251 unsigned long flags;
3252
3253 host = from_timer(host, t, data_timer);
3254
3255 spin_lock_irqsave(&host->lock, flags);
3256
3257 if (host->data || host->data_cmd ||
3258 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3259 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3260 mmc_hostname(host->mmc));
3261 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3262 sdhci_dumpregs(host);
3263
3264 if (host->data) {
3265 host->data->error = -ETIMEDOUT;
3266 __sdhci_finish_data(host, true);
3267 queue_work(host->complete_wq, &host->complete_work);
3268 } else if (host->data_cmd) {
3269 host->data_cmd->error = -ETIMEDOUT;
3270 sdhci_finish_mrq(host, host->data_cmd->mrq);
3271 } else {
3272 host->cmd->error = -ETIMEDOUT;
3273 sdhci_finish_mrq(host, host->cmd->mrq);
3274 }
3275 }
3276
3277 spin_unlock_irqrestore(&host->lock, flags);
3278 }
3279
3280 /*****************************************************************************\
3281 * *
3282 * Interrupt handling *
3283 * *
3284 \*****************************************************************************/
3285
3286 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3287 {
3288 /* Handle auto-CMD12 error */
3289 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3290 struct mmc_request *mrq = host->data_cmd->mrq;
3291 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3292 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3293 SDHCI_INT_DATA_TIMEOUT :
3294 SDHCI_INT_DATA_CRC;
3295
3296 /* Treat auto-CMD12 error the same as data error */
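/*
 * Promote it to the matching data error bit so that the data IRQ
 * path performs the error handling and recovery.
 */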
3297 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3298 *intmask_p |= data_err_bit;
3299 return;
3300 }
3301 }
3302
3303 if (!host->cmd) {
3304 /*
3305 * SDHCI recovers from errors by resetting the cmd and data
3306 * circuits. Until that is done, there very well might be more
3307 * interrupts, so ignore them in that case.
3308 */
3309 if (host->pending_reset)
3310 return;
3311 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3312 mmc_hostname(host->mmc), (unsigned)intmask);
3313 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3314 sdhci_dumpregs(host);
3315 return;
3316 }
3317
3318 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3319 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3320 if (intmask & SDHCI_INT_TIMEOUT) {
3321 host->cmd->error = -ETIMEDOUT;
3322 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3323 } else {
3324 host->cmd->error = -EILSEQ;
3325 if (!mmc_op_tuning(host->cmd->opcode))
3326 sdhci_err_stats_inc(host, CMD_CRC);
3327 }
3328 /* Treat data command CRC error the same as data CRC error */
3329 if (host->cmd->data &&
3330 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3331 SDHCI_INT_CRC) {
3332 host->cmd = NULL;
3333 *intmask_p |= SDHCI_INT_DATA_CRC;
3334 return;
3335 }
3336
3337 __sdhci_finish_mrq(host, host->cmd->mrq);
3338 return;
3339 }
3340
3341 /* Handle auto-CMD23 error */
3342 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3343 struct mmc_request *mrq = host->cmd->mrq;
3344 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3345 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3346 -ETIMEDOUT :
3347 -EILSEQ;
3348
3349 sdhci_err_stats_inc(host, AUTO_CMD);
3350
3351 if (sdhci_auto_cmd23(host, mrq)) {
3352 mrq->sbc->error = err;
3353 __sdhci_finish_mrq(host, mrq);
3354 return;
3355 }
3356 }
3357
3358 if (intmask & SDHCI_INT_RESPONSE)
3359 sdhci_finish_command(host);
3360 }
3361
3362 static void sdhci_adma_show_error(struct sdhci_host *host)
3363 {
3364 void *desc = host->adma_table;
3365 dma_addr_t dma = host->adma_addr;
3366
3367 sdhci_dumpregs(host);
3368
3369 while (true) {
3370 struct sdhci_adma2_64_desc *dma_desc = desc;
3371
3372 if (host->flags & SDHCI_USE_64_BIT_DMA)
3373 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3374 (unsigned long long)dma,
3375 le32_to_cpu(dma_desc->addr_hi),
3376 le32_to_cpu(dma_desc->addr_lo),
3377 le16_to_cpu(dma_desc->len),
3378 le16_to_cpu(dma_desc->cmd));
3379 else
3380 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3381 (unsigned long long)dma,
3382 le32_to_cpu(dma_desc->addr_lo),
3383 le16_to_cpu(dma_desc->len),
3384 le16_to_cpu(dma_desc->cmd));
3385
3386 desc += host->desc_sz;
3387 dma += host->desc_sz;
3388
3389 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3390 break;
3391 }
3392 }
3393
3394 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3395 {
3396 u32 command;
3397
3398 /*
3399 * CMD19 generates _only_ a Buffer Read Ready interrupt when
3400 * sdhci_send_tuning() is used, so only treat it as tuning
3401 * completion when no data transfer is in progress. In PIO mode
3402 * with mmc_send_tuning(), sdhci_transfer_pio() must still run,
3403 * otherwise SDHCI_INT_DATA_AVAIL stays asserted and causes an irq storm.
3404 */
3405 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
3406 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3407 if (command == MMC_SEND_TUNING_BLOCK ||
3408 command == MMC_SEND_TUNING_BLOCK_HS200) {
3409 host->tuning_done = 1;
3410 wake_up(&host->buf_ready_int);
3411 return;
3412 }
3413 }
3414
3415 if (!host->data) {
3416 struct mmc_command *data_cmd = host->data_cmd;
3417
3418 /*
3419 * The "data complete" interrupt is also used to
3420 * indicate that a busy state has ended. See comment
3421 * above in sdhci_cmd_irq().
3422 */
3423 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3424 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3425 host->data_cmd = NULL;
3426 data_cmd->error = -ETIMEDOUT;
3427 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3428 __sdhci_finish_mrq(host, data_cmd->mrq);
3429 return;
3430 }
3431 if (intmask & SDHCI_INT_DATA_END) {
3432 host->data_cmd = NULL;
3433 /*
3434 * Some cards handle busy-end interrupt
3435 * before the command completed, so make
3436 * sure we do things in the proper order.
3437 */
3438 if (host->cmd == data_cmd)
3439 return;
3440
3441 __sdhci_finish_mrq(host, data_cmd->mrq);
3442 return;
3443 }
3444 }
3445
3446 /*
3447 * SDHCI recovers from errors by resetting the cmd and data
3448 * circuits. Until that is done, there very well might be more
3449 * interrupts, so ignore them in that case.
3450 */
3451 if (host->pending_reset)
3452 return;
3453
3454 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3455 mmc_hostname(host->mmc), (unsigned)intmask);
3456 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3457 sdhci_dumpregs(host);
3458
3459 return;
3460 }
3461
3462 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3463 host->data->error = -ETIMEDOUT;
3464 sdhci_err_stats_inc(host, DAT_TIMEOUT);
3465 } else if (intmask & SDHCI_INT_DATA_END_BIT) {
3466 host->data->error = -EILSEQ;
3467 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3468 sdhci_err_stats_inc(host, DAT_CRC);
3469 } else if ((intmask & SDHCI_INT_DATA_CRC) &&
3470 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3471 != MMC_BUS_TEST_R) {
3472 host->data->error = -EILSEQ;
3473 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3474 sdhci_err_stats_inc(host, DAT_CRC);
3475 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
3476 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3477 intmask);
3478 sdhci_adma_show_error(host);
3479 sdhci_err_stats_inc(host, ADMA);
3480 host->data->error = -EIO;
3481 if (host->ops->adma_workaround)
3482 host->ops->adma_workaround(host, intmask);
3483 }
3484
3485 if (host->data->error)
3486 sdhci_finish_data(host);
3487 else {
3488 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3489 sdhci_transfer_pio(host);
3490
3491 /*
3492 * We currently don't do anything fancy with DMA
3493 * boundaries, but as we can't disable the feature
3494 * we need to at least restart the transfer.
3495 *
3496 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3497 * should return a valid address to continue from, but as
3498 * some controllers are faulty, don't trust them.
3499 */
3500 if (intmask & SDHCI_INT_DMA_END) {
3501 dma_addr_t dmastart, dmanow;
3502
3503 dmastart = sdhci_sdma_address(host);
3504 dmanow = dmastart + host->data->bytes_xfered;
3505 /*
3506 * Force update to the next DMA block boundary.
3507 */
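/*
 * For example, with the default 512 KiB SDMA buffer boundary, the
 * transfer restarts at the next 512 KiB-aligned system address.
 */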
3508 dmanow = (dmanow &
3509 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3510 SDHCI_DEFAULT_BOUNDARY_SIZE;
3511 host->data->bytes_xfered = dmanow - dmastart;
3512 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3513 &dmastart, host->data->bytes_xfered, &dmanow);
3514 sdhci_set_sdma_addr(host, dmanow);
3515 }
3516
3517 if (intmask & SDHCI_INT_DATA_END) {
3518 if (host->cmd == host->data_cmd) {
3519 /*
3520 * Data managed to finish before the
3521 * command completed. Make sure we do
3522 * things in the proper order.
3523 */
3524 host->data_early = 1;
3525 } else {
3526 sdhci_finish_data(host);
3527 }
3528 }
3529 }
3530 }
3531
3532 static inline bool sdhci_defer_done(struct sdhci_host *host,
3533 struct mmc_request *mrq)
3534 {
3535 struct mmc_data *data = mrq->data;
3536
3537 return host->pending_reset || host->always_defer_done ||
3538 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3539 data->host_cookie == COOKIE_MAPPED);
3540 }
3541
3542 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3543 {
3544 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3545 irqreturn_t result = IRQ_NONE;
3546 struct sdhci_host *host = dev_id;
3547 u32 intmask, mask, unexpected = 0;
3548 int max_loops = 16;
3549 int i;
3550
3551 spin_lock(&host->lock);
3552
3553 if (host->runtime_suspended) {
3554 spin_unlock(&host->lock);
3555 return IRQ_NONE;
3556 }
3557
3558 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3559 if (!intmask || intmask == 0xffffffff) {
3560 result = IRQ_NONE;
3561 goto out;
3562 }
3563
3564 do {
3565 DBG("IRQ status 0x%08x\n", intmask);
3566
3567 if (host->ops->irq) {
3568 intmask = host->ops->irq(host, intmask);
3569 if (!intmask)
3570 goto cont;
3571 }
3572
3573 /* Clear selected interrupts. */
3574 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3575 SDHCI_INT_BUS_POWER);
3576 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3577
3578 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3579 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3580 SDHCI_CARD_PRESENT;
3581
3582 /*
3583 * There is an observation on i.MX eSDHC: the INSERT bit is
3584 * set again immediately after being cleared while a card is
3585 * inserted. We have to mask the interrupt to prevent an
3586 * interrupt storm that would freeze the system, and the
3587 * REMOVE bit behaves the same way.
3588 *
3589 * More testing is needed here to ensure this works for
3590 * other platforms though.
3592 */
3593 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3594 SDHCI_INT_CARD_REMOVE);
3595 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3596 SDHCI_INT_CARD_INSERT;
3597 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3598 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3599
3600 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3601 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3602
3603 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3604 SDHCI_INT_CARD_REMOVE);
3605 result = IRQ_WAKE_THREAD;
3606 }
3607
3608 if (intmask & SDHCI_INT_CMD_MASK)
3609 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3610
3611 if (intmask & SDHCI_INT_DATA_MASK)
3612 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3613
3614 if (intmask & SDHCI_INT_BUS_POWER)
3615 pr_err("%s: Card is consuming too much power!\n",
3616 mmc_hostname(host->mmc));
3617
3618 if (intmask & SDHCI_INT_RETUNE)
3619 mmc_retune_needed(host->mmc);
3620
3621 if ((intmask & SDHCI_INT_CARD_INT) &&
3622 (host->ier & SDHCI_INT_CARD_INT)) {
3623 sdhci_enable_sdio_irq_nolock(host, false);
3624 sdio_signal_irq(host->mmc);
3625 }
3626
3627 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3628 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3629 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3630 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3631
3632 if (intmask) {
3633 unexpected |= intmask;
3634 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3635 }
3636 cont:
3637 if (result == IRQ_NONE)
3638 result = IRQ_HANDLED;
3639
3640 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3641 } while (intmask && --max_loops);
3642
3643 /* Determine if mrqs can be completed immediately */
3644 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3645 struct mmc_request *mrq = host->mrqs_done[i];
3646
3647 if (!mrq)
3648 continue;
3649
3650 if (sdhci_defer_done(host, mrq)) {
3651 result = IRQ_WAKE_THREAD;
3652 } else {
3653 mrqs_done[i] = mrq;
3654 host->mrqs_done[i] = NULL;
3655 }
3656 }
3657 out:
3658 if (host->deferred_cmd)
3659 result = IRQ_WAKE_THREAD;
3660
3661 spin_unlock(&host->lock);
3662
3663 /* Process mrqs ready for immediate completion */
3664 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3665 if (!mrqs_done[i])
3666 continue;
3667
3668 if (host->ops->request_done)
3669 host->ops->request_done(host, mrqs_done[i]);
3670 else
3671 mmc_request_done(host->mmc, mrqs_done[i]);
3672 }
3673
3674 if (unexpected) {
3675 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3676 mmc_hostname(host->mmc), unexpected);
3677 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3678 sdhci_dumpregs(host);
3679 }
3680
3681 return result;
3682 }
3683
3684 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3685 {
3686 struct sdhci_host *host = dev_id;
3687 struct mmc_command *cmd;
3688 unsigned long flags;
3689 u32 isr;
3690
3691 while (!sdhci_request_done(host))
3692 ;
3693
3694 spin_lock_irqsave(&host->lock, flags);
3695
3696 isr = host->thread_isr;
3697 host->thread_isr = 0;
3698
3699 cmd = host->deferred_cmd;
3700 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3701 sdhci_finish_mrq(host, cmd->mrq);
3702
3703 spin_unlock_irqrestore(&host->lock, flags);
3704
3705 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3706 struct mmc_host *mmc = host->mmc;
3707
3708 mmc->ops->card_event(mmc);
3709 mmc_detect_change(mmc, msecs_to_jiffies(200));
3710 }
3711
3712 return IRQ_HANDLED;
3713 }
3714
3715 /*****************************************************************************\
3716 * *
3717 * Suspend/resume *
3718 * *
3719 \*****************************************************************************/
3720
3721 #ifdef CONFIG_PM
3722
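/*
 * Card-detect interrupts can act as a wakeup source only when the slot is
 * removable, card detection is not marked broken and no GPIO is used for
 * card detect.
 */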
3723 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3724 {
3725 return mmc_card_is_removable(host->mmc) &&
3726 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3727 !mmc_can_gpio_cd(host->mmc);
3728 }
3729
3730 /*
3731 * To enable wakeup events, the corresponding events have to be enabled in
3732 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3733 * Table' in the SD Host Controller Standard Specification.
3734 * It is useless to restore SDHCI_INT_ENABLE state in
3735 * sdhci_disable_irq_wakeups() since it will be set by
3736 * sdhci_enable_card_detection() or sdhci_init().
3737 */
3738 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3739 {
3740 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3741 SDHCI_WAKE_ON_INT;
3742 u32 irq_val = 0;
3743 u8 wake_val = 0;
3744 u8 val;
3745
3746 if (sdhci_cd_irq_can_wakeup(host)) {
3747 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3748 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3749 }
3750
3751 if (mmc_card_wake_sdio_irq(host->mmc)) {
3752 wake_val |= SDHCI_WAKE_ON_INT;
3753 irq_val |= SDHCI_INT_CARD_INT;
3754 }
3755
3756 if (!irq_val)
3757 return false;
3758
3759 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3760 val &= ~mask;
3761 val |= wake_val;
3762 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3763
3764 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3765
3766 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3767
3768 return host->irq_wake_enabled;
3769 }
3770
3771 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3772 {
3773 u8 val;
3774 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3775 | SDHCI_WAKE_ON_INT;
3776
3777 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3778 val &= ~mask;
3779 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3780
3781 disable_irq_wake(host->irq);
3782
3783 host->irq_wake_enabled = false;
3784 }
3785
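/*
 * sdhci_suspend_host() - suspend the host controller
 *
 * Card detection is disabled and the re-tuning timer is stopped. If the
 * device may not wake the system, or the wakeup interrupts cannot be armed,
 * all interrupts are masked and the IRQ is freed; otherwise the wakeup
 * sources configured by sdhci_enable_irq_wakeups() are left enabled.
 *
 * Illustrative sketch (not part of this driver): a glue driver that stores
 * the sdhci_host as its driver data would typically call this from its
 * system suspend callback, for example in a hypothetical "my_sdhci" driver:
 *
 *	static int my_sdhci_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 * paired with sdhci_resume_host() in the matching resume callback.
 */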
3786 int sdhci_suspend_host(struct sdhci_host *host)
3787 {
3788 sdhci_disable_card_detection(host);
3789
3790 mmc_retune_timer_stop(host->mmc);
3791
3792 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3793 !sdhci_enable_irq_wakeups(host)) {
3794 host->ier = 0;
3795 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3796 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3797 free_irq(host->irq, host);
3798 }
3799
3800 return 0;
3801 }
3802
3803 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3804
3805 int sdhci_resume_host(struct sdhci_host *host)
3806 {
3807 struct mmc_host *mmc = host->mmc;
3808 int ret = 0;
3809
3810 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3811 if (host->ops->enable_dma)
3812 host->ops->enable_dma(host);
3813 }
3814
3815 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3816 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3817 /* Card keeps power but host controller does not */
3818 sdhci_init(host, 0);
3819 host->pwr = 0;
3820 host->clock = 0;
3821 host->reinit_uhs = true;
3822 mmc->ops->set_ios(mmc, &mmc->ios);
3823 } else {
3824 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
3825 }
3826
3827 if (host->irq_wake_enabled) {
3828 sdhci_disable_irq_wakeups(host);
3829 } else {
3830 ret = request_threaded_irq(host->irq, sdhci_irq,
3831 sdhci_thread_irq, IRQF_SHARED,
3832 mmc_hostname(mmc), host);
3833 if (ret)
3834 return ret;
3835 }
3836
3837 sdhci_enable_card_detection(host);
3838
3839 return ret;
3840 }
3841
3842 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3843
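/*
 * sdhci_runtime_suspend_host() - runtime suspend the host controller
 *
 * Stops the re-tuning timer, masks all interrupts except the SDIO card
 * interrupt and marks the host runtime-suspended. The counterpart,
 * sdhci_runtime_resume_host(), re-initializes the controller and, if the
 * card is still powered, forces clock and power to be reprogrammed.
 *
 * Illustrative sketch (not part of this driver, names are hypothetical):
 * glue drivers usually pass soft_reset = 0 on resume:
 *
 *	static int my_sdhci_runtime_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_resume_host(host, 0);
 *	}
 */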
3844 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3845 {
3846 unsigned long flags;
3847
3848 mmc_retune_timer_stop(host->mmc);
3849
3850 spin_lock_irqsave(&host->lock, flags);
3851 host->ier &= SDHCI_INT_CARD_INT;
3852 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3853 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3854 spin_unlock_irqrestore(&host->lock, flags);
3855
3856 synchronize_hardirq(host->irq);
3857
3858 spin_lock_irqsave(&host->lock, flags);
3859 host->runtime_suspended = true;
3860 spin_unlock_irqrestore(&host->lock, flags);
3861
3862 return 0;
3863 }
3864 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3865
3866 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3867 {
3868 struct mmc_host *mmc = host->mmc;
3869 unsigned long flags;
3870 int host_flags = host->flags;
3871
3872 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3873 if (host->ops->enable_dma)
3874 host->ops->enable_dma(host);
3875 }
3876
3877 sdhci_init(host, soft_reset);
3878
3879 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3880 mmc->ios.power_mode != MMC_POWER_OFF) {
3881 /* Force clock and power re-program */
3882 host->pwr = 0;
3883 host->clock = 0;
3884 host->reinit_uhs = true;
3885 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3886 mmc->ops->set_ios(mmc, &mmc->ios);
3887
3888 if ((host_flags & SDHCI_PV_ENABLED) &&
3889 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3890 spin_lock_irqsave(&host->lock, flags);
3891 sdhci_enable_preset_value(host, true);
3892 spin_unlock_irqrestore(&host->lock, flags);
3893 }
3894
3895 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3896 mmc->ops->hs400_enhanced_strobe)
3897 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3898 }
3899
3900 spin_lock_irqsave(&host->lock, flags);
3901
3902 host->runtime_suspended = false;
3903
3904 /* Enable SDIO IRQ */
3905 if (sdio_irq_claimed(mmc))
3906 sdhci_enable_sdio_irq_nolock(host, true);
3907
3908 /* Enable Card Detection */
3909 sdhci_enable_card_detection(host);
3910
3911 spin_unlock_irqrestore(&host->lock, flags);
3912
3913 return 0;
3914 }
3915 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3916
3917 #endif /* CONFIG_PM */
3918
3919 /*****************************************************************************\
3920 * *
3921 * Command Queue Engine (CQE) helpers *
3922 * *
3923 \*****************************************************************************/
3924
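/*
 * sdhci_cqe_enable() - prepare the host for Command Queue Engine use
 *
 * Selects a suitable ADMA mode (ADMA3 on v4.10+ hosts that advertise it),
 * programs a 512-byte block size, sets the maximum data timeout and
 * switches the interrupt mask to host->cqe_ier. Its counterpart,
 * sdhci_cqe_disable(), restores the default interrupt mask and optionally
 * resets the controller for CQE recovery.
 */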
3925 void sdhci_cqe_enable(struct mmc_host *mmc)
3926 {
3927 struct sdhci_host *host = mmc_priv(mmc);
3928 unsigned long flags;
3929 u8 ctrl;
3930
3931 spin_lock_irqsave(&host->lock, flags);
3932
3933 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3934 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3935 /*
3936 * Hosts from v4.10 onwards support the ADMA3 DMA type.
3937 * ADMA3 uses integrated descriptors, better suited for command
3938 * queuing since command and transfer descriptors are fetched together.
3939 */
3940 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3941 ctrl |= SDHCI_CTRL_ADMA3;
3942 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3943 ctrl |= SDHCI_CTRL_ADMA64;
3944 else
3945 ctrl |= SDHCI_CTRL_ADMA32;
3946 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3947
3948 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3949 SDHCI_BLOCK_SIZE);
3950
3951 /* Set maximum timeout */
3952 sdhci_set_timeout(host, NULL);
3953
3954 host->ier = host->cqe_ier;
3955
3956 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3957 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3958
3959 host->cqe_on = true;
3960
3961 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3962 mmc_hostname(mmc), host->ier,
3963 sdhci_readl(host, SDHCI_INT_STATUS));
3964
3965 spin_unlock_irqrestore(&host->lock, flags);
3966 }
3967 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3968
3969 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3970 {
3971 struct sdhci_host *host = mmc_priv(mmc);
3972 unsigned long flags;
3973
3974 spin_lock_irqsave(&host->lock, flags);
3975
3976 sdhci_set_default_irqs(host);
3977
3978 host->cqe_on = false;
3979
3980 if (recovery)
3981 sdhci_reset_for(host, CQE_RECOVERY);
3982
3983 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3984 mmc_hostname(mmc), host->ier,
3985 sdhci_readl(host, SDHCI_INT_STATUS));
3986
3987 spin_unlock_irqrestore(&host->lock, flags);
3988 }
3989 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3990
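/*
 * sdhci_cqe_irq() - decode an interrupt while CQE is active
 *
 * Returns false when CQE is not enabled so the caller can fall back to the
 * normal interrupt path. Otherwise command and data errors are decoded
 * into *cmd_error and *data_error (0 means no error), the handled status
 * bits are cleared, and true is returned so the caller can hand the result
 * to its CQE code.
 */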
3991 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3992 int *data_error)
3993 {
3994 u32 mask;
3995
3996 if (!host->cqe_on)
3997 return false;
3998
3999 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
4000 *cmd_error = -EILSEQ;
4001 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
4002 sdhci_err_stats_inc(host, CMD_CRC);
4003 } else if (intmask & SDHCI_INT_TIMEOUT) {
4004 *cmd_error = -ETIMEDOUT;
4005 sdhci_err_stats_inc(host, CMD_TIMEOUT);
4006 } else
4007 *cmd_error = 0;
4008
4009 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
4010 *data_error = -EILSEQ;
4011 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
4012 sdhci_err_stats_inc(host, DAT_CRC);
4013 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
4014 *data_error = -ETIMEDOUT;
4015 sdhci_err_stats_inc(host, DAT_TIMEOUT);
4016 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
4017 *data_error = -EIO;
4018 sdhci_err_stats_inc(host, ADMA);
4019 } else
4020 *data_error = 0;
4021
4022 /* Clear selected interrupts. */
4023 mask = intmask & host->cqe_ier;
4024 sdhci_writel(host, mask, SDHCI_INT_STATUS);
4025
4026 if (intmask & SDHCI_INT_BUS_POWER)
4027 pr_err("%s: Card is consuming too much power!\n",
4028 mmc_hostname(host->mmc));
4029
4030 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
4031 if (intmask) {
4032 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
4033 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
4034 mmc_hostname(host->mmc), intmask);
4035 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
4036 sdhci_dumpregs(host);
4037 }
4038
4039 return true;
4040 }
4041 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
4042
4043 /*****************************************************************************\
4044 * *
4045 * Device allocation/registration *
4046 * *
4047 \*****************************************************************************/
4048
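/*
 * sdhci_alloc_host() - allocate an mmc_host with an embedded sdhci_host
 *
 * @priv_size bytes of driver-private data are allocated after the
 * sdhci_host structure. Sensible defaults are set up (3.3V signalling,
 * CQE interrupt masks, tuning loop count, SDMA boundary and ADMA
 * descriptor count); the caller is expected to fill in the I/O address,
 * IRQ and any quirks before registering the host.
 */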
4049 struct sdhci_host *sdhci_alloc_host(struct device *dev,
4050 size_t priv_size)
4051 {
4052 struct mmc_host *mmc;
4053 struct sdhci_host *host;
4054
4055 WARN_ON(dev == NULL);
4056
4057 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
4058 if (!mmc)
4059 return ERR_PTR(-ENOMEM);
4060
4061 host = mmc_priv(mmc);
4062 host->mmc = mmc;
4063 host->mmc_host_ops = sdhci_ops;
4064 mmc->ops = &host->mmc_host_ops;
4065
4066 host->flags = SDHCI_SIGNALING_330;
4067
4068 host->cqe_ier = SDHCI_CQE_INT_MASK;
4069 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
4070
4071 host->tuning_delay = -1;
4072 host->tuning_loop_count = MAX_TUNING_LOOP;
4073
4074 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
4075
4076 /*
4077 * The DMA table descriptor count is calculated as the maximum
4078 * number of segments times 2, to allow for an alignment
4079 * descriptor for each segment, plus 1 for a nop end descriptor.
4080 */
4081 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
4082 host->max_adma = 65536;
4083
4084 host->max_timeout_count = 0xE;
4085
4086 return host;
4087 }
4088
4089 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
4090
4091 static int sdhci_set_dma_mask(struct sdhci_host *host)
4092 {
4093 struct mmc_host *mmc = host->mmc;
4094 struct device *dev = mmc_dev(mmc);
4095 int ret = -EINVAL;
4096
4097 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
4098 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4099
4100 /* Try 64-bit mask if hardware is capable of it */
4101 if (host->flags & SDHCI_USE_64_BIT_DMA) {
4102 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4103 if (ret) {
4104 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
4105 mmc_hostname(mmc));
4106 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4107 }
4108 }
4109
4110 /* 32-bit mask as default & fallback */
4111 if (ret) {
4112 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4113 if (ret)
4114 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
4115 mmc_hostname(mmc));
4116 }
4117
4118 return ret;
4119 }
4120
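/*
 * __sdhci_read_caps() - read (or override) the version and capability registers
 *
 * Non-NULL @ver, @caps and @caps1 override the values read from hardware.
 * When the hardware values are used, they are masked and augmented by the
 * "sdhci-caps-mask" and "sdhci-caps" device properties. Hosts with
 * SDHCI_QUIRK_MISSING_CAPS skip the capability reads. The registers are
 * only read once; repeated calls return immediately.
 */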
4121 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
4122 const u32 *caps, const u32 *caps1)
4123 {
4124 u16 v;
4125 u64 dt_caps_mask = 0;
4126 u64 dt_caps = 0;
4127
4128 if (host->read_caps)
4129 return;
4130
4131 host->read_caps = true;
4132
4133 if (debug_quirks)
4134 host->quirks = debug_quirks;
4135
4136 if (debug_quirks2)
4137 host->quirks2 = debug_quirks2;
4138
4139 sdhci_reset_for_all(host);
4140
4141 if (host->v4_mode)
4142 sdhci_do_enable_v4_mode(host);
4143
4144 device_property_read_u64(mmc_dev(host->mmc),
4145 "sdhci-caps-mask", &dt_caps_mask);
4146 device_property_read_u64(mmc_dev(host->mmc),
4147 "sdhci-caps", &dt_caps);
4148
4149 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4150 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4151
4152 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4153 return;
4154
4155 if (caps) {
4156 host->caps = *caps;
4157 } else {
4158 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4159 host->caps &= ~lower_32_bits(dt_caps_mask);
4160 host->caps |= lower_32_bits(dt_caps);
4161 }
4162
4163 if (host->version < SDHCI_SPEC_300)
4164 return;
4165
4166 if (caps1) {
4167 host->caps1 = *caps1;
4168 } else {
4169 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4170 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4171 host->caps1 |= upper_32_bits(dt_caps);
4172 }
4173 }
4174 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
4175
4176 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4177 {
4178 struct mmc_host *mmc = host->mmc;
4179 unsigned int max_blocks;
4180 unsigned int bounce_size;
4181 int ret;
4182
4183 /*
4184 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4185 * has diminishing returns, probably because SD/MMC cards are
4186 * usually optimized to handle requests of this size.
4187 */
4188 bounce_size = SZ_64K;
4189 /*
4190 * Adjust the bounce buffer size downwards to the maximum
4191 * request size if that is smaller than the buffer size chosen
4192 * above.
4193 */
4194 if (mmc->max_req_size < bounce_size)
4195 bounce_size = mmc->max_req_size;
4196 max_blocks = bounce_size / 512;
4197
4198 /*
4199 * When we only support one segment, a bounce buffer that groups
4200 * scattered reads/writes together can give a significant
4201 * speedup.
4202 */
4203 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
4204 bounce_size,
4205 GFP_KERNEL);
4206 if (!host->bounce_buffer) {
4207 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4208 mmc_hostname(mmc),
4209 bounce_size);
4210 /*
4211 * Exiting with zero here makes sure we proceed with
4212 * mmc->max_segs == 1.
4213 */
4214 return;
4215 }
4216
4217 host->bounce_addr = dma_map_single(mmc_dev(mmc),
4218 host->bounce_buffer,
4219 bounce_size,
4220 DMA_BIDIRECTIONAL);
4221 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
4222 if (ret) {
4223 devm_kfree(mmc_dev(mmc), host->bounce_buffer);
4224 host->bounce_buffer = NULL;
4225 /* Again fall back to max_segs == 1 */
4226 return;
4227 }
4228
4229 host->bounce_buffer_size = bounce_size;
4230
4231 /* Lie about this since we're bouncing */
4232 mmc->max_segs = max_blocks;
4233 mmc->max_seg_size = bounce_size;
4234 mmc->max_req_size = bounce_size;
4235
4236 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4237 mmc_hostname(mmc), max_blocks, bounce_size);
4238 }
4239
4240 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4241 {
4242 /*
4243 * According to the SD Host Controller spec v4.10, bit[27] of the
4244 * Capabilities Register (added in version 4.10) indicates 64-bit
4245 * System Address support for V4 mode.
4246 */
4247 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4248 return host->caps & SDHCI_CAN_64BIT_V4;
4249
4250 return host->caps & SDHCI_CAN_64BIT;
4251 }
4252
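/*
 * sdhci_setup_host() - read capabilities and derive mmc host parameters
 *
 * Selects SDMA/ADMA/external DMA, allocates the ADMA descriptor tables,
 * and works out clock limits, timeouts, bus widths, UHS modes, voltages
 * and request/segment sizes for the mmc core. On success the caller must
 * either continue with __sdhci_add_host() or undo this step with
 * sdhci_cleanup_host().
 */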
4253 int sdhci_setup_host(struct sdhci_host *host)
4254 {
4255 struct mmc_host *mmc;
4256 u32 max_current_caps;
4257 unsigned int ocr_avail;
4258 unsigned int override_timeout_clk;
4259 u32 max_clk;
4260 int ret = 0;
4261 bool enable_vqmmc = false;
4262
4263 WARN_ON(host == NULL);
4264 if (host == NULL)
4265 return -EINVAL;
4266
4267 mmc = host->mmc;
4268
4269 /*
4270 * If there are external regulators, get them. Note this must be done
4271 * early before resetting the host and reading the capabilities so that
4272 * the host can take the appropriate action if regulators are not
4273 * available.
4274 */
4275 if (!mmc->supply.vqmmc) {
4276 ret = mmc_regulator_get_supply(mmc);
4277 if (ret)
4278 return ret;
4279 enable_vqmmc = true;
4280 }
4281
4282 DBG("Version: 0x%08x | Present: 0x%08x\n",
4283 sdhci_readw(host, SDHCI_HOST_VERSION),
4284 sdhci_readl(host, SDHCI_PRESENT_STATE));
4285 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4286 sdhci_readl(host, SDHCI_CAPABILITIES),
4287 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4288
4289 sdhci_read_caps(host);
4290
4291 override_timeout_clk = host->timeout_clk;
4292
4293 if (host->version > SDHCI_SPEC_420) {
4294 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4295 mmc_hostname(mmc), host->version);
4296 }
4297
4298 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4299 host->flags |= SDHCI_USE_SDMA;
4300 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4301 DBG("Controller doesn't have SDMA capability\n");
4302 else
4303 host->flags |= SDHCI_USE_SDMA;
4304
4305 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4306 (host->flags & SDHCI_USE_SDMA)) {
4307 DBG("Disabling DMA as it is marked broken\n");
4308 host->flags &= ~SDHCI_USE_SDMA;
4309 }
4310
4311 if ((host->version >= SDHCI_SPEC_200) &&
4312 (host->caps & SDHCI_CAN_DO_ADMA2))
4313 host->flags |= SDHCI_USE_ADMA;
4314
4315 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4316 (host->flags & SDHCI_USE_ADMA)) {
4317 DBG("Disabling ADMA as it is marked broken\n");
4318 host->flags &= ~SDHCI_USE_ADMA;
4319 }
4320
4321 if (sdhci_can_64bit_dma(host))
4322 host->flags |= SDHCI_USE_64_BIT_DMA;
4323
4324 if (host->use_external_dma) {
4325 ret = sdhci_external_dma_init(host);
4326 if (ret == -EPROBE_DEFER)
4327 goto unreg;
4328 /*
4329 * Fall back to using the DMA/PIO integrated in the standard
4330 * SDHCI host instead of an external DMA device.
4331 */
4332 else if (ret)
4333 sdhci_switch_external_dma(host, false);
4334 /* Disable internal DMA sources */
4335 else
4336 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4337 }
4338
4339 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4340 if (host->ops->set_dma_mask)
4341 ret = host->ops->set_dma_mask(host);
4342 else
4343 ret = sdhci_set_dma_mask(host);
4344
4345 if (!ret && host->ops->enable_dma)
4346 ret = host->ops->enable_dma(host);
4347
4348 if (ret) {
4349 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4350 mmc_hostname(mmc));
4351 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4352
4353 ret = 0;
4354 }
4355 }
4356
4357 /* SDMA does not support 64-bit DMA if v4 mode not set */
4358 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4359 host->flags &= ~SDHCI_USE_SDMA;
4360
4361 if (host->flags & SDHCI_USE_ADMA) {
4362 dma_addr_t dma;
4363 void *buf;
4364
4365 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4366 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4367 else if (!host->alloc_desc_sz)
4368 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4369
4370 host->desc_sz = host->alloc_desc_sz;
4371 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4372
4373 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4374 /*
4375 * The allocation below returns zeroed memory, so the reserved
4376 * high 32-bits of 128-bit descriptors never need to be written.
4377 */
4378 buf = dma_alloc_coherent(mmc_dev(mmc),
4379 host->align_buffer_sz + host->adma_table_sz,
4380 &dma, GFP_KERNEL);
4381 if (!buf) {
4382 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4383 mmc_hostname(mmc));
4384 host->flags &= ~SDHCI_USE_ADMA;
4385 } else if ((dma + host->align_buffer_sz) &
4386 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4387 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4388 mmc_hostname(mmc));
4389 host->flags &= ~SDHCI_USE_ADMA;
4390 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4391 host->adma_table_sz, buf, dma);
4392 } else {
4393 host->align_buffer = buf;
4394 host->align_addr = dma;
4395
4396 host->adma_table = buf + host->align_buffer_sz;
4397 host->adma_addr = dma + host->align_buffer_sz;
4398 }
4399 }
4400
4401 /*
4402 * If we use DMA, then it's up to the caller to set the DMA
4403 * mask, but PIO does not need the hw shim so we set a new
4404 * mask here in that case.
4405 */
4406 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4407 host->dma_mask = DMA_BIT_MASK(64);
4408 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4409 }
4410
4411 if (host->version >= SDHCI_SPEC_300)
4412 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4413 else
4414 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4415
4416 host->max_clk *= 1000000;
4417 if (host->max_clk == 0 || host->quirks &
4418 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4419 if (!host->ops->get_max_clock) {
4420 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4421 mmc_hostname(mmc));
4422 ret = -ENODEV;
4423 goto undma;
4424 }
4425 host->max_clk = host->ops->get_max_clock(host);
4426 }
4427
4428 /*
4429 * In case of Host Controller v3.00, find out whether clock
4430 * multiplier is supported.
4431 */
4432 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4433
4434 /*
4435 * If the value of the Clock Multiplier field is 0, programmable
4436 * clock mode is not supported; otherwise the actual clock
4437 * multiplier is one more than the value of Clock Multiplier
4438 * in the Capabilities Register.
4439 */
4440 if (host->clk_mul)
4441 host->clk_mul += 1;
4442
4443 /*
4444 * Set host parameters.
4445 */
4446 max_clk = host->max_clk;
4447
4448 if (host->ops->get_min_clock)
4449 mmc->f_min = host->ops->get_min_clock(host);
4450 else if (host->version >= SDHCI_SPEC_300) {
4451 if (host->clk_mul)
4452 max_clk = host->max_clk * host->clk_mul;
4453 /*
4454 * Divided Clock Mode minimum clock rate is always less than
4455 * Programmable Clock Mode minimum clock rate.
4456 */
4457 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4458 } else
4459 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4460
4461 if (!mmc->f_max || mmc->f_max > max_clk)
4462 mmc->f_max = max_clk;
4463
4464 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4465 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4466
4467 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4468 host->timeout_clk *= 1000;
4469
4470 if (host->timeout_clk == 0) {
4471 if (!host->ops->get_timeout_clock) {
4472 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4473 mmc_hostname(mmc));
4474 ret = -ENODEV;
4475 goto undma;
4476 }
4477
4478 host->timeout_clk =
4479 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4480 1000);
4481 }
4482
4483 if (override_timeout_clk)
4484 host->timeout_clk = override_timeout_clk;
4485
4486 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4487 host->ops->get_max_timeout_count(host) : 1 << 27;
4488 mmc->max_busy_timeout /= host->timeout_clk;
4489 }
4490
4491 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4492 !host->ops->get_max_timeout_count)
4493 mmc->max_busy_timeout = 0;
4494
4495 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4496 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4497
4498 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4499 host->flags |= SDHCI_AUTO_CMD12;
4500
4501 /*
4502 * For v3 mode, Auto-CMD23 only works in ADMA or PIO mode.
4503 * For v4 mode, SDMA may use Auto-CMD23 as well.
4504 */
4505 if ((host->version >= SDHCI_SPEC_300) &&
4506 ((host->flags & SDHCI_USE_ADMA) ||
4507 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4508 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4509 host->flags |= SDHCI_AUTO_CMD23;
4510 DBG("Auto-CMD23 available\n");
4511 } else {
4512 DBG("Auto-CMD23 unavailable\n");
4513 }
4514
4515 /*
4516 * A controller may support 8-bit width, but the board itself
4517 * might not have the pins brought out. Boards that support
4518 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4519 * their platform code before calling sdhci_add_host(), and we
4520 * won't assume 8-bit width for hosts without that CAP.
4521 */
4522 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4523 mmc->caps |= MMC_CAP_4_BIT_DATA;
4524
4525 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4526 mmc->caps &= ~MMC_CAP_CMD23;
4527
4528 if (host->caps & SDHCI_CAN_DO_HISPD)
4529 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4530
4531 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4532 mmc_card_is_removable(mmc) &&
4533 mmc_gpio_get_cd(mmc) < 0)
4534 mmc->caps |= MMC_CAP_NEEDS_POLL;
4535
4536 if (!IS_ERR(mmc->supply.vqmmc)) {
4537 if (enable_vqmmc) {
4538 ret = regulator_enable(mmc->supply.vqmmc);
4539 host->sdhci_core_to_disable_vqmmc = !ret;
4540 }
4541
4542 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4543 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4544 1950000))
4545 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4546 SDHCI_SUPPORT_SDR50 |
4547 SDHCI_SUPPORT_DDR50);
4548
4549 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4550 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4551 3600000))
4552 host->flags &= ~SDHCI_SIGNALING_330;
4553
4554 if (ret) {
4555 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4556 mmc_hostname(mmc), ret);
4557 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4558 }
4559
4560 }
4561
4562 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4563 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4564 SDHCI_SUPPORT_DDR50);
4565 /*
4566 * The SDHCI controller in a SoC might support HS200/HS400
4567 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4568 * but if the board is designed such that the IO lines are not
4569 * connected to 1.8V then HS200/HS400 cannot be supported.
4570 * Disable HS200/HS400 if the board does not have 1.8V connected
4571 * to the IO lines. The same applies to the other 1.8V modes.
4572 */
4573 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4574 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4575 }
4576
4577 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4578 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4579 SDHCI_SUPPORT_DDR50))
4580 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4581
4582 /* SDR104 support also implies SDR50 support */
4583 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4584 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4585 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4586 * field can be promoted to support HS200.
4587 */
4588 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4589 mmc->caps2 |= MMC_CAP2_HS200;
4590 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4591 mmc->caps |= MMC_CAP_UHS_SDR50;
4592 }
4593
4594 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4595 (host->caps1 & SDHCI_SUPPORT_HS400))
4596 mmc->caps2 |= MMC_CAP2_HS400;
4597
4598 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4599 (IS_ERR(mmc->supply.vqmmc) ||
4600 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4601 1300000)))
4602 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4603
4604 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4605 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4606 mmc->caps |= MMC_CAP_UHS_DDR50;
4607
4608 /* Does the host need tuning for SDR50? */
4609 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4610 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4611
4612 /* Driver Type(s) (A, C, D) supported by the host */
4613 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4614 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4615 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4616 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4617 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4618 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4619
4620 /* Initial value for re-tuning timer count */
4621 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4622 host->caps1);
4623
4624 /*
4625 * In case Re-tuning Timer is not disabled, the actual value of
4626 * re-tuning timer will be 2 ^ (n - 1).
4627 */
4628 if (host->tuning_count)
4629 host->tuning_count = 1 << (host->tuning_count - 1);
4630
4631 /* Re-tuning mode supported by the Host Controller */
4632 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4633
4634 ocr_avail = 0;
4635
4636 /*
4637 * According to SD Host Controller spec v3.00, if the Host System
4638 * can supply more than 150mA, the Host Driver should set XPC to 1.
4639 * The value is meaningful only if Voltage Support in the
4640 * Capabilities register is set. The actual current value is 4 times
4641 * the register value.
4642 */
4643 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4644 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4645 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4646 if (curr > 0) {
4647
4648 /* convert to SDHCI_MAX_CURRENT format */
4649 curr = curr/1000; /* convert to mA */
4650 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4651
4652 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4653 max_current_caps =
4654 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4655 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4656 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4657 }
4658 }
4659
4660 if (host->caps & SDHCI_CAN_VDD_330) {
4661 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4662
4663 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4664 max_current_caps) *
4665 SDHCI_MAX_CURRENT_MULTIPLIER;
4666 }
4667 if (host->caps & SDHCI_CAN_VDD_300) {
4668 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4669
4670 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4671 max_current_caps) *
4672 SDHCI_MAX_CURRENT_MULTIPLIER;
4673 }
4674 if (host->caps & SDHCI_CAN_VDD_180) {
4675 ocr_avail |= MMC_VDD_165_195;
4676
4677 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4678 max_current_caps) *
4679 SDHCI_MAX_CURRENT_MULTIPLIER;
4680 }
4681
4682 /* If OCR set by host, use it instead. */
4683 if (host->ocr_mask)
4684 ocr_avail = host->ocr_mask;
4685
4686 /* If OCR set by external regulators, give it highest prio. */
4687 if (mmc->ocr_avail)
4688 ocr_avail = mmc->ocr_avail;
4689
4690 mmc->ocr_avail = ocr_avail;
4691 mmc->ocr_avail_sdio = ocr_avail;
4692 if (host->ocr_avail_sdio)
4693 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4694 mmc->ocr_avail_sd = ocr_avail;
4695 if (host->ocr_avail_sd)
4696 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4697 else /* normal SD controllers don't support 1.8V */
4698 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4699 mmc->ocr_avail_mmc = ocr_avail;
4700 if (host->ocr_avail_mmc)
4701 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4702
4703 if (mmc->ocr_avail == 0) {
4704 pr_err("%s: Hardware doesn't report any supported voltages.\n",
4705 mmc_hostname(mmc));
4706 ret = -ENODEV;
4707 goto unreg;
4708 }
4709
4710 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4711 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4712 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4713 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4714 host->flags |= SDHCI_SIGNALING_180;
4715
4716 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4717 host->flags |= SDHCI_SIGNALING_120;
4718
4719 spin_lock_init(&host->lock);
4720
4721 /*
4722 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4723 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4724 * is less anyway.
4725 */
4726 mmc->max_req_size = 524288;
4727
4728 /*
4729 * Maximum number of segments. Depends on if the hardware
4730 * can do scatter/gather or not.
4731 */
4732 if (host->flags & SDHCI_USE_ADMA) {
4733 mmc->max_segs = SDHCI_MAX_SEGS;
4734 } else if (host->flags & SDHCI_USE_SDMA) {
4735 mmc->max_segs = 1;
4736 mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4737 dma_max_mapping_size(mmc_dev(mmc)));
4738 } else { /* PIO */
4739 mmc->max_segs = SDHCI_MAX_SEGS;
4740 }
4741
4742 /*
4743 * Maximum segment size. Could be one segment with the maximum number
4744 * of bytes. When doing hardware scatter/gather, each entry cannot
4745 * be larger than 64 KiB though.
4746 */
4747 if (host->flags & SDHCI_USE_ADMA) {
4748 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
4749 host->max_adma = 65532; /* 32-bit alignment */
4750 mmc->max_seg_size = 65535;
4751 } else {
4752 mmc->max_seg_size = 65536;
4753 }
4754 } else {
4755 mmc->max_seg_size = mmc->max_req_size;
4756 }
4757
4758 /*
4759 * Maximum block size. This varies from controller to controller and
4760 * is specified in the capabilities register.
4761 */
4762 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4763 mmc->max_blk_size = 2;
4764 } else {
4765 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4766 SDHCI_MAX_BLOCK_SHIFT;
4767 if (mmc->max_blk_size >= 3) {
4768 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4769 mmc_hostname(mmc));
4770 mmc->max_blk_size = 0;
4771 }
4772 }
4773
4774 mmc->max_blk_size = 512 << mmc->max_blk_size;
4775
4776 /*
4777 * Maximum block count.
4778 */
4779 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4780
4781 if (mmc->max_segs == 1)
4782 /* This may alter mmc->*_blk_* parameters */
4783 sdhci_allocate_bounce_buffer(host);
4784
4785 return 0;
4786
4787 unreg:
4788 if (host->sdhci_core_to_disable_vqmmc)
4789 regulator_disable(mmc->supply.vqmmc);
4790 undma:
4791 if (host->align_buffer)
4792 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4793 host->adma_table_sz, host->align_buffer,
4794 host->align_addr);
4795 host->adma_table = NULL;
4796 host->align_buffer = NULL;
4797
4798 return ret;
4799 }
4800 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4801
4802 void sdhci_cleanup_host(struct sdhci_host *host)
4803 {
4804 struct mmc_host *mmc = host->mmc;
4805
4806 if (host->sdhci_core_to_disable_vqmmc)
4807 regulator_disable(mmc->supply.vqmmc);
4808
4809 if (host->align_buffer)
4810 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4811 host->adma_table_sz, host->align_buffer,
4812 host->align_addr);
4813
4814 if (host->use_external_dma)
4815 sdhci_external_dma_release(host);
4816
4817 host->adma_table = NULL;
4818 host->align_buffer = NULL;
4819 }
4820 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4821
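/*
 * __sdhci_add_host() - second stage of host registration
 *
 * Creates the completion workqueue, sets up the timers, requests the
 * shared threaded interrupt, registers the LED and finally adds the mmc
 * host. Drivers call this separately from sdhci_setup_host() when they
 * need to adjust caps and limits in between; otherwise sdhci_add_host()
 * combines both steps.
 */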
4822 int __sdhci_add_host(struct sdhci_host *host)
4823 {
4824 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4825 struct mmc_host *mmc = host->mmc;
4826 int ret;
4827
4828 if ((mmc->caps2 & MMC_CAP2_CQE) &&
4829 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4830 mmc->caps2 &= ~MMC_CAP2_CQE;
4831 mmc->cqe_ops = NULL;
4832 }
4833
4834 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4835 if (!host->complete_wq)
4836 return -ENOMEM;
4837
4838 INIT_WORK(&host->complete_work, sdhci_complete_work);
4839
4840 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4841 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4842
4843 init_waitqueue_head(&host->buf_ready_int);
4844
4845 sdhci_init(host, 0);
4846
4847 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4848 IRQF_SHARED, mmc_hostname(mmc), host);
4849 if (ret) {
4850 pr_err("%s: Failed to request IRQ %d: %d\n",
4851 mmc_hostname(mmc), host->irq, ret);
4852 goto unwq;
4853 }
4854
4855 ret = sdhci_led_register(host);
4856 if (ret) {
4857 pr_err("%s: Failed to register LED device: %d\n",
4858 mmc_hostname(mmc), ret);
4859 goto unirq;
4860 }
4861
4862 ret = mmc_add_host(mmc);
4863 if (ret)
4864 goto unled;
4865
4866 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4867 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4868 host->use_external_dma ? "External DMA" :
4869 (host->flags & SDHCI_USE_ADMA) ?
4870 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4871 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4872
4873 sdhci_enable_card_detection(host);
4874
4875 return 0;
4876
4877 unled:
4878 sdhci_led_unregister(host);
4879 unirq:
4880 sdhci_reset_for_all(host);
4881 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4882 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4883 free_irq(host->irq, host);
4884 unwq:
4885 destroy_workqueue(host->complete_wq);
4886
4887 return ret;
4888 }
4889 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4890
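/*
 * sdhci_add_host() - one-shot host registration
 *
 * Illustrative sketch (not part of this driver, names are hypothetical):
 * a minimal probe sequence for a platform glue driver might look like:
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";
 *	host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
 *	host->irq = platform_get_irq(pdev, 0);
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *
 * Clock, regulator and error handling are omitted here; real glue drivers
 * usually build on sdhci-pltfm instead of open-coding this.
 */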
4891 int sdhci_add_host(struct sdhci_host *host)
4892 {
4893 int ret;
4894
4895 ret = sdhci_setup_host(host);
4896 if (ret)
4897 return ret;
4898
4899 ret = __sdhci_add_host(host);
4900 if (ret)
4901 goto cleanup;
4902
4903 return 0;
4904
4905 cleanup:
4906 sdhci_cleanup_host(host);
4907
4908 return ret;
4909 }
4910 EXPORT_SYMBOL_GPL(sdhci_add_host);
4911
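/*
 * sdhci_remove_host() - tear down a registered host
 *
 * If @dead is set the hardware is assumed to be inaccessible: outstanding
 * requests are errored out with -ENOMEDIUM and no final reset is issued.
 * Otherwise the controller is reset before interrupts, timers, the
 * workqueue, the vqmmc regulator and the DMA buffers are released.
 */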
4912 void sdhci_remove_host(struct sdhci_host *host, int dead)
4913 {
4914 struct mmc_host *mmc = host->mmc;
4915 unsigned long flags;
4916
4917 if (dead) {
4918 spin_lock_irqsave(&host->lock, flags);
4919
4920 host->flags |= SDHCI_DEVICE_DEAD;
4921
4922 if (sdhci_has_requests(host)) {
4923 pr_err("%s: Controller removed during transfer!\n",
4924 mmc_hostname(mmc));
4925 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4926 }
4927
4928 spin_unlock_irqrestore(&host->lock, flags);
4929 }
4930
4931 sdhci_disable_card_detection(host);
4932
4933 mmc_remove_host(mmc);
4934
4935 sdhci_led_unregister(host);
4936
4937 if (!dead)
4938 sdhci_reset_for_all(host);
4939
4940 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4941 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4942 free_irq(host->irq, host);
4943
4944 del_timer_sync(&host->timer);
4945 del_timer_sync(&host->data_timer);
4946
4947 destroy_workqueue(host->complete_wq);
4948
4949 if (host->sdhci_core_to_disable_vqmmc)
4950 regulator_disable(mmc->supply.vqmmc);
4951
4952 if (host->align_buffer)
4953 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4954 host->adma_table_sz, host->align_buffer,
4955 host->align_addr);
4956
4957 if (host->use_external_dma)
4958 sdhci_external_dma_release(host);
4959
4960 host->adma_table = NULL;
4961 host->align_buffer = NULL;
4962 }
4963
4964 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4965
4966 void sdhci_free_host(struct sdhci_host *host)
4967 {
4968 mmc_free_host(host->mmc);
4969 }
4970
4971 EXPORT_SYMBOL_GPL(sdhci_free_host);
4972
4973 /*****************************************************************************\
4974 * *
4975 * Driver init/exit *
4976 * *
4977 \*****************************************************************************/
4978
4979 static int __init sdhci_drv_init(void)
4980 {
4981 pr_info(DRIVER_NAME
4982 ": Secure Digital Host Controller Interface driver\n");
4983 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4984
4985 return 0;
4986 }
4987
4988 static void __exit sdhci_drv_exit(void)
4989 {
4990 }
4991
4992 module_init(sdhci_drv_init);
4993 module_exit(sdhci_drv_exit);
4994
4995 module_param(debug_quirks, uint, 0444);
4996 module_param(debug_quirks2, uint, 0444);
4997
4998 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4999 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
5000 MODULE_LICENSE("GPL");
5001
5002 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
5003 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
5004