1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * Thanks to the following companies for their support:
7 *
8 * - JMicron (hardware and technical support)
9 */
10
11 #include <linux/bitfield.h>
12 #include <linux/string.h>
13 #include <linux/delay.h>
14 #include <linux/highmem.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/scatterlist.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/gpio.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm_qos.h>
26 #include <linux/debugfs.h>
27 #include <linux/acpi.h>
28 #include <linux/dmi.h>
29
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/slot-gpio.h>
33
34 #ifdef CONFIG_X86
35 #include <asm/iosf_mbi.h>
36 #endif
37
38 #include "cqhci.h"
39
40 #include "sdhci.h"
41 #include "sdhci-pci.h"
42
43 static void sdhci_pci_hw_reset(struct sdhci_host *host);
44
45 #ifdef CONFIG_PM_SLEEP
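/*
 * Enable PCI wakeup when the slots' aggregated PM flags allow waking via
 * SDIO IRQ with power kept; otherwise disable it unless a slot relies on
 * card-detect wake.
 */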
46 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
47 {
48 mmc_pm_flag_t pm_flags = 0;
49 bool cap_cd_wake = false;
50 int i;
51
52 for (i = 0; i < chip->num_slots; i++) {
53 struct sdhci_pci_slot *slot = chip->slots[i];
54
55 if (slot) {
56 pm_flags |= slot->host->mmc->pm_flags;
57 if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
58 cap_cd_wake = true;
59 }
60 }
61
62 if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
63 return device_wakeup_enable(&chip->pdev->dev);
64 else if (!cap_cd_wake)
65 return device_wakeup_disable(&chip->pdev->dev);
66
67 return 0;
68 }
69
70 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
71 {
72 int i, ret;
73
74 sdhci_pci_init_wakeup(chip);
75
76 for (i = 0; i < chip->num_slots; i++) {
77 struct sdhci_pci_slot *slot = chip->slots[i];
78 struct sdhci_host *host;
79
80 if (!slot)
81 continue;
82
83 host = slot->host;
84
85 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
86 mmc_retune_needed(host->mmc);
87
88 ret = sdhci_suspend_host(host);
89 if (ret)
90 goto err_pci_suspend;
91
92 if (device_may_wakeup(&chip->pdev->dev))
93 mmc_gpio_set_cd_wake(host->mmc, true);
94 }
95
96 return 0;
97
98 err_pci_suspend:
99 while (--i >= 0)
100 sdhci_resume_host(chip->slots[i]->host);
101 return ret;
102 }
103
104 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
105 {
106 struct sdhci_pci_slot *slot;
107 int i, ret;
108
109 for (i = 0; i < chip->num_slots; i++) {
110 slot = chip->slots[i];
111 if (!slot)
112 continue;
113
114 ret = sdhci_resume_host(slot->host);
115 if (ret)
116 return ret;
117
118 mmc_gpio_set_cd_wake(slot->host->mmc, false);
119 }
120
121 return 0;
122 }
123
124 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
125 {
126 int ret;
127
128 ret = cqhci_suspend(chip->slots[0]->host->mmc);
129 if (ret)
130 return ret;
131
132 return sdhci_pci_suspend_host(chip);
133 }
134
135 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
136 {
137 int ret;
138
139 ret = sdhci_pci_resume_host(chip);
140 if (ret)
141 return ret;
142
143 return cqhci_resume(chip->slots[0]->host->mmc);
144 }
145 #endif
146
147 #ifdef CONFIG_PM
148 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
149 {
150 struct sdhci_pci_slot *slot;
151 struct sdhci_host *host;
152 int i, ret;
153
154 for (i = 0; i < chip->num_slots; i++) {
155 slot = chip->slots[i];
156 if (!slot)
157 continue;
158
159 host = slot->host;
160
161 ret = sdhci_runtime_suspend_host(host);
162 if (ret)
163 goto err_pci_runtime_suspend;
164
165 if (chip->rpm_retune &&
166 host->tuning_mode != SDHCI_TUNING_MODE_3)
167 mmc_retune_needed(host->mmc);
168 }
169
170 return 0;
171
172 err_pci_runtime_suspend:
173 while (--i >= 0)
174 sdhci_runtime_resume_host(chip->slots[i]->host, 0);
175 return ret;
176 }
177
178 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
179 {
180 struct sdhci_pci_slot *slot;
181 int i, ret;
182
183 for (i = 0; i < chip->num_slots; i++) {
184 slot = chip->slots[i];
185 if (!slot)
186 continue;
187
188 ret = sdhci_runtime_resume_host(slot->host, 0);
189 if (ret)
190 return ret;
191 }
192
193 return 0;
194 }
195
196 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
197 {
198 int ret;
199
200 ret = cqhci_suspend(chip->slots[0]->host->mmc);
201 if (ret)
202 return ret;
203
204 return sdhci_pci_runtime_suspend_host(chip);
205 }
206
207 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
208 {
209 int ret;
210
211 ret = sdhci_pci_runtime_resume_host(chip);
212 if (ret)
213 return ret;
214
215 return cqhci_resume(chip->slots[0]->host->mmc);
216 }
217 #endif
218
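/*
 * If the interrupt does not belong to the CQE, hand it back for normal SDHCI
 * handling; otherwise forward it to the CQHCI layer.
 */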
219 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
220 {
221 int cmd_error = 0;
222 int data_error = 0;
223
224 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
225 return intmask;
226
227 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
228
229 return 0;
230 }
231
232 static void sdhci_pci_dumpregs(struct mmc_host *mmc)
233 {
234 sdhci_dumpregs(mmc_priv(mmc));
235 }
236
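/* Deactivate the command queue engine before a full controller reset. */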
237 static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
238 {
239 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
240 host->mmc->cqe_private)
241 cqhci_deactivate(host->mmc);
242 sdhci_reset(host, mask);
243 }
244
245 /*****************************************************************************\
246 * *
247 * Hardware specific quirk handling *
248 * *
249 \*****************************************************************************/
250
251 static int ricoh_probe(struct sdhci_pci_chip *chip)
252 {
253 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
254 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
255 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
256 return 0;
257 }
258
259 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
260 {
261 slot->host->caps =
262 FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
263 FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
264 SDHCI_TIMEOUT_CLK_UNIT |
265 SDHCI_CAN_VDD_330 |
266 SDHCI_CAN_DO_HISPD |
267 SDHCI_CAN_DO_SDMA;
268 return 0;
269 }
270
271 #ifdef CONFIG_PM_SLEEP
272 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
273 {
274 /* Apply a delay to allow the controller to settle. Otherwise it becomes
275  * confused if the card state changed during suspend.
276  */
277 msleep(500);
278 return sdhci_pci_resume_host(chip);
279 }
280 #endif
281
282 static const struct sdhci_pci_fixes sdhci_ricoh = {
283 .probe = ricoh_probe,
284 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
285 SDHCI_QUIRK_FORCE_DMA |
286 SDHCI_QUIRK_CLOCK_BEFORE_RESET,
287 };
288
289 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
290 .probe_slot = ricoh_mmc_probe_slot,
291 #ifdef CONFIG_PM_SLEEP
292 .resume = ricoh_mmc_resume,
293 #endif
294 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
295 SDHCI_QUIRK_CLOCK_BEFORE_RESET |
296 SDHCI_QUIRK_NO_CARD_NO_RESET |
297 SDHCI_QUIRK_MISSING_CAPS
298 };
299
300 static const struct sdhci_pci_fixes sdhci_ene_712 = {
301 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
302 SDHCI_QUIRK_BROKEN_DMA,
303 };
304
305 static const struct sdhci_pci_fixes sdhci_ene_714 = {
306 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
307 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
308 SDHCI_QUIRK_BROKEN_DMA,
309 };
310
311 static const struct sdhci_pci_fixes sdhci_cafe = {
312 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
313 SDHCI_QUIRK_NO_BUSY_IRQ |
314 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
315 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
316 };
317
318 static const struct sdhci_pci_fixes sdhci_intel_qrk = {
319 .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
320 };
321
322 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
323 {
324 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
325 return 0;
326 }
327
328 /*
329 * ADMA operation is disabled for Moorestown platform due to
330 * hardware bugs.
331 */
332 static int mrst_hc_probe(struct sdhci_pci_chip *chip)
333 {
334 /*
335 * The number of slots is fixed here for MRST, as SDIO3/5 are never used
336 * and have hardware bugs.
337 */
338 chip->num_slots = 1;
339 return 0;
340 }
341
342 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
343 {
344 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
345 return 0;
346 }
347
348 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
349 {
350 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
351 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
352 return 0;
353 }
354
355 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
356 {
357 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
358 return 0;
359 }
360
361 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
362 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
363 .probe_slot = mrst_hc_probe_slot,
364 };
365
366 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
367 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
368 .probe = mrst_hc_probe,
369 };
370
371 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
372 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
373 .allow_runtime_pm = true,
374 .own_cd_for_runtime_pm = true,
375 };
376
377 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
378 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
379 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
380 .allow_runtime_pm = true,
381 .probe_slot = mfd_sdio_probe_slot,
382 };
383
384 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
385 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
386 .allow_runtime_pm = true,
387 .probe_slot = mfd_emmc_probe_slot,
388 };
389
390 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
391 .quirks = SDHCI_QUIRK_BROKEN_ADMA,
392 .probe_slot = pch_hc_probe_slot,
393 };
394
395 #ifdef CONFIG_X86
396
397 #define BYT_IOSF_SCCEP 0x63
398 #define BYT_IOSF_OCP_NETCTRL0 0x1078
399 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
400
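/*
 * On Bay Trail eMMC/SDIO/SD host functions, clear the OCP timeout base
 * field in OCP_NETCTRL0 via the IOSF sideband interface.
 */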
401 static void byt_ocp_setting(struct pci_dev *pdev)
402 {
403 u32 val = 0;
404
405 if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
406 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
407 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
408 pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
409 return;
410
411 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
412 &val)) {
413 dev_err(&pdev->dev, "%s read error\n", __func__);
414 return;
415 }
416
417 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
418 return;
419
420 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
421
422 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
423 val)) {
424 dev_err(&pdev->dev, "%s write error\n", __func__);
425 return;
426 }
427
428 dev_dbg(&pdev->dev, "%s completed\n", __func__);
429 }
430
431 #else
432
433 static inline void byt_ocp_setting(struct pci_dev *pdev)
434 {
435 }
436
437 #endif
438
439 enum {
440 INTEL_DSM_FNS = 0,
441 INTEL_DSM_V18_SWITCH = 3,
442 INTEL_DSM_V33_SWITCH = 4,
443 INTEL_DSM_DRV_STRENGTH = 9,
444 INTEL_DSM_D3_RETUNE = 10,
445 };
446
447 struct intel_host {
448 u32 dsm_fns;
449 int drv_strength;
450 bool d3_retune;
451 bool rpm_retune_ok;
452 bool needs_pwr_off;
453 u32 glk_rx_ctrl1;
454 u32 glk_tun_val;
455 u32 active_ltr;
456 u32 idle_ltr;
457 };
458
459 static const guid_t intel_dsm_guid =
460 GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
461 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
462
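/*
 * Evaluate the Intel SDHCI _DSM and copy up to the first 4 bytes of the
 * returned buffer into *result.
 */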
463 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
464 unsigned int fn, u32 *result)
465 {
466 union acpi_object *obj;
467 int err = 0;
468 size_t len;
469
470 obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
471 if (!obj)
472 return -EOPNOTSUPP;
473
474 if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
475 err = -EINVAL;
476 goto out;
477 }
478
479 len = min_t(size_t, obj->buffer.length, 4);
480
481 *result = 0;
482 memcpy(result, obj->buffer.pointer, len);
483 out:
484 ACPI_FREE(obj);
485
486 return err;
487 }
488
489 static int intel_dsm(struct intel_host *intel_host, struct device *dev,
490 unsigned int fn, u32 *result)
491 {
492 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
493 return -EOPNOTSUPP;
494
495 return __intel_dsm(intel_host, dev, fn, result);
496 }
497
498 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
499 struct mmc_host *mmc)
500 {
501 int err;
502 u32 val;
503
504 intel_host->d3_retune = true;
505
506 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
507 if (err) {
508 pr_debug("%s: DSM not supported, error %d\n",
509 mmc_hostname(mmc), err);
510 return;
511 }
512
513 pr_debug("%s: DSM function mask %#x\n",
514 mmc_hostname(mmc), intel_host->dsm_fns);
515
516 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
517 intel_host->drv_strength = err ? 0 : val;
518
519 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
520 intel_host->d3_retune = err ? true : !!val;
521 }
522
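/*
 * Pulse the vendor-specific eMMC hardware reset bit (bit 4) in the SDHCI
 * power control register, observing the eMMC reset timing requirements.
 */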
523 static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
524 {
525 u8 reg;
526
527 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
528 reg |= 0x10;
529 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
530 /* For eMMC, minimum is 1us but give it 9us for good measure */
531 udelay(9);
532 reg &= ~0x10;
533 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
534 /* For eMMC, minimum is 200us but give it 300us for good measure */
535 usleep_range(300, 1000);
536 }
537
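/* Use the _DSM-provided drive strength only if the card also supports it. */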
538 static int intel_select_drive_strength(struct mmc_card *card,
539 unsigned int max_dtr, int host_drv,
540 int card_drv, int *drv_type)
541 {
542 struct sdhci_host *host = mmc_priv(card->host);
543 struct sdhci_pci_slot *slot = sdhci_priv(host);
544 struct intel_host *intel_host = sdhci_pci_priv(slot);
545
546 if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
547 return 0;
548
549 return intel_host->drv_strength;
550 }
551
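/*
 * Report no card when the CD GPIO says so; otherwise confirm presence via
 * the controller's present-state register.
 */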
552 static int bxt_get_cd(struct mmc_host *mmc)
553 {
554 int gpio_cd = mmc_gpio_get_cd(mmc);
555
556 if (!gpio_cd)
557 return 0;
558
559 return sdhci_get_cd_nogpio(mmc);
560 }
561
562 static int mrfld_get_cd(struct mmc_host *mmc)
563 {
564 return sdhci_get_cd_nogpio(mmc);
565 }
566
567 #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
568 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
569
570 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
571 unsigned short vdd)
572 {
573 struct sdhci_pci_slot *slot = sdhci_priv(host);
574 struct intel_host *intel_host = sdhci_pci_priv(slot);
575 int cntr;
576 u8 reg;
577
578 /*
579 * Bus power may control card power, but a full reset still may not
580 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
581 * That might be needed to initialize correctly, if the card was left
582 * powered on previously.
583 */
584 if (intel_host->needs_pwr_off) {
585 intel_host->needs_pwr_off = false;
586 if (mode != MMC_POWER_OFF) {
587 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
588 usleep_range(10000, 12500);
589 }
590 }
591
592 sdhci_set_power(host, mode, vdd);
593
594 if (mode == MMC_POWER_OFF)
595 return;
596
597 /*
598 * Bus power might not enable after D3 -> D0 transition due to the
599 * present state not yet having propagated. Retry for up to 2ms.
600 */
601 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
602 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
603 if (reg & SDHCI_POWER_ON)
604 break;
605 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
606 reg |= SDHCI_POWER_ON;
607 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
608 }
609 }
610
611 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
612 unsigned int timing)
613 {
614 /* Set UHS timing to SDR25 for High Speed mode */
615 if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
616 timing = MMC_TIMING_UHS_SDR25;
617 sdhci_set_uhs_signaling(host, timing);
618 }
619
620 #define INTEL_HS400_ES_REG 0x78
621 #define INTEL_HS400_ES_BIT BIT(0)
622
623 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
624 struct mmc_ios *ios)
625 {
626 struct sdhci_host *host = mmc_priv(mmc);
627 u32 val;
628
629 val = sdhci_readl(host, INTEL_HS400_ES_REG);
630 if (ios->enhanced_strobe)
631 val |= INTEL_HS400_ES_BIT;
632 else
633 val &= ~INTEL_HS400_ES_BIT;
634 sdhci_writel(host, val, INTEL_HS400_ES_REG);
635 }
636
637 static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
638 struct mmc_ios *ios)
639 {
640 struct device *dev = mmc_dev(mmc);
641 struct sdhci_host *host = mmc_priv(mmc);
642 struct sdhci_pci_slot *slot = sdhci_priv(host);
643 struct intel_host *intel_host = sdhci_pci_priv(slot);
644 unsigned int fn;
645 u32 result = 0;
646 int err;
647
648 err = sdhci_start_signal_voltage_switch(mmc, ios);
649 if (err)
650 return err;
651
652 switch (ios->signal_voltage) {
653 case MMC_SIGNAL_VOLTAGE_330:
654 fn = INTEL_DSM_V33_SWITCH;
655 break;
656 case MMC_SIGNAL_VOLTAGE_180:
657 fn = INTEL_DSM_V18_SWITCH;
658 break;
659 default:
660 return 0;
661 }
662
663 err = intel_dsm(intel_host, dev, fn, &result);
664 pr_debug("%s: %s DSM fn %u error %d result %u\n",
665 mmc_hostname(mmc), __func__, fn, err, result);
666
667 return 0;
668 }
669
670 static const struct sdhci_ops sdhci_intel_byt_ops = {
671 .set_clock = sdhci_set_clock,
672 .set_power = sdhci_intel_set_power,
673 .enable_dma = sdhci_pci_enable_dma,
674 .set_bus_width = sdhci_set_bus_width,
675 .reset = sdhci_reset,
676 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
677 .hw_reset = sdhci_pci_hw_reset,
678 };
679
680 static const struct sdhci_ops sdhci_intel_glk_ops = {
681 .set_clock = sdhci_set_clock,
682 .set_power = sdhci_intel_set_power,
683 .enable_dma = sdhci_pci_enable_dma,
684 .set_bus_width = sdhci_set_bus_width,
685 .reset = sdhci_cqhci_reset,
686 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
687 .hw_reset = sdhci_pci_hw_reset,
688 .irq = sdhci_cqhci_irq,
689 };
690
691 static void byt_read_dsm(struct sdhci_pci_slot *slot)
692 {
693 struct intel_host *intel_host = sdhci_pci_priv(slot);
694 struct device *dev = &slot->chip->pdev->dev;
695 struct mmc_host *mmc = slot->host->mmc;
696
697 intel_dsm_init(intel_host, dev, mmc);
698 slot->chip->rpm_retune = intel_host->d3_retune;
699 }
700
701 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
702 {
703 int err = sdhci_execute_tuning(mmc, opcode);
704 struct sdhci_host *host = mmc_priv(mmc);
705
706 if (err)
707 return err;
708
709 /*
710 * Tuning can leave the IP in an active state (Buffer Read Enable bit
711 * set) which prevents the entry to low power states (i.e. S0i3). Data
712 * reset will clear it.
713 */
714 sdhci_reset(host, SDHCI_RESET_DATA);
715
716 return 0;
717 }
718
719 #define INTEL_ACTIVELTR 0x804
720 #define INTEL_IDLELTR 0x808
721
722 #define INTEL_LTR_REQ BIT(15)
723 #define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
724 #define INTEL_LTR_SCALE_1US (2 << 10)
725 #define INTEL_LTR_SCALE_32US (3 << 10)
726 #define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
727
728 static void intel_cache_ltr(struct sdhci_pci_slot *slot)
729 {
730 struct intel_host *intel_host = sdhci_pci_priv(slot);
731 struct sdhci_host *host = slot->host;
732
733 intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
734 intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
735 }
736
737 static void intel_ltr_set(struct device *dev, s32 val)
738 {
739 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
740 struct sdhci_pci_slot *slot = chip->slots[0];
741 struct intel_host *intel_host = sdhci_pci_priv(slot);
742 struct sdhci_host *host = slot->host;
743 u32 ltr;
744
745 pm_runtime_get_sync(dev);
746
747 /*
748 * Program the latency tolerance (LTR) according to what has been asked
749 * by the PM QoS layer, or disable it if we were passed a negative value
750 * or PM_QOS_LATENCY_ANY.
751 */
752 ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
753
754 if (val == PM_QOS_LATENCY_ANY || val < 0) {
755 ltr &= ~INTEL_LTR_REQ;
756 } else {
757 ltr |= INTEL_LTR_REQ;
758 ltr &= ~INTEL_LTR_SCALE_MASK;
759 ltr &= ~INTEL_LTR_VALUE_MASK;
760
761 if (val > INTEL_LTR_VALUE_MASK) {
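/* Value does not fit in 10 bits at 1 us granularity; switch to 32 us units. */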
762 val >>= 5;
763 if (val > INTEL_LTR_VALUE_MASK)
764 val = INTEL_LTR_VALUE_MASK;
765 ltr |= INTEL_LTR_SCALE_32US | val;
766 } else {
767 ltr |= INTEL_LTR_SCALE_1US | val;
768 }
769 }
770
771 if (ltr == intel_host->active_ltr)
772 goto out;
773
774 writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
775 writel(ltr, host->ioaddr + INTEL_IDLELTR);
776
777 /* Cache the values into the host private structure */
778 intel_cache_ltr(slot);
779 out:
780 pm_runtime_put_autosuspend(dev);
781 }
782
783 static bool intel_use_ltr(struct sdhci_pci_chip *chip)
784 {
785 switch (chip->pdev->device) {
786 case PCI_DEVICE_ID_INTEL_BYT_EMMC:
787 case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
788 case PCI_DEVICE_ID_INTEL_BYT_SDIO:
789 case PCI_DEVICE_ID_INTEL_BYT_SD:
790 case PCI_DEVICE_ID_INTEL_BSW_EMMC:
791 case PCI_DEVICE_ID_INTEL_BSW_SDIO:
792 case PCI_DEVICE_ID_INTEL_BSW_SD:
793 return false;
794 default:
795 return true;
796 }
797 }
798
799 static void intel_ltr_expose(struct sdhci_pci_chip *chip)
800 {
801 struct device *dev = &chip->pdev->dev;
802
803 if (!intel_use_ltr(chip))
804 return;
805
806 dev->power.set_latency_tolerance = intel_ltr_set;
807 dev_pm_qos_expose_latency_tolerance(dev);
808 }
809
810 static void intel_ltr_hide(struct sdhci_pci_chip *chip)
811 {
812 struct device *dev = &chip->pdev->dev;
813
814 if (!intel_use_ltr(chip))
815 return;
816
817 dev_pm_qos_hide_latency_tolerance(dev);
818 dev->power.set_latency_tolerance = NULL;
819 }
820
821 static void byt_probe_slot(struct sdhci_pci_slot *slot)
822 {
823 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
824 struct device *dev = &slot->chip->pdev->dev;
825 struct mmc_host *mmc = slot->host->mmc;
826
827 byt_read_dsm(slot);
828
829 byt_ocp_setting(slot->chip->pdev);
830
831 ops->execute_tuning = intel_execute_tuning;
832 ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
833
834 device_property_read_u32(dev, "max-frequency", &mmc->f_max);
835
836 if (!mmc->slotno) {
837 slot->chip->slots[mmc->slotno] = slot;
838 intel_ltr_expose(slot->chip);
839 }
840 }
841
842 static void byt_add_debugfs(struct sdhci_pci_slot *slot)
843 {
844 struct intel_host *intel_host = sdhci_pci_priv(slot);
845 struct mmc_host *mmc = slot->host->mmc;
846 struct dentry *dir = mmc->debugfs_root;
847
848 if (!intel_use_ltr(slot->chip))
849 return;
850
851 debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
852 debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
853
854 intel_cache_ltr(slot);
855 }
856
857 static int byt_add_host(struct sdhci_pci_slot *slot)
858 {
859 int ret = sdhci_add_host(slot->host);
860
861 if (!ret)
862 byt_add_debugfs(slot);
863 return ret;
864 }
865
866 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
867 {
868 struct mmc_host *mmc = slot->host->mmc;
869
870 if (!mmc->slotno)
871 intel_ltr_hide(slot->chip);
872 }
873
874 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
875 {
876 byt_probe_slot(slot);
877 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
878 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
879 MMC_CAP_CMD_DURING_TFR |
880 MMC_CAP_WAIT_WHILE_BUSY;
881 slot->hw_reset = sdhci_pci_int_hw_reset;
882 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
883 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
884 slot->host->mmc_host_ops.select_drive_strength =
885 intel_select_drive_strength;
886 return 0;
887 }
888
889 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
890 {
891 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
892 (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
893 dmi_match(DMI_SYS_VENDOR, "IRBIS"));
894 }
895
896 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
897 {
898 int ret = byt_emmc_probe_slot(slot);
899
900 if (!glk_broken_cqhci(slot))
901 slot->host->mmc->caps2 |= MMC_CAP2_CQE;
902
903 if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
904 slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
905 slot->host->mmc_host_ops.hs400_enhanced_strobe =
906 intel_hs400_enhanced_strobe;
907 slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
908 }
909
910 return ret;
911 }
912
913 static const struct cqhci_host_ops glk_cqhci_ops = {
914 .enable = sdhci_cqe_enable,
915 .disable = sdhci_cqe_disable,
916 .dumpregs = sdhci_pci_dumpregs,
917 };
918
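/*
 * Set up CQHCI on top of the SDHCI host: the CQE register block lives at
 * offset 0x200, and 128-byte task descriptors are used when 64-bit DMA is
 * available.
 */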
919 static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
920 {
921 struct device *dev = &slot->chip->pdev->dev;
922 struct sdhci_host *host = slot->host;
923 struct cqhci_host *cq_host;
924 bool dma64;
925 int ret;
926
927 ret = sdhci_setup_host(host);
928 if (ret)
929 return ret;
930
931 cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
932 if (!cq_host) {
933 ret = -ENOMEM;
934 goto cleanup;
935 }
936
937 cq_host->mmio = host->ioaddr + 0x200;
938 cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
939 cq_host->ops = &glk_cqhci_ops;
940
941 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
942 if (dma64)
943 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
944
945 ret = cqhci_init(cq_host, host->mmc, dma64);
946 if (ret)
947 goto cleanup;
948
949 ret = __sdhci_add_host(host);
950 if (ret)
951 goto cleanup;
952
953 byt_add_debugfs(slot);
954
955 return 0;
956
957 cleanup:
958 sdhci_cleanup_host(host);
959 return ret;
960 }
961
962 #ifdef CONFIG_PM
963 #define GLK_RX_CTRL1 0x834
964 #define GLK_TUN_VAL 0x840
965 #define GLK_PATH_PLL GENMASK(13, 8)
966 #define GLK_DLY GENMASK(6, 0)
967 /* Work around firmware failing to restore the tuning value */
968 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
969 {
970 struct sdhci_pci_slot *slot = chip->slots[0];
971 struct intel_host *intel_host = sdhci_pci_priv(slot);
972 struct sdhci_host *host = slot->host;
973 u32 glk_rx_ctrl1;
974 u32 glk_tun_val;
975 u32 dly;
976
977 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
978 return;
979
980 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
981 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
982
983 if (susp) {
984 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
985 intel_host->glk_tun_val = glk_tun_val;
986 return;
987 }
988
989 if (!intel_host->glk_tun_val)
990 return;
991
992 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
993 intel_host->rpm_retune_ok = true;
994 return;
995 }
996
997 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
998 (intel_host->glk_tun_val << 1));
999 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
1000 return;
1001
1002 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
1003 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
1004
1005 intel_host->rpm_retune_ok = true;
1006 chip->rpm_retune = true;
1007 mmc_retune_needed(host->mmc);
1008 pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
1009 }
1010
1011 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
1012 {
1013 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
1014 !chip->rpm_retune)
1015 glk_rpm_retune_wa(chip, susp);
1016 }
1017
1018 static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
1019 {
1020 glk_rpm_retune_chk(chip, true);
1021
1022 return sdhci_cqhci_runtime_suspend(chip);
1023 }
1024
1025 static int glk_runtime_resume(struct sdhci_pci_chip *chip)
1026 {
1027 glk_rpm_retune_chk(chip, false);
1028
1029 return sdhci_cqhci_runtime_resume(chip);
1030 }
1031 #endif
1032
1033 #ifdef CONFIG_ACPI
1034 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
1035 {
1036 acpi_status status;
1037 unsigned long long max_freq;
1038
1039 status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
1040 "MXFQ", NULL, &max_freq);
1041 if (ACPI_FAILURE(status)) {
1042 dev_err(&slot->chip->pdev->dev,
1043 "MXFQ not found in acpi table\n");
1044 return -EINVAL;
1045 }
1046
1047 slot->host->mmc->f_max = max_freq * 1000000;
1048
1049 return 0;
1050 }
1051 #else
1052 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
1053 {
1054 return 0;
1055 }
1056 #endif
1057
1058 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1059 {
1060 int err;
1061
1062 byt_probe_slot(slot);
1063
1064 err = ni_set_max_freq(slot);
1065 if (err)
1066 return err;
1067
1068 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1069 MMC_CAP_WAIT_WHILE_BUSY;
1070 return 0;
1071 }
1072
1073 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1074 {
1075 byt_probe_slot(slot);
1076 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1077 MMC_CAP_WAIT_WHILE_BUSY;
1078 return 0;
1079 }
1080
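/*
 * Record whether bus power was left on, so that sdhci_intel_set_power() can
 * cycle it before powering the card back up.
 */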
1081 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
1082 {
1083 struct intel_host *intel_host = sdhci_pci_priv(slot);
1084 u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
1085
1086 intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
1087 }
1088
1089 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1090 {
1091 byt_probe_slot(slot);
1092 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1093 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1094 slot->cd_idx = 0;
1095 slot->cd_override_level = true;
1096 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
1097 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
1098 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
1099 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
1100 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
1101
1102 if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
1103 slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
1104 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
1105
1106 byt_needs_pwr_off(slot);
1107
1108 return 0;
1109 }
1110
1111 #ifdef CONFIG_PM_SLEEP
1112
1113 static int byt_resume(struct sdhci_pci_chip *chip)
1114 {
1115 byt_ocp_setting(chip->pdev);
1116
1117 return sdhci_pci_resume_host(chip);
1118 }
1119
1120 #endif
1121
1122 #ifdef CONFIG_PM
1123
1124 static int byt_runtime_resume(struct sdhci_pci_chip *chip)
1125 {
1126 byt_ocp_setting(chip->pdev);
1127
1128 return sdhci_pci_runtime_resume_host(chip);
1129 }
1130
1131 #endif
1132
1133 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
1134 #ifdef CONFIG_PM_SLEEP
1135 .resume = byt_resume,
1136 #endif
1137 #ifdef CONFIG_PM
1138 .runtime_resume = byt_runtime_resume,
1139 #endif
1140 .allow_runtime_pm = true,
1141 .probe_slot = byt_emmc_probe_slot,
1142 .add_host = byt_add_host,
1143 .remove_slot = byt_remove_slot,
1144 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1145 SDHCI_QUIRK_NO_LED,
1146 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1147 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1148 SDHCI_QUIRK2_STOP_WITH_TC,
1149 .ops = &sdhci_intel_byt_ops,
1150 .priv_size = sizeof(struct intel_host),
1151 };
1152
1153 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
1154 .allow_runtime_pm = true,
1155 .probe_slot = glk_emmc_probe_slot,
1156 .add_host = glk_emmc_add_host,
1157 .remove_slot = byt_remove_slot,
1158 #ifdef CONFIG_PM_SLEEP
1159 .suspend = sdhci_cqhci_suspend,
1160 .resume = sdhci_cqhci_resume,
1161 #endif
1162 #ifdef CONFIG_PM
1163 .runtime_suspend = glk_runtime_suspend,
1164 .runtime_resume = glk_runtime_resume,
1165 #endif
1166 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1167 SDHCI_QUIRK_NO_LED,
1168 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1169 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1170 SDHCI_QUIRK2_STOP_WITH_TC,
1171 .ops = &sdhci_intel_glk_ops,
1172 .priv_size = sizeof(struct intel_host),
1173 };
1174
1175 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
1176 #ifdef CONFIG_PM_SLEEP
1177 .resume = byt_resume,
1178 #endif
1179 #ifdef CONFIG_PM
1180 .runtime_resume = byt_runtime_resume,
1181 #endif
1182 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1183 SDHCI_QUIRK_NO_LED,
1184 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1185 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1186 .allow_runtime_pm = true,
1187 .probe_slot = ni_byt_sdio_probe_slot,
1188 .add_host = byt_add_host,
1189 .remove_slot = byt_remove_slot,
1190 .ops = &sdhci_intel_byt_ops,
1191 .priv_size = sizeof(struct intel_host),
1192 };
1193
1194 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
1195 #ifdef CONFIG_PM_SLEEP
1196 .resume = byt_resume,
1197 #endif
1198 #ifdef CONFIG_PM
1199 .runtime_resume = byt_runtime_resume,
1200 #endif
1201 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1202 SDHCI_QUIRK_NO_LED,
1203 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1204 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1205 .allow_runtime_pm = true,
1206 .probe_slot = byt_sdio_probe_slot,
1207 .add_host = byt_add_host,
1208 .remove_slot = byt_remove_slot,
1209 .ops = &sdhci_intel_byt_ops,
1210 .priv_size = sizeof(struct intel_host),
1211 };
1212
1213 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
1214 #ifdef CONFIG_PM_SLEEP
1215 .resume = byt_resume,
1216 #endif
1217 #ifdef CONFIG_PM
1218 .runtime_resume = byt_runtime_resume,
1219 #endif
1220 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1221 SDHCI_QUIRK_NO_LED,
1222 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1223 SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1224 SDHCI_QUIRK2_STOP_WITH_TC,
1225 .allow_runtime_pm = true,
1226 .own_cd_for_runtime_pm = true,
1227 .probe_slot = byt_sd_probe_slot,
1228 .add_host = byt_add_host,
1229 .remove_slot = byt_remove_slot,
1230 .ops = &sdhci_intel_byt_ops,
1231 .priv_size = sizeof(struct intel_host),
1232 };
1233
1234 /* Define Host controllers for Intel Merrifield platform */
1235 #define INTEL_MRFLD_EMMC_0 0
1236 #define INTEL_MRFLD_EMMC_1 1
1237 #define INTEL_MRFLD_SD 2
1238 #define INTEL_MRFLD_SDIO 3
1239
1240 #ifdef CONFIG_ACPI
1241 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
1242 {
1243 struct acpi_device *device, *child;
1244
1245 device = ACPI_COMPANION(&slot->chip->pdev->dev);
1246 if (!device)
1247 return;
1248
1249 acpi_device_fix_up_power(device);
1250 list_for_each_entry(child, &device->children, node)
1251 if (child->status.present && child->status.enabled)
1252 acpi_device_fix_up_power(child);
1253 }
1254 #else
1255 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
1256 #endif
1257
1258 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
1259 {
1260 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
1261
1262 switch (func) {
1263 case INTEL_MRFLD_EMMC_0:
1264 case INTEL_MRFLD_EMMC_1:
1265 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1266 MMC_CAP_8_BIT_DATA |
1267 MMC_CAP_1_8V_DDR;
1268 break;
1269 case INTEL_MRFLD_SD:
1270 slot->cd_idx = 0;
1271 slot->cd_override_level = true;
1272 /*
1273 * There are two PCB designs of SD card slot with the opposite
1274 * card detection sense. Quirk this out by ignoring GPIO state
1275 * completely in the custom ->get_cd() callback.
1276 */
1277 slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
1278 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1279 break;
1280 case INTEL_MRFLD_SDIO:
1281 /* Advertise 2.0v for compatibility with the SDIO card's OCR */
1282 slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
1283 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1284 MMC_CAP_POWER_OFF_CARD;
1285 break;
1286 default:
1287 return -ENODEV;
1288 }
1289
1290 intel_mrfld_mmc_fix_up_power_slot(slot);
1291 return 0;
1292 }
1293
1294 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
1295 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1296 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
1297 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1298 .allow_runtime_pm = true,
1299 .probe_slot = intel_mrfld_mmc_probe_slot,
1300 };
1301
1302 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
1303 {
1304 u8 scratch;
1305 int ret;
1306
1307 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
1308 if (ret)
1309 return ret;
1310
1311 /*
1312 * Turn PMOS on [bit 0], set over current detection to 2.4 V
1313 * [bit 1:2] and enable over current debouncing [bit 6].
1314 */
1315 if (on)
1316 scratch |= 0x47;
1317 else
1318 scratch &= ~0x47;
1319
1320 return pci_write_config_byte(chip->pdev, 0xAE, scratch);
1321 }
1322
1323 static int jmicron_probe(struct sdhci_pci_chip *chip)
1324 {
1325 int ret;
1326 u16 mmcdev = 0;
1327
1328 if (chip->pdev->revision == 0) {
1329 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
1330 SDHCI_QUIRK_32BIT_DMA_SIZE |
1331 SDHCI_QUIRK_32BIT_ADMA_SIZE |
1332 SDHCI_QUIRK_RESET_AFTER_REQUEST |
1333 SDHCI_QUIRK_BROKEN_SMALL_PIO;
1334 }
1335
1336 /*
1337 * JMicron chips can have two interfaces to the same hardware
1338 * in order to work around limitations in Microsoft's driver.
1339 * We need to make sure we only bind to one of them.
1340 *
1341 * This code assumes two things:
1342 *
1343 * 1. The PCI code adds subfunctions in order.
1344 *
1345 * 2. The MMC interface has a lower subfunction number
1346 * than the SD interface.
1347 */
1348 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
1349 mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
1350 else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
1351 mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
1352
1353 if (mmcdev) {
1354 struct pci_dev *sd_dev;
1355
1356 sd_dev = NULL;
1357 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
1358 mmcdev, sd_dev)) != NULL) {
1359 if ((PCI_SLOT(chip->pdev->devfn) ==
1360 PCI_SLOT(sd_dev->devfn)) &&
1361 (chip->pdev->bus == sd_dev->bus))
1362 break;
1363 }
1364
1365 if (sd_dev) {
1366 pci_dev_put(sd_dev);
1367 dev_info(&chip->pdev->dev, "Refusing to bind to "
1368 "secondary interface.\n");
1369 return -ENODEV;
1370 }
1371 }
1372
1373 /*
1374 * JMicron chips need a bit of a nudge to enable the power
1375 * output pins.
1376 */
1377 ret = jmicron_pmos(chip, 1);
1378 if (ret) {
1379 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1380 return ret;
1381 }
1382
1383 /* Quirk for unstable RO detection on JM388 chips */
1384 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
1385 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1386 chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
1387
1388 return 0;
1389 }
1390
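/*
 * Toggle bit 0 of the vendor-specific register at offset 0xC0 to enable or
 * disable the MMC interface.
 */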
1391 static void jmicron_enable_mmc(struct sdhci_host *host, int on)
1392 {
1393 u8 scratch;
1394
1395 scratch = readb(host->ioaddr + 0xC0);
1396
1397 if (on)
1398 scratch |= 0x01;
1399 else
1400 scratch &= ~0x01;
1401
1402 writeb(scratch, host->ioaddr + 0xC0);
1403 }
1404
1405 static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
1406 {
1407 if (slot->chip->pdev->revision == 0) {
1408 u16 version;
1409
1410 version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
1411 version = (version & SDHCI_VENDOR_VER_MASK) >>
1412 SDHCI_VENDOR_VER_SHIFT;
1413
1414 /*
1415 * Older versions of the chip have lots of nasty glitches
1416 * in the ADMA engine. It's best just to avoid it
1417 * completely.
1418 */
1419 if (version < 0xAC)
1420 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1421 }
1422
1423 /* JM388 MMC doesn't support 1.8V while SD supports it */
1424 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1425 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
1426 MMC_VDD_29_30 | MMC_VDD_30_31 |
1427 MMC_VDD_165_195; /* allow 1.8V */
1428 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
1429 MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
1430 }
1431
1432 /*
1433 * The secondary interface requires a bit set to get the
1434 * interrupts.
1435 */
1436 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1437 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1438 jmicron_enable_mmc(slot->host, 1);
1439
1440 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
1441
1442 return 0;
1443 }
1444
1445 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
1446 {
1447 if (dead)
1448 return;
1449
1450 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1451 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1452 jmicron_enable_mmc(slot->host, 0);
1453 }
1454
1455 #ifdef CONFIG_PM_SLEEP
1456 static int jmicron_suspend(struct sdhci_pci_chip *chip)
1457 {
1458 int i, ret;
1459
1460 ret = sdhci_pci_suspend_host(chip);
1461 if (ret)
1462 return ret;
1463
1464 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1465 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1466 for (i = 0; i < chip->num_slots; i++)
1467 jmicron_enable_mmc(chip->slots[i]->host, 0);
1468 }
1469
1470 return 0;
1471 }
1472
1473 static int jmicron_resume(struct sdhci_pci_chip *chip)
1474 {
1475 int ret, i;
1476
1477 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1478 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1479 for (i = 0; i < chip->num_slots; i++)
1480 jmicron_enable_mmc(chip->slots[i]->host, 1);
1481 }
1482
1483 ret = jmicron_pmos(chip, 1);
1484 if (ret) {
1485 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1486 return ret;
1487 }
1488
1489 return sdhci_pci_resume_host(chip);
1490 }
1491 #endif
1492
1493 static const struct sdhci_pci_fixes sdhci_jmicron = {
1494 .probe = jmicron_probe,
1495
1496 .probe_slot = jmicron_probe_slot,
1497 .remove_slot = jmicron_remove_slot,
1498
1499 #ifdef CONFIG_PM_SLEEP
1500 .suspend = jmicron_suspend,
1501 .resume = jmicron_resume,
1502 #endif
1503 };
1504
1505 /* SysKonnect CardBus2SDIO extra registers */
1506 #define SYSKT_CTRL 0x200
1507 #define SYSKT_RDFIFO_STAT 0x204
1508 #define SYSKT_WRFIFO_STAT 0x208
1509 #define SYSKT_POWER_DATA 0x20c
1510 #define SYSKT_POWER_330 0xef
1511 #define SYSKT_POWER_300 0xf8
1512 #define SYSKT_POWER_184 0xcc
1513 #define SYSKT_POWER_CMD 0x20d
1514 #define SYSKT_POWER_START (1 << 7)
1515 #define SYSKT_POWER_STATUS 0x20e
1516 #define SYSKT_POWER_STATUS_OK (1 << 0)
1517 #define SYSKT_BOARD_REV 0x210
1518 #define SYSKT_CHIP_REV 0x211
1519 #define SYSKT_CONF_DATA 0x212
1520 #define SYSKT_CONF_DATA_1V8 (1 << 2)
1521 #define SYSKT_CONF_DATA_2V5 (1 << 1)
1522 #define SYSKT_CONF_DATA_3V3 (1 << 0)
1523
1524 static int syskt_probe(struct sdhci_pci_chip *chip)
1525 {
1526 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1527 chip->pdev->class &= ~0x0000FF;
1528 chip->pdev->class |= PCI_SDHCI_IFDMA;
1529 }
1530 return 0;
1531 }
1532
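/*
 * Power up the slot at 3.3 V and poll the power status register for up to
 * 1 ms for the regulator to report that it is stable.
 */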
1533 static int syskt_probe_slot(struct sdhci_pci_slot *slot)
1534 {
1535 int tm, ps;
1536
1537 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
1538 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
1539 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
1540 "board rev %d.%d, chip rev %d.%d\n",
1541 board_rev >> 4, board_rev & 0xf,
1542 chip_rev >> 4, chip_rev & 0xf);
1543 if (chip_rev >= 0x20)
1544 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
1545
1546 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
1547 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
1548 udelay(50);
1549 tm = 10; /* Wait max 1 ms */
1550 do {
1551 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
1552 if (ps & SYSKT_POWER_STATUS_OK)
1553 break;
1554 udelay(100);
1555 } while (--tm);
1556 if (!tm) {
1557 dev_err(&slot->chip->pdev->dev,
1558 "power regulator never stabilized");
1559 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
1560 return -ENODEV;
1561 }
1562
1563 return 0;
1564 }
1565
1566 static const struct sdhci_pci_fixes sdhci_syskt = {
1567 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
1568 .probe = syskt_probe,
1569 .probe_slot = syskt_probe_slot,
1570 };
1571
1572 static int via_probe(struct sdhci_pci_chip *chip)
1573 {
1574 if (chip->pdev->revision == 0x10)
1575 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
1576
1577 return 0;
1578 }
1579
1580 static const struct sdhci_pci_fixes sdhci_via = {
1581 .probe = via_probe,
1582 };
1583
1584 static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
1585 {
1586 slot->host->mmc->caps2 |= MMC_CAP2_HS200;
1587 return 0;
1588 }
1589
1590 static const struct sdhci_pci_fixes sdhci_rtsx = {
1591 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1592 SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
1593 SDHCI_QUIRK2_BROKEN_DDR50,
1594 .probe_slot = rtsx_probe_slot,
1595 };
1596
1597 /* AMD chipset generation */
1598 enum amd_chipset_gen {
1599 AMD_CHIPSET_BEFORE_ML,
1600 AMD_CHIPSET_CZ,
1601 AMD_CHIPSET_NL,
1602 AMD_CHIPSET_UNKNOWN,
1603 };
1604
1605 /* AMD registers */
1606 #define AMD_SD_AUTO_PATTERN 0xB8
1607 #define AMD_MSLEEP_DURATION 4
1608 #define AMD_SD_MISC_CONTROL 0xD0
1609 #define AMD_MAX_TUNE_VALUE 0x0B
1610 #define AMD_AUTO_TUNE_SEL 0x10800
1611 #define AMD_FIFO_PTR 0x30
1612 #define AMD_BIT_MASK 0x1F
1613
1614 static void amd_tuning_reset(struct sdhci_host *host)
1615 {
1616 unsigned int val;
1617
1618 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1619 val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
1620 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1621
1622 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1623 val &= ~SDHCI_CTRL_EXEC_TUNING;
1624 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1625 }
1626
1627 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
1628 {
1629 unsigned int val;
1630
1631 pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
1632 val &= ~AMD_BIT_MASK;
1633 val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
1634 pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
1635 }
1636
1637 static void amd_enable_manual_tuning(struct pci_dev *pdev)
1638 {
1639 unsigned int val;
1640
1641 pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
1642 val |= AMD_FIFO_PTR;
1643 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1644 }
1645
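/*
 * Sweep all 12 tuning phases, track the longest run of consecutive passes,
 * then select the midpoint of that window and lock it in as the manual
 * tuning value.
 */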
1646 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1647 {
1648 struct sdhci_pci_slot *slot = sdhci_priv(host);
1649 struct pci_dev *pdev = slot->chip->pdev;
1650 u8 valid_win = 0;
1651 u8 valid_win_max = 0;
1652 u8 valid_win_end = 0;
1653 u8 ctrl, tune_around;
1654
1655 amd_tuning_reset(host);
1656
1657 for (tune_around = 0; tune_around < 12; tune_around++) {
1658 amd_config_tuning_phase(pdev, tune_around);
1659
1660 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1661 valid_win = 0;
1662 msleep(AMD_MSLEEP_DURATION);
1663 ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
1664 sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
1665 } else if (++valid_win > valid_win_max) {
1666 valid_win_max = valid_win;
1667 valid_win_end = tune_around;
1668 }
1669 }
1670
1671 if (!valid_win_max) {
1672 dev_err(&pdev->dev, "no tuning point found\n");
1673 return -EIO;
1674 }
1675
1676 amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
1677
1678 amd_enable_manual_tuning(pdev);
1679
1680 host->mmc->retune_period = 0;
1681
1682 return 0;
1683 }
1684
1685 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1686 {
1687 struct sdhci_host *host = mmc_priv(mmc);
1688
1689 /* AMD requires custom HS200 tuning */
1690 if (host->timing == MMC_TIMING_MMC_HS200)
1691 return amd_execute_tuning_hs200(host, opcode);
1692
1693 /* Otherwise perform standard SDHCI tuning */
1694 return sdhci_execute_tuning(mmc, opcode);
1695 }
1696
1697 static int amd_probe_slot(struct sdhci_pci_slot *slot)
1698 {
1699 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1700
1701 ops->execute_tuning = amd_execute_tuning;
1702
1703 return 0;
1704 }
1705
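/*
 * Identify the chipset generation from the SMBus controller's PCI ID and
 * revision; pre-ML and CZ parts need the transfer mode register cleared
 * before the command register is written.
 */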
1706 static int amd_probe(struct sdhci_pci_chip *chip)
1707 {
1708 struct pci_dev *smbus_dev;
1709 enum amd_chipset_gen gen;
1710
1711 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1712 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
1713 if (smbus_dev) {
1714 gen = AMD_CHIPSET_BEFORE_ML;
1715 } else {
1716 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1717 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
1718 if (smbus_dev) {
1719 if (smbus_dev->revision < 0x51)
1720 gen = AMD_CHIPSET_CZ;
1721 else
1722 gen = AMD_CHIPSET_NL;
1723 } else {
1724 gen = AMD_CHIPSET_UNKNOWN;
1725 }
1726 }
1727
1728 if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
1729 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1730
1731 return 0;
1732 }
1733
1734 static u32 sdhci_read_present_state(struct sdhci_host *host)
1735 {
1736 return sdhci_readl(host, SDHCI_PRESENT_STATE);
1737 }
1738
1739 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
1740 {
1741 struct sdhci_pci_slot *slot = sdhci_priv(host);
1742 struct pci_dev *pdev = slot->chip->pdev;
1743 u32 present_state;
1744
1745 /*
1746 * SDHC 0x7906 requires a hard reset to clear all internal state.
1747 * Otherwise it can get into a bad state where the DATA lines are always
1748 * read as zeros.
1749 */
1750 if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
1751 pci_clear_master(pdev);
1752
1753 pci_save_state(pdev);
1754
1755 pci_set_power_state(pdev, PCI_D3cold);
1756 pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
1757 pdev->current_state);
1758 pci_set_power_state(pdev, PCI_D0);
1759
1760 pci_restore_state(pdev);
1761
1762 /*
1763 * SDHCI_RESET_ALL says the card detect logic should not be
1764 * reset, but since we need to reset the entire controller
1765 * we should wait until the card detect logic has stabilized.
1766 *
1767 * This normally takes about 40ms.
1768 */
1769 readx_poll_timeout(
1770 sdhci_read_present_state,
1771 host,
1772 present_state,
1773 present_state & SDHCI_CD_STABLE,
1774 10000,
1775 100000
1776 );
1777 }
1778
1779 return sdhci_reset(host, mask);
1780 }
1781
1782 static const struct sdhci_ops amd_sdhci_pci_ops = {
1783 .set_clock = sdhci_set_clock,
1784 .enable_dma = sdhci_pci_enable_dma,
1785 .set_bus_width = sdhci_set_bus_width,
1786 .reset = amd_sdhci_reset,
1787 .set_uhs_signaling = sdhci_set_uhs_signaling,
1788 };
1789
1790 static const struct sdhci_pci_fixes sdhci_amd = {
1791 .probe = amd_probe,
1792 .ops = &amd_sdhci_pci_ops,
1793 .probe_slot = amd_probe_slot,
1794 };
1795
static const struct pci_device_id pci_ids[] = {
	SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh),
	SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
	SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712),
	SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
	SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714),
	SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
	SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
	SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
	SDHCI_PCI_DEVICE(VIA, 95D0, via),
	SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
	SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc),
	SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(O2, 8120, o2),
	SDHCI_PCI_DEVICE(O2, 8220, o2),
	SDHCI_PCI_DEVICE(O2, 8221, o2),
	SDHCI_PCI_DEVICE(O2, 8320, o2),
	SDHCI_PCI_DEVICE(O2, 8321, o2),
	SDHCI_PCI_DEVICE(O2, FUJIN2, o2),
	SDHCI_PCI_DEVICE(O2, SDS0, o2),
	SDHCI_PCI_DEVICE(O2, SDS1, o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
	SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
	SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
	SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
	SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
	SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
	/* Generic SD host controller */
	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(pci, pci_ids);

/*****************************************************************************\
 *                                                                           *
 * SDHCI core callbacks                                                      *
 *                                                                           *
\*****************************************************************************/

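/*
 * Enable bus mastering so the controller can perform DMA. A warning is
 * printed if the interface class code does not advertise DMA support but
 * the host has nevertheless been set up to use SDMA.
 */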
int sdhci_pci_enable_dma(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot;
	struct pci_dev *pdev;

	slot = sdhci_priv(host);
	pdev = slot->chip->pdev;

	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
	    ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		dev_warn(&pdev->dev, "Will use DMA mode even though HW doesn't fully claim to support it.\n");
	}

	pci_set_master(pdev);

	return 0;
}

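/* Delegate hardware reset to the slot-specific hook, if one was set up. */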
static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot = sdhci_priv(host);

	if (slot->hw_reset)
		slot->hw_reset(host);
}

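/* Default host operations, used unless a chip's fixes entry supplies its own. */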
static const struct sdhci_ops sdhci_pci_ops = {
	.set_clock = sdhci_set_clock,
	.enable_dma = sdhci_pci_enable_dma,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
	.hw_reset = sdhci_pci_hw_reset,
};

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM_SLEEP
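/*
 * System sleep: a chip-specific suspend/resume handler from the fixes table
 * takes precedence; otherwise every slot on the chip is suspended or resumed
 * through the generic helpers.
 */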
static int sdhci_pci_suspend(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->suspend)
		return chip->fixes->suspend(chip);

	return sdhci_pci_suspend_host(chip);
}

static int sdhci_pci_resume(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->resume)
		return chip->fixes->resume(chip);

	return sdhci_pci_resume_host(chip);
}
#endif

#ifdef CONFIG_PM
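/*
 * Runtime PM mirrors the system sleep path: chip-specific handlers are
 * preferred, with the generic per-host helpers as the fallback.
 */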
static int sdhci_pci_runtime_suspend(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->runtime_suspend)
		return chip->fixes->runtime_suspend(chip);

	return sdhci_pci_runtime_suspend_host(chip);
}

static int sdhci_pci_runtime_resume(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->runtime_resume)
		return chip->fixes->runtime_resume(chip);

	return sdhci_pci_runtime_resume_host(chip);
}
#endif

static const struct dev_pm_ops sdhci_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
	SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
			   sdhci_pci_runtime_resume, NULL)
};

/*****************************************************************************\
 *                                                                           *
 * Device probing/removal                                                    *
 *                                                                           *
\*****************************************************************************/

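/*
 * Set up one slot: validate the BAR and interface class, allocate the
 * sdhci_host (plus any chip-private data), map the registers, apply
 * slot-specific fixes and register the host with the MMC core.
 */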
static struct sdhci_pci_slot *sdhci_pci_probe_slot(
	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
	int slotno)
{
	struct sdhci_pci_slot *slot;
	struct sdhci_host *host;
	int ret, bar = first_bar + slotno;
	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
		return ERR_PTR(-ENODEV);
	}

	if (pci_resource_len(pdev, bar) < 0x100) {
		dev_err(&pdev->dev, "Invalid iomem size. You may experience problems.\n");
	}

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
	if (IS_ERR(host)) {
		dev_err(&pdev->dev, "cannot allocate host\n");
		return ERR_CAST(host);
	}

	slot = sdhci_priv(host);

	slot->chip = chip;
	slot->host = host;
	slot->cd_idx = -1;

	host->hw_name = "PCI";
	host->ops = chip->fixes && chip->fixes->ops ?
		    chip->fixes->ops :
		    &sdhci_pci_ops;
	host->quirks = chip->quirks;
	host->quirks2 = chip->quirks2;

	host->irq = pdev->irq;

	ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
	if (ret) {
		dev_err(&pdev->dev, "cannot request region\n");
		goto cleanup;
	}

	host->ioaddr = pcim_iomap_table(pdev)[bar];

	if (chip->fixes && chip->fixes->probe_slot) {
		ret = chip->fixes->probe_slot(slot);
		if (ret)
			goto cleanup;
	}

	host->mmc->pm_caps = MMC_PM_KEEP_POWER;
	host->mmc->slotno = slotno;
	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;

	if (device_can_wakeup(&pdev->dev))
		host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (host->mmc->caps & MMC_CAP_CD_WAKE)
		device_init_wakeup(&pdev->dev, true);

	if (slot->cd_idx >= 0) {
		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
					   slot->cd_override_level, 0);
		if (ret && ret != -EPROBE_DEFER)
			ret = mmc_gpiod_request_cd(host->mmc, NULL,
						   slot->cd_idx,
						   slot->cd_override_level,
						   0);
		if (ret == -EPROBE_DEFER)
			goto remove;

		if (ret) {
			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
			slot->cd_idx = -1;
		}
	}

	if (chip->fixes && chip->fixes->add_host)
		ret = chip->fixes->add_host(slot);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto remove;

	/*
	 * Check if the chip needs a separate GPIO for card detect to wake up
	 * from runtime suspend. If it is not there, don't allow runtime PM.
	 */
	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
		chip->allow_runtime_pm = false;

	return slot;

remove:
	if (chip->fixes && chip->fixes->remove_slot)
		chip->fixes->remove_slot(slot, 0);

cleanup:
	sdhci_free_host(host);

	return ERR_PTR(ret);
}

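/*
 * Tear down one slot. If the interrupt status register reads back as all
 * ones, the device is assumed to have gone away and the host is removed
 * in "dead" mode.
 */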
static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
	int dead;
	u32 scratch;

	dead = 0;
	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
	if (scratch == (u32)-1)
		dead = 1;

	sdhci_remove_host(slot->host, dead);

	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
		slot->chip->fixes->remove_slot(slot, dead);

	sdhci_free_host(slot->host);
}

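/* Opt the device into runtime PM with a 50 ms autosuspend delay. */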
static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
	pm_suspend_ignore_children(dev, 1);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	/* Stay active until mmc core scans for a card */
	pm_runtime_put_noidle(dev);
}

static void sdhci_pci_runtime_pm_forbid(struct device *dev)
{
	pm_runtime_forbid(dev);
	pm_runtime_get_noresume(dev);
}

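/*
 * Probe: read the slot count and first BAR from the PCI_SLOT_INFO register,
 * apply any chip-specific fixes, then create one host per slot and, where
 * permitted, enable runtime PM.
 */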
static int sdhci_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;

	u8 slots, first_bar;
	int ret, i;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);

	BUG_ON(slots > MAX_SLOTS);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->pdev = pdev;
	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
	if (chip->fixes) {
		chip->quirks = chip->fixes->quirks;
		chip->quirks2 = chip->fixes->quirks2;
		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
	}
	chip->num_slots = slots;
	chip->pm_retune = true;
	chip->rpm_retune = true;

	pci_set_drvdata(pdev, chip);

	if (chip->fixes && chip->fixes->probe) {
		ret = chip->fixes->probe(chip);
		if (ret)
			return ret;
	}

	slots = chip->num_slots;	/* Quirk may have changed this */

	for (i = 0; i < slots; i++) {
		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
		if (IS_ERR(slot)) {
			for (i--; i >= 0; i--)
				sdhci_pci_remove_slot(chip->slots[i]);
			return PTR_ERR(slot);
		}

		chip->slots[i] = slot;
	}

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_allow(&pdev->dev);

	return 0;
}

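/* Remove every slot and, if it was enabled, revoke runtime PM for the device. */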
static void sdhci_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_forbid(&pdev->dev);

	for (i = 0; i < chip->num_slots; i++)
		sdhci_pci_remove_slot(chip->slots[i]);
}

static struct pci_driver sdhci_driver = {
	.name = "sdhci-pci",
	.id_table = pci_ids,
	.probe = sdhci_pci_probe,
	.remove = sdhci_pci_remove,
	.driver = {
		.pm = &sdhci_pci_pm_ops
	},
};

module_pci_driver(sdhci_driver);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
MODULE_LICENSE("GPL");