/*
 * linux/drivers/mmc/core/core.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
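
/*
 * Usage note: this file is built into the mmc_core module, so the
 * parameter above can be set at boot with "mmc_core.use_spi_crc=0" on
 * the kernel command line, or as an option when loading mmc_core as a
 * module. The 0 permissions mean it is not exposed in sysfs.
 */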

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume. Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
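
/*
 * For example, booting with "mmc_core.removable=0" tells the core to
 * keep cards bound across suspend (the CONFIG_MMC_UNSAFE_RESUME
 * default above), while "removable=1" keeps the safer default of
 * assuming the card may have been swapped. The 0644 permissions also
 * expose it at /sys/module/mmc_core/parameters/removable.
 */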

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previous started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one and returning.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL if none completed.
 * Waits for a previously started ongoing request to complete and
 * returns the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
	}

	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
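
/*
 * Example (a minimal sketch, not from an in-tree driver): callers
 * pipeline transfers through mmc_start_req() so the next request is
 * prepared (e.g. DMA-mapped via pre_req) while the current one runs.
 * "prev_areq"/"next_areq" and handle_error() are illustrative only:
 *
 *	struct mmc_async_req *done;
 *	int err;
 *
 *	done = mmc_start_req(host, next_areq, &err); // waits for prev_areq
 *	if (err)
 *		handle_error(done);	// previous request failed err_check
 *	// next_areq is now in flight; prepare the request after it
 *
 * Passing areq == NULL simply waits for and returns the outstanding
 * request, which is how the pipeline is drained.
 */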

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
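
/*
 * Example (a minimal sketch): a synchronous single-block read using
 * mmc_wait_for_req(). The caller is assumed to have claimed the host
 * and built a scatterlist "sg" over a 512-byte buffer; "blk_addr" is
 * illustrative:
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *
 *	mmc_wait_for_req(card->host, &mrq);
 *	err = cmd.error ? cmd.error : data.error;
 */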

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status
 * until it is out of the prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/*
	 * If the card status is in the PRG-state, we can send the HPI command.
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
		do {
			/*
			 * We don't know when the HPI command will finish
			 * processing, so we need to resend HPI until out
			 * of prg-state, and keep checking the card status
			 * with SEND_STATUS. If a timeout error occurs when
			 * sending the HPI command, we are already out of
			 * prg-state.
			 */
			err = mmc_send_hpi_cmd(card, &status);
			if (err)
				pr_debug("%s: abort HPI (%d error)\n",
					 mmc_hostname(card->host), err);

			err = mmc_send_status(card, &status);
			if (err)
				break;
		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
	} else
		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
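
/*
 * Example: the polling loop in mmc_do_erase() below uses exactly this
 * helper - build a bare command, send it, then parse the response from
 * cmd.resp:
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	if (!err)
 *		state = R1_CURRENT_STATE(cmd.resp[0]);
 */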

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require a longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
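
/*
 * Worked example (hypothetical CSD values): for a standard-capacity SD
 * write with tacc_ns = 1562500 and r2w_factor = 2, the multiplier is
 * 100 << 2 = 400, so timeout_ns = 1562500 * 400 = 625000000 (625 ms).
 * That is below the 3 s write limit, so the CSD-derived value is kept;
 * a block-addressed (SDHC) card would be clamped to the fixed
 * 3 s / 100 ms limits regardless.
 */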

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
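
/*
 * For example, ((13 + 3) / 4) * 4 = 16 while ((16 + 3) / 4) * 4 = 16:
 * sizes are rounded up to the next multiple of 4 bytes, and already
 * aligned sizes pass through unchanged.
 */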

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value then this will return prematurely
 * with that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);
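
/*
 * Usage pattern (a minimal sketch): card operations are bracketed by a
 * claim/release pair, and claims nest because __mmc_claim_host() lets
 * the current claimer re-enter. mmc_interrupt_hpi() above is a real
 * instance of this pattern:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);	// any card ops here
 *	mmc_release_host(card->host);
 */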

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		 "width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
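
/*
 * Worked example for the [3300:3400] case mentioned above: vdd_max =
 * 3400 with low_bits = false gives bit (3400 - 2000) / 100 + 8 = 22
 * (MMC_VDD_34_35); vdd_min = 3300 with low_bits = true first becomes
 * 3299, giving bit (3299 - 2000) / 100 + 8 = 20 (MMC_VDD_32_33).
 * Filling bits 22..20 yields MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35.
 */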

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			  struct regulator *supply,
			  unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);

		if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
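
/*
 * Worked example of the vdd_bit conversion above: for vdd_bit =
 * ilog2(MMC_VDD_32_33) = 20, tmp = 20 - ilog2(MMC_VDD_165_195) =
 * 20 - 7 = 13, so min_uV = 1900000 + 13 * 100000 = 3200000 and
 * max_uV = 3300000, i.e. the regulator is asked for 3.2-3.3 V,
 * matching the OCR window that bit represents.
 */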

#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
			   mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

static void mmc_poweroff_notify(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int timeout;
	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
	int err = 0;

	card = host->card;
	mmc_claim_host(host);

	/*
	 * Send power notify command only if card
	 * is mmc and notify state is powered ON
	 */
	if (card && mmc_card_mmc(card) &&
	    (card->poweroff_notify_state == MMC_POWERED_ON)) {

		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
			notify_type = EXT_CSD_POWER_OFF_SHORT;
			timeout = card->ext_csd.generic_cmd6_time;
			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
		} else {
			notify_type = EXT_CSD_POWER_OFF_LONG;
			timeout = card->ext_csd.power_off_longtime;
			card->poweroff_notify_state = MMC_POWEROFF_LONG;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 notify_type, timeout);

		if (err && err != -EBADMSG)
			pr_err("Device failed to respond within %d ms poweroff "
			       "time. Forcefully powering down the device\n",
			       timeout);

		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}
	mmc_release_host(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	int err = 0;
	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * For eMMC 4.5 devices, send the AWAKE command before the
	 * POWER_OFF_NOTIFY command, because in the sleep state
	 * eMMC 4.5 devices respond only to RESET and AWAKE commands.
	 */
	if (host->card && mmc_card_is_sleep(host->card) &&
	    host->bus_ops->resume) {
		err = host->bus_ops->resume(host);

		if (!err)
			mmc_poweroff_notify(host);
		else
			pr_warning("%s: error %d during resume "
				   "(continue with poweroff sequence)\n",
				   mmc_hostname(host), err);
	}

	/*
	 * Reset the ocr mask to be the highest possible voltage supported
	 * by this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign an MMC bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on an MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			(mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
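
/*
 * Worked example (hypothetical EXT_CSD values): with erase_group_def
 * bit 0 set and hc_erase_timeout = 300 ms, erasing qty = 4 high
 * capacity erase groups gives erase_timeout = 300 * 4 = 1200 ms; on an
 * SPI host a result below 1000 ms would instead be rounded up to 1 s.
 */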

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
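
/*
 * Worked example of the MMC_ERASE_ARG alignment above: with
 * erase_size = 1024 sectors, from = 1000 and nr = 3000, from is
 * rounded up by 24 to 1024 (and nr shrinks to 2976); then
 * nr % 1024 = 928 is trimmed from the end, leaving an aligned erase
 * of sectors 1024..3071 (nr = 2048).
 */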

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}
1830
mmc_can_reset(struct mmc_card * card)1831 int mmc_can_reset(struct mmc_card *card)
1832 {
1833 u8 rst_n_function;
1834
1835 if (!mmc_card_mmc(card))
1836 return 0;
1837 rst_n_function = card->ext_csd.rst_n_function;
1838 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
1839 return 0;
1840 return 1;
1841 }
1842 EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail. */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
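
/*
 * Usage sketch (illustrative): a caller that needs to know whether the
 * RST_n line is actually wired up would use the checking variant;
 * -ENOSYS here means the card still answered SEND_STATUS and was
 * therefore never reset, e.g.
 *
 *	err = mmc_hw_reset_check(host);
 *	if (err == -ENOSYS)
 *		pr_warn("%s: reset line not connected\n", mmc_hostname(host));
 */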

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up,
	 * so do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/* Initialization should be done at 3.3 V I/O voltage. */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);

	/*
	 * sdio_reset() sends CMD52 to reset the card. Since we do not know
	 * whether the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order is important: probe SDIO, then SD, then MMC. */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);
	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked
	 * to detect a change, or the host requires polling to provide card
	 * detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule detect work as soon as possible to let
			 * a rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
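
/*
 * Usage sketch (illustrative): with MMC_CAP2_DETECT_ON_ERR set, a block
 * driver's error path can ask whether the card is simply gone before
 * retrying a request, holding the host claim the WARN_ON above expects,
 * e.g.
 *
 *	mmc_claim_host(host);
 *	if (mmc_detect_card_removed(host))
 *		err = -ENOMEDIUM;
 *	mmc_release_host(host);
 */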

void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * If there is a _removable_ card registered, check whether it is
	 * still present.
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* If there still is a card present, stop here. */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
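	/*
	 * Walk the frequency ladder, clamped to the host's minimum. For
	 * example (illustrative), with host->f_min == 250000 the attempts
	 * are 400 kHz, 300 kHz, then 250 kHz, after which the loop stops.
	 */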
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* Clear pm flags now and let card drivers set them as needed. */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock. */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
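
/*
 * Usage sketch (illustrative): these two are intended to be called as a
 * pair by platform code that gates power to the slot while keeping the
 * card registered, e.g.
 *
 *	err = mmc_power_save_host(host);
 *	... slot power rail turned off, later turned back on ...
 *	if (!err)
 *		err = mmc_power_restore_host(host);
 */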

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
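
/*
 * Usage sketch (illustrative): a suspend path that wants to use the
 * eMMC SLEEP/AWAKE commands (available from EXT_CSD revision 3, i.e.
 * MMCv4.3) would gate them on mmc_card_can_sleep(), e.g.
 *
 *	if (mmc_card_can_sleep(host))
 *		err = mmc_card_sleep(host);
 */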

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
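
/*
 * Usage sketch (illustrative): the natural caller is a block driver
 * servicing a flush request, which must not complete until the card's
 * volatile cache has been written back, e.g.
 *
 *	int ret = mmc_flush_cache(card);
 *
 *	if (ret)
 *		ret = -EIO;
 */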

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host))
		return err;

	mmc_claim_host(host);
	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off", err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}
	mmc_release_host(host);

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
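
/*
 * Usage note: the suspend path below relies on exactly this behaviour;
 * mmc_suspend_host() calls mmc_cache_ctrl(host, 0) so that disabling
 * the cache flushes it to non-volatile storage before power is cut.
 */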

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);

		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume. (Calling
			 * bus_ops->remove() with a claimed host can
			 * deadlock.)
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_power_off(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}
EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell the runtime PM core we just powered up the
			 * card, since it still believes the card is powered
			 * off. Note that currently runtime PM is only
			 * enabled for SDIO cards that are
			 * MMC_CAP_POWER_OFF_CARD.
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume (card was removed?)\n",
				   mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
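
/*
 * Usage sketch (illustrative; "myhost" and its fields are hypothetical
 * names): host controller drivers call this pair from their own system
 * PM callbacks, e.g.
 *
 *	static int myhost_suspend(struct device *dev)
 *	{
 *		struct myhost *mh = dev_get_drvdata(dev);
 *
 *		return mmc_suspend_host(mh->mmc);
 *	}
 */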

/*
 * Do the card removal on suspend if the card is assumed removable. Do
 * it in a PM notifier while userspace is not yet frozen, so that we
 * will still be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock. */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);
		break;
	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");