/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"
#include "rtsx_scsi.h"
#include "rtsx_transport.h"
#include "rtsx_chip.h"
#include "rtsx_card.h"
#include "debug.h"
/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and we ignore
 * scsi_bufflen(srb).  For non-scatter-gather transfers, scsi_sglist(srb)
 * points to the transfer buffer itself and scsi_bufflen(srb) is the
 * buffer's length.)  Update the *index and *offset variables so that
 * the next copy will pick up from where this one left off. */

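/* A minimal usage sketch (hypothetical caller, not part of this driver):
 * keeping index/offset across calls lets a response be generated in
 * chunks and copied piecewise into the transfer buffer:
 *
 *	unsigned int index = 0, offset = 0;
 *	u8 chunk[64];
 *
 *	while (next_chunk(chunk, sizeof(chunk)))
 *		if (rtsx_stor_access_xfer_buf(chunk, sizeof(chunk), srb,
 *					      &index, &offset,
 *					      TO_XFER_BUF) == 0)
 *			break;
 *
 * next_chunk() is a made-up placeholder for whatever produces the data;
 * a return value of 0 from the access routine means the transfer buffer
 * is full.
 */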
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *)scsi_sglist(srb) + *offset,
					buffer, cnt);
		else
			memcpy(buffer, (unsigned char *)scsi_sglist(srb) +
					*offset, cnt);
		*offset += cnt;

	/* Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
					(sg->offset + *offset) & (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry.  For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/* Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue. */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/* Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue. */
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
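
/* Typical use (hypothetical sizes, for illustration only): a SCSI handler
 * that has built a 36-byte INQUIRY response in a local buffer hands it to
 * the midlayer with
 *
 *	rtsx_stor_set_xfer_buf(inquiry_buf, 36, srb);
 *
 * and if the request asked for more than 36 bytes, the residue tells the
 * midlayer that the response was short.
 */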


/***********************************************************************
 * Transport routines
 ***********************************************************************/

/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		RTSX_DEBUGP("-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTSX_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
			(unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
			sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
Handle_Errors:
	return;
}

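/* Each entry in the host command buffer is one 32-bit register access,
 * packed by rtsx_add_cmd() below as follows (derived from the shifts in
 * the function body):
 *
 *	bits 31:30	cmd_type (2 bits)
 *	bits 29:16	register address (14 bits)
 *	bits 15:8	mask
 *	bits  7:0	data
 */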
void rtsx_add_cmd(struct rtsx_chip *chip,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	u32 *cb = (u32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);
	spin_unlock_irq(&chip->rtsx->reg_lock);
}

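/* Kick off execution of the queued command buffer without waiting for
 * completion.  The HCBCTLR value is built the same way here and in
 * rtsx_send_cmd() below: bit 31 starts the transfer, bit 30 enables
 * hardware auto response, and the low 24 bits hold the command buffer
 * length in bytes (ci * 4).
 */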
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = 1U << 31;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = 1U << 31;
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT; the timeout argument is in milliseconds */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		TRACE_GOTO(chip, finish_send_cmd);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

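/* Each host SG-table entry is a 64-bit descriptor packed as follows
 * (derived from the shifts below):
 *
 *	bits 63:32	DMA address
 *	bits 31:12	transfer length
 *	bits  7:0	option flags (SG_VALID, SG_END, SG_TRANS_DATA)
 *
 * Segments longer than 0x80000 bytes (512 KB) are split into multiple
 * descriptors, and only the last descriptor of a segment may carry the
 * caller's SG_END flag.
 */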
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

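/* Transfer part of a scatter-gather list, resuming at *index/*offset.
 * Like the buffer access routine above, *index and *offset are updated
 * so that the next call picks up exactly where this one stopped, which
 * lets a large SCSI request be carried out as several smaller DMA
 * transfers.  Note that the whole list is DMA-mapped and unmapped on
 * every call.
 */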
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	struct scatterlist *sg_ptr;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	u32 val = TRIG_DMA;

	if (!sg || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;

	/* Walk the list with a local pointer so that the original head
	 * is preserved for dma_unmap_sg() at the end. */
	sg_ptr = sg;
	chip->sgi = 0;
	/* Usually the next entry will be sg_ptr + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array.  So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
			     (unsigned int)addr, len);
		RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* This entry satisfies the rest of the request */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* Consume the entry and continue with the next one */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid)
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		else
			option = SG_VALID | SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

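/* Transfer a complete scatter-gather list.  The host SG table holds at
 * most HOST_SG_TBL_BUF_LEN / 8 descriptors, so longer lists are sent in
 * several rounds: each round fills the table, triggers the DMA, and
 * waits for it to finish before the next round is programmed.
 */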
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
				     (unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			else
				option = SG_VALID | SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/* sg_ptr was already advanced entry by entry via sg_next()
		 * in the inner loop, so it now points at the first entry of
		 * the next round; advancing it again here would skip
		 * entries. */
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

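/* Transfer a single contiguous kernel buffer.  The hardware takes the
 * buffer length in the low 24 bits of the control register, so this
 * path is limited to transfers below 16 MB.
 */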
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
		size_t len, enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = 1U << 31;
	long timeleft;

	if (!buf || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (!addr)
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

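/* Entry points used by the SCSI handlers.  When use_sg is non-zero, buf
 * actually points to a struct scatterlist array and use_sg is the number
 * of entries in it; otherwise buf is a plain kernel buffer of len bytes.
 */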
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
		void *buf, size_t len, int use_sg, unsigned int *index,
		unsigned int *offset, enum dma_data_direction dma_dir,
		int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	RTSX_DEBUGP("use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir,
					timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}