// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"
/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/*
 * Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and we ignore
 * scsi_bufflen(srb).  For non-scatter-gather transfers, scsi_sglist(srb)
 * points to the transfer buffer itself and scsi_bufflen(srb) is the
 * buffer's length.)  Update the *index and *offset variables so that
 * the next copy will pick up from where this one left off.
 */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;

	/*
	 * Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages which
	 * have to be copied one at a time.
	 */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					(PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);

				if (dir == TO_XFER_BUF)
					memcpy_to_page(page, poff, buffer + cnt, plen);
				else
					memcpy_from_page(buffer + cnt, page, poff, plen);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/*
 * Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

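/*
 * Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue, mirroring rtsx_stor_set_xfer_buf() above.
 */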
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/***********************************************************************
 * Transport routines
 ***********************************************************************/

/*
 * Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/*
	 * If the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing.
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		return;
	}

	/* If there is a transport error, reset and don't auto-sense. */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		return;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* Set the result so the higher layers expect this data. */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}
}

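/*
 * Queue one register access into the host command buffer.  As encoded
 * below, each entry is a little-endian 32-bit word: the command type in
 * bits 31:30, the register address in bits 29:16, the mask in bits 15:8
 * and the data byte in bits 7:0.
 */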
void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	__le32 *cb = (__le32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

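/*
 * Kick off execution of the queued host commands without waiting for
 * completion.  BIT(31) appears to be the "start transfer" bit of
 * RTSX_HCBCTLR, bit 30 enables hardware auto response (per the comment
 * below), and the low 24 bits carry the command buffer length in bytes.
 */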
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = BIT(31);

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

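/*
 * Start the queued host commands as above, then sleep until rtsx->done
 * is completed (normally from the card reader's interrupt handler) or
 * the timeout, in milliseconds, expires.  Returns 0 on success or a
 * negative errno; on failure the ongoing card operation is cancelled.
 */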
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

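/*
 * Append one entry to the host scatter-gather descriptor table.  As
 * encoded below, each descriptor is a little-endian 64-bit word with
 * the DMA address in bits 63:32, the length in bits 31:12 and the
 * option flags in the low bits.  Entries longer than 0x80000 bytes are
 * split into multiple descriptors, with RTSX_SG_END kept only on the
 * final chunk.
 */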
static inline void rtsx_add_sg_tbl(struct rtsx_chip *chip,
				   u32 addr, u32 len, u8 option)
{
	__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~RTSX_SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

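/*
 * Transfer up to @size bytes by ADMA, starting at s-g entry *index and
 * byte offset *offset within that entry.  *index and *offset are
 * updated along the way so that the caller can resume where this
 * transfer left off, much like rtsx_stor_access_xfer_buf() above.
 */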
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || num_sg <= 0 || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/*
	 * Usually the next entry will be sg_ptr + 1, but if this entry
	 * is part of a chained scatterlist, it could jump to the start
	 * of a new scatterlist array.  So here we use sg_next to move
	 * to the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* Transfer ends within this s-g entry */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* Consume the rest of this entry and move on */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
		if ((i == sg_cnt - 1) || !resid)
			option |= RTSX_SG_END;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

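/*
 * Transfer a whole scatter-gather list by ADMA.  The host descriptor
 * table holds at most HOST_SG_TBL_BUF_LEN / 8 entries, so longer lists
 * are programmed and triggered in batches of that size.
 */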
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
				     struct scatterlist *sg, int num_sg,
				     enum dma_data_direction dma_dir,
				     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || num_sg <= 0)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
			if (j == (sg_cnt - 1))
				option |= RTSX_SG_END;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/*
		 * sg_ptr has already been advanced entry by entry with
		 * sg_next() in the loop above, so it now points at the
		 * first entry of the next batch; advancing it again by
		 * sg_cnt here would skip entries.
		 */
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

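/*
 * Transfer a single contiguous buffer by DMA.  The buffer is mapped
 * with dma_map_single(), and only the low 24 bits of the RTSX_HDBCTLR
 * value carry the length, as encoded below.
 */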
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
			     size_t len, enum dma_data_direction dma_dir,
			     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = BIT(31);
	long timeleft;

	if (!buf || !len)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

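/*
 * Entry point for partial (resumable) transfers.  When @use_sg is
 * non-zero it is the number of scatter-gather entries and @buf points
 * at the scatterlist; otherwise @buf is a plain buffer of @len bytes.
 * If a failed transfer left the chip flagged as delinked, all card
 * slots are scheduled for re-initialization.
 */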
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
			       void *buf, size_t len, int use_sg,
			       unsigned int *index, unsigned int *offset,
			       enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	/* Don't transfer data during abort processing. */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		struct scatterlist *sg = buf;

		err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
							index, offset, (int)len,
							dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}
	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

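/*
 * Same as rtsx_transfer_data_partial(), but always transfers the whole
 * buffer or scatter-gather list in one go.
 */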
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		       int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* Don't transfer data during abort processing. */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card, buf,
						use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}