/*
 * Xilinx SystemACE device driver
 *
 * Copyright 2007 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

/*
 * The SystemACE chip is designed to configure FPGAs by loading an FPGA
 * bitstream from a file on a CF card and squirting it into FPGAs connected
 * to the SystemACE JTAG chain. It also has the advantage of providing an
 * MPU interface which can be used to control the FPGA configuration process
 * and to use the attached CF card for general purpose storage.
 *
 * This driver is a block device driver for the SystemACE.
 *
 * Initialization:
 *    The driver registers itself as a platform_device driver at module
 *    load time. The platform bus will take care of calling the
 *    ace_probe() method for all SystemACE instances in the system. Any
 *    number of SystemACE instances are supported. ace_probe() calls
 *    ace_setup() which initializes all data structures, reads the CF
 *    id structure and registers the device.
 *
 * Processing:
 *    Just about all of the heavy lifting in this driver is performed by
 *    a Finite State Machine (FSM). The driver needs to wait on a number
 *    of events; some raised by interrupts, others which need to be polled
 *    for. Describing all of the behaviour in an FSM seems to be the
 *    easiest way to keep the complexity low and make it easy to
 *    understand what the driver is doing. If the block ops or the
 *    request function need to interact with the hardware, then they
 *    simply need to flag the request and kick off FSM processing.
 *
 *    The FSM itself is atomic-safe code which can be run from any
 *    context. The general process flow is:
 *    1. obtain the ace->lock spinlock.
 *    2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
 *       cleared.
 *    3. release the lock.
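 *
 *    In code form, each entry point into the FSM follows this pattern
 *    (see ace_fsm_tasklet() below; ace_interrupt() does the same but
 *    uses plain spin_lock() since it already runs in irq context):
 *
 *	spin_lock_irqsave(&ace->lock, flags);
 *	ace->fsm_continue_flag = 1;
 *	while (ace->fsm_continue_flag)
 *		ace_fsm_dostate(ace);
 *	spin_unlock_irqrestore(&ace->lock, flags);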
 *
 *    Individual states do not sleep in any way. If a condition needs to
 *    be waited for then the state must clear the fsm_continue flag and
 *    either schedule the FSM to be run again at a later time, or expect
 *    an interrupt to call the FSM when the desired condition is met.
 *
 *    In normal operation, the FSM is processed in interrupt context
 *    either when the driver's tasklet is scheduled, or when an irq is
 *    raised by the hardware. The tasklet can be scheduled at any time.
 *    The request method in particular schedules the tasklet when a new
 *    request has been indicated by the block layer. Once started, the
 *    FSM proceeds as far as it can processing the request until it
 *    needs to wait on a hardware event. At this point, it must yield
 *    execution.
 *
 *    A state has two options when yielding execution:
 *    1. ace_fsm_yield()
 *       - Call if the state needs to poll for an event.
 *       - clears the fsm_continue flag to exit the processing loop
 *       - reschedules the tasklet to run again as soon as possible
 *    2. ace_fsm_yieldirq()
 *       - Call if an irq is expected from the HW
 *       - clears the fsm_continue flag to exit the processing loop
 *       - does not reschedule the tasklet so the FSM will not be
 *         processed again until an irq is received.
 *    After calling a yield function, the state must return control back
 *    to the FSM main loop.
 *
 *    Additionally, the driver maintains a kernel timer which can process
 *    the FSM. If the FSM gets stalled, typically due to a missed
 *    interrupt, then the kernel timer will expire and the driver can
 *    continue where it left off.
 *
 * To Do:
 *    - Add FPGA configuration control interface.
 *    - Request major number from lanana
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/platform_device.h>
#if defined(CONFIG_OF)
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("Xilinx SystemACE device driver");
MODULE_LICENSE("GPL");

/* SystemACE register definitions */
#define ACE_BUSMODE (0x00)

#define ACE_STATUS (0x04)
#define ACE_STATUS_CFGLOCK (0x00000001)
#define ACE_STATUS_MPULOCK (0x00000002)
#define ACE_STATUS_CFGERROR (0x00000004)	/* config controller error */
#define ACE_STATUS_CFCERROR (0x00000008)	/* CF controller error */
#define ACE_STATUS_CFDETECT (0x00000010)
#define ACE_STATUS_DATABUFRDY (0x00000020)
#define ACE_STATUS_DATABUFMODE (0x00000040)
#define ACE_STATUS_CFGDONE (0x00000080)
#define ACE_STATUS_RDYFORCFCMD (0x00000100)
#define ACE_STATUS_CFGMODEPIN (0x00000200)
#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
#define ACE_STATUS_CFBSY (0x00020000)
#define ACE_STATUS_CFRDY (0x00040000)
#define ACE_STATUS_CFDWF (0x00080000)
#define ACE_STATUS_CFDSC (0x00100000)
#define ACE_STATUS_CFDRQ (0x00200000)
#define ACE_STATUS_CFCORR (0x00400000)
#define ACE_STATUS_CFERR (0x00800000)

#define ACE_ERROR (0x08)
#define ACE_CFGLBA (0x0c)
#define ACE_MPULBA (0x10)

#define ACE_SECCNTCMD (0x14)
#define ACE_SECCNTCMD_RESET (0x0100)
#define ACE_SECCNTCMD_IDENTIFY (0x0200)
#define ACE_SECCNTCMD_READ_DATA (0x0300)
#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
#define ACE_SECCNTCMD_ABORT (0x0600)

#define ACE_VERSION (0x16)
#define ACE_VERSION_REVISION_MASK (0x00FF)
#define ACE_VERSION_MINOR_MASK (0x0F00)
#define ACE_VERSION_MAJOR_MASK (0xF000)

#define ACE_CTRL (0x18)
#define ACE_CTRL_FORCELOCKREQ (0x0001)
#define ACE_CTRL_LOCKREQ (0x0002)
#define ACE_CTRL_FORCECFGADDR (0x0004)
#define ACE_CTRL_FORCECFGMODE (0x0008)
#define ACE_CTRL_CFGMODE (0x0010)
#define ACE_CTRL_CFGSTART (0x0020)
#define ACE_CTRL_CFGSEL (0x0040)
#define ACE_CTRL_CFGRESET (0x0080)
#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
#define ACE_CTRL_ERRORIRQ (0x0200)
#define ACE_CTRL_CFGDONEIRQ (0x0400)
#define ACE_CTRL_RESETIRQ (0x0800)
#define ACE_CTRL_CFGPROG (0x1000)
#define ACE_CTRL_CFGADDR_MASK (0xe000)

#define ACE_FATSTAT (0x1c)

#define ACE_NUM_MINORS 16
#define ACE_SECTOR_SIZE (512)
#define ACE_FIFO_SIZE (32)
#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
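/* One 512 byte sector moves through the 32 byte data FIFO as
 * ACE_BUF_PER_SECTOR (16) separate buffer transfers; the data_count
 * field of struct ace_device counts these buffers, not sectors. */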

#define ACE_BUS_WIDTH_8 0
#define ACE_BUS_WIDTH_16 1

struct ace_reg_ops;

struct ace_device {
	/* driver state data */
	int id;
	int media_change;
	int users;
	struct list_head list;

	/* finite state machine data */
	struct tasklet_struct fsm_tasklet;
	uint fsm_task;		/* Current activity (ACE_TASK_*) */
	uint fsm_state;		/* Current state (ACE_FSM_STATE_*) */
	uint fsm_continue_flag;	/* cleared to exit FSM mainloop */
	uint fsm_iter_num;
	struct timer_list stall_timer;

	/* Transfer state/result, used for both id and block requests */
	struct request *req;	/* request being processed */
	void *data_ptr;		/* pointer to I/O buffer */
	int data_count;		/* number of buffers remaining */
	int data_result;	/* Result of transfer; 0 := success */

	int id_req_count;	/* count of id requests */
	int id_result;
	struct completion id_completion;	/* used when id req finishes */
	int in_irq;

	/* Details of hardware device */
	resource_size_t physaddr;
	void __iomem *baseaddr;
	int irq;
	int bus_width;		/* 0 := 8 bit; 1 := 16 bit */
	struct ace_reg_ops *reg_ops;
	int lock_count;

	/* Block device data structures */
	spinlock_t lock;
	struct device *dev;
	struct request_queue *queue;
	struct gendisk *gd;

	/* Inserted CF card parameters */
	u16 cf_id[ATA_ID_WORDS];
};

static DEFINE_MUTEX(xsysace_mutex);
static int ace_major;

/* ---------------------------------------------------------------------
 * Low level register access
 */
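
/* The SystemACE can be attached over an 8 bit or a 16 bit bus, and a
 * 16 bit attachment may be wired either big or little endian. Each
 * variant gets its own ace_reg_ops table; ace_setup() probes the BUSMODE
 * register to decide which table to install in ace->reg_ops. */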

struct ace_reg_ops {
	u16 (*in)(struct ace_device *ace, int reg);
	void (*out)(struct ace_device *ace, int reg, u16 val);
	void (*datain)(struct ace_device *ace);
	void (*dataout)(struct ace_device *ace);
};

/* 8 Bit bus width */
static u16 ace_in_8(struct ace_device *ace, int reg)
{
	void __iomem *r = ace->baseaddr + reg;
	return in_8(r) | (in_8(r + 1) << 8);
}

static void ace_out_8(struct ace_device *ace, int reg, u16 val)
{
	void __iomem *r = ace->baseaddr + reg;
	out_8(r, val);
	out_8(r + 1, val >> 8);
}

static void ace_datain_8(struct ace_device *ace)
{
	void __iomem *r = ace->baseaddr + 0x40;
	u8 *dst = ace->data_ptr;
	int i = ACE_FIFO_SIZE;
	while (i--)
		*dst++ = in_8(r++);
	ace->data_ptr = dst;
}

static void ace_dataout_8(struct ace_device *ace)
{
	void __iomem *r = ace->baseaddr + 0x40;
	u8 *src = ace->data_ptr;
	int i = ACE_FIFO_SIZE;
	while (i--)
		out_8(r++, *src++);
	ace->data_ptr = src;
}

static struct ace_reg_ops ace_reg_8_ops = {
	.in = ace_in_8,
	.out = ace_out_8,
	.datain = ace_datain_8,
	.dataout = ace_dataout_8,
};

/* 16 bit big endian bus attachment */
static u16 ace_in_be16(struct ace_device *ace, int reg)
{
	return in_be16(ace->baseaddr + reg);
}

static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
{
	out_be16(ace->baseaddr + reg, val);
}

static void ace_datain_be16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *dst = ace->data_ptr;
	while (i--)
		*dst++ = in_le16(ace->baseaddr + 0x40);
	ace->data_ptr = dst;
}

static void ace_dataout_be16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *src = ace->data_ptr;
	while (i--)
		out_le16(ace->baseaddr + 0x40, *src++);
	ace->data_ptr = src;
}

/* 16 bit little endian bus attachment */
static u16 ace_in_le16(struct ace_device *ace, int reg)
{
	return in_le16(ace->baseaddr + reg);
}

static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
{
	out_le16(ace->baseaddr + reg, val);
}

static void ace_datain_le16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *dst = ace->data_ptr;
	while (i--)
		*dst++ = in_be16(ace->baseaddr + 0x40);
	ace->data_ptr = dst;
}

static void ace_dataout_le16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *src = ace->data_ptr;
	while (i--)
		out_be16(ace->baseaddr + 0x40, *src++);
	ace->data_ptr = src;
}

static struct ace_reg_ops ace_reg_be16_ops = {
	.in = ace_in_be16,
	.out = ace_out_be16,
	.datain = ace_datain_be16,
	.dataout = ace_dataout_be16,
};

static struct ace_reg_ops ace_reg_le16_ops = {
	.in = ace_in_le16,
	.out = ace_out_le16,
	.datain = ace_datain_le16,
	.dataout = ace_dataout_le16,
};

static inline u16 ace_in(struct ace_device *ace, int reg)
{
	return ace->reg_ops->in(ace, reg);
}

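/* ace_in32()/ace_out32() access a 32 bit register as two 16 bit halves,
 * low half first */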
static inline u32 ace_in32(struct ace_device *ace, int reg)
{
	return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
}

static inline void ace_out(struct ace_device *ace, int reg, u16 val)
{
	ace->reg_ops->out(ace, reg, val);
}

static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
{
	ace_out(ace, reg, val);
	ace_out(ace, reg + 2, val >> 16);
}

/* ---------------------------------------------------------------------
 * Debug support functions
 */

#if defined(DEBUG)
static void ace_dump_mem(void *base, int len)
{
	const char *ptr = base;
	int i, j;

	for (i = 0; i < len; i += 16) {
		printk(KERN_INFO "%.8x:", i);
		for (j = 0; j < 16; j++) {
			if (!(j % 4))
				printk(" ");
			printk("%.2x", ptr[i + j]);
		}
		printk(" ");
		for (j = 0; j < 16; j++)
			printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
		printk("\n");
	}
}
#else
static inline void ace_dump_mem(void *base, int len)
{
}
#endif

static void ace_dump_regs(struct ace_device *ace)
{
	dev_info(ace->dev,
		 " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
		 " status:%.8x mpu_lba:%.8x busmode:%4x\n"
		 " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
		 ace_in32(ace, ACE_CTRL),
		 ace_in(ace, ACE_SECCNTCMD),
		 ace_in(ace, ACE_VERSION),
		 ace_in32(ace, ACE_STATUS),
		 ace_in32(ace, ACE_MPULBA),
		 ace_in(ace, ACE_BUSMODE),
		 ace_in32(ace, ACE_ERROR),
		 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
}

void ace_fix_driveid(u16 *id)
{
#if defined(__BIG_ENDIAN)
	int i;

	/* All halfwords are in the wrong byte order; swap the bytes */
	for (i = 0; i < ATA_ID_WORDS; i++, id++)
		*id = le16_to_cpu(*id);
#endif
}

/* ---------------------------------------------------------------------
 * Finite State Machine (FSM) implementation
 */

/* FSM tasks; used to direct state transitions */
#define ACE_TASK_IDLE 0
#define ACE_TASK_IDENTIFY 1
#define ACE_TASK_READ 2
#define ACE_TASK_WRITE 3
#define ACE_FSM_NUM_TASKS 4

/* FSM state definitions */
#define ACE_FSM_STATE_IDLE 0
#define ACE_FSM_STATE_REQ_LOCK 1
#define ACE_FSM_STATE_WAIT_LOCK 2
#define ACE_FSM_STATE_WAIT_CFREADY 3
#define ACE_FSM_STATE_IDENTIFY_PREPARE 4
#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5
#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6
#define ACE_FSM_STATE_REQ_PREPARE 7
#define ACE_FSM_STATE_REQ_TRANSFER 8
#define ACE_FSM_STATE_REQ_COMPLETE 9
#define ACE_FSM_STATE_ERROR 10
#define ACE_FSM_NUM_STATES 11
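
/* A normal block request walks IDLE -> REQ_LOCK -> (WAIT_LOCK) ->
 * WAIT_CFREADY -> REQ_PREPARE -> REQ_TRANSFER -> REQ_COMPLETE and back to
 * IDLE; an identify request takes the IDENTIFY_* states in place of the
 * REQ_* ones. */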

/* Set flag to exit FSM loop and reschedule tasklet */
static inline void ace_fsm_yield(struct ace_device *ace)
{
	dev_dbg(ace->dev, "ace_fsm_yield()\n");
	tasklet_schedule(&ace->fsm_tasklet);
	ace->fsm_continue_flag = 0;
}

/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
	dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");

	if (!ace->irq)
		/* No IRQ assigned, so need to poll */
		tasklet_schedule(&ace->fsm_tasklet);
	ace->fsm_continue_flag = 0;
}

/* Get the next read/write request; ending requests that we don't handle */
struct request *ace_get_next_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
	return req;
}

static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd,
				     ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				 ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state. The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}

static void ace_fsm_tasklet(unsigned long data)
{
	struct ace_device *ace = (void *)data;
	unsigned long flags;

	spin_lock_irqsave(&ace->lock, flags);

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	spin_unlock_irqrestore(&ace->lock, flags);
}

static void ace_stall_timer(unsigned long data)
{
	struct ace_device *ace = (void *)data;
	unsigned long flags;

	dev_warn(ace->dev,
		 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
		 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
		 ace->data_count);
	spin_lock_irqsave(&ace->lock, flags);

	/* Rearm the stall timer *before* entering FSM (which may then
	 * delete the timer) */
	mod_timer(&ace->stall_timer, jiffies + HZ);

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	spin_unlock_irqrestore(&ace->lock, flags);
}

/* ---------------------------------------------------------------------
 * Interrupt handling routines
 */
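/* Report a transfer failure if the hardware has flagged a config or CF
 * controller error while error interrupts are enabled */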
static int ace_interrupt_checkstate(struct ace_device *ace)
{
	u32 sreg = ace_in32(ace, ACE_STATUS);
	u16 creg = ace_in(ace, ACE_CTRL);

	/* Check for error occurrence */
	if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
	    (creg & ACE_CTRL_ERRORIRQ)) {
		dev_err(ace->dev, "transfer failure\n");
		ace_dump_regs(ace);
		return -EIO;
	}

	return 0;
}

static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	u16 creg;
	struct ace_device *ace = dev_id;

	/* be safe and get the lock */
	spin_lock(&ace->lock);
	ace->in_irq = 1;

	/* clear the interrupt */
	creg = ace_in(ace, ACE_CTRL);
	ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
	ace_out(ace, ACE_CTRL, creg);
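	/* RESETIRQ is only pulsed above (set, then the original control
	 * value is written back); presumably leaving it set would hold
	 * the interrupt request line in reset and mask further irqs */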

	/* check for IO failures */
	if (ace_interrupt_checkstate(ace))
		ace->data_result = -EIO;

	if (ace->fsm_task == 0) {
		dev_err(ace->dev,
			"spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
			ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
			ace_in(ace, ACE_SECCNTCMD));
		dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
			ace->fsm_task, ace->fsm_state, ace->data_count);
	}

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	/* done with interrupt; drop the lock */
	ace->in_irq = 0;
	spin_unlock(&ace->lock);

	return IRQ_HANDLED;
}

/* ---------------------------------------------------------------------
 * Block ops
 */
static void ace_request(struct request_queue *q)
{
	struct request *req;
	struct ace_device *ace;

	req = ace_get_next_request(q);

	if (req) {
		ace = req->rq_disk->private_data;
		tasklet_schedule(&ace->fsm_tasklet);
	}
}

static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
{
	struct ace_device *ace = gd->private_data;
	dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change);

	return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
}

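/* Synchronously refresh the CF identify data: bump id_req_count, kick the
 * FSM and sleep on id_completion until the IDENTIFY task has finished */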
static int ace_revalidate_disk(struct gendisk *gd)
{
	struct ace_device *ace = gd->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_revalidate_disk()\n");

	if (ace->media_change) {
		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");

		spin_lock_irqsave(&ace->lock, flags);
		ace->id_req_count++;
		spin_unlock_irqrestore(&ace->lock, flags);

		tasklet_schedule(&ace->fsm_tasklet);
		wait_for_completion(&ace->id_completion);
	}

	dev_dbg(ace->dev, "revalidate complete\n");
	return ace->id_result;
}

static int ace_open(struct block_device *bdev, fmode_t mode)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users++;
	spin_unlock_irqrestore(&ace->lock, flags);

	check_disk_change(bdev);
	mutex_unlock(&xsysace_mutex);

	return 0;
}

static int ace_release(struct gendisk *disk, fmode_t mode)
{
	struct ace_device *ace = disk->private_data;
	unsigned long flags;
	u16 val;

	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users--;
	if (ace->users == 0) {
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
	}
	spin_unlock_irqrestore(&ace->lock, flags);
	mutex_unlock(&xsysace_mutex);
	return 0;
}

static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	u16 *cf_id = ace->cf_id;

	dev_dbg(ace->dev, "ace_getgeo()\n");

	geo->heads = cf_id[ATA_ID_HEADS];
	geo->sectors = cf_id[ATA_ID_SECTORS];
	geo->cylinders = cf_id[ATA_ID_CYLS];

	return 0;
}

static const struct block_device_operations ace_fops = {
	.owner = THIS_MODULE,
	.open = ace_open,
	.release = ace_release,
	.check_events = ace_check_events,
	.revalidate_disk = ace_revalidate_disk,
	.getgeo = ace_getgeo,
};

/* --------------------------------------------------------------------
 * SystemACE device setup/teardown code
 */
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_logical_block_size(ace->queue, 512);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* 0x0101 should work regardless of endianness */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianness */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Now we can hook up the irq handler */
	if (ace->irq) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = 0;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);

	return 0;

err_read:
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		 (unsigned long long) ace->physaddr);
	return -ENOMEM;
}

static void __devexit ace_teardown(struct ace_device *ace)
{
	if (ace->gd) {
		del_gendisk(ace->gd);
		put_disk(ace->gd);
	}

	if (ace->queue)
		blk_cleanup_queue(ace->queue);

	tasklet_kill(&ace->fsm_tasklet);

	if (ace->irq)
		free_irq(ace->irq, ace);

	iounmap(ace->baseaddr);
}

static int __devinit
ace_alloc(struct device *dev, int id, resource_size_t physaddr,
	  int irq, int bus_width)
{
	struct ace_device *ace;
	int rc;
	dev_dbg(dev, "ace_alloc(%p)\n", dev);

	if (!physaddr) {
		rc = -ENODEV;
		goto err_noreg;
	}

	/* Allocate and initialize the ace device structure */
	ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
	if (!ace) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	ace->dev = dev;
	ace->id = id;
	ace->physaddr = physaddr;
	ace->irq = irq;
	ace->bus_width = bus_width;

	/* Call the setup code */
	rc = ace_setup(ace);
	if (rc)
		goto err_setup;

	dev_set_drvdata(dev, ace);
	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(ace);
err_alloc:
err_noreg:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}

static void __devexit ace_free(struct device *dev)
{
	struct ace_device *ace = dev_get_drvdata(dev);
	dev_dbg(dev, "ace_free(%p)\n", dev);

	if (ace) {
		ace_teardown(ace);
		dev_set_drvdata(dev, NULL);
		kfree(ace);
	}
}

/* ---------------------------------------------------------------------
 * Platform Bus Support
 */

static int __devinit ace_probe(struct platform_device *dev)
{
	resource_size_t physaddr = 0;
	int bus_width = ACE_BUS_WIDTH_16;	/* FIXME: should not be hard coded */
	u32 id = dev->id;
	int irq = 0;
	int i;

	dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);

	/* device id and bus width */
	of_property_read_u32(dev->dev.of_node, "port-number", &id);
	if (id < 0)
		id = 0;
	if (of_find_property(dev->dev.of_node, "8-bit", NULL))
		bus_width = ACE_BUS_WIDTH_8;

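	/* Pick up the register window and (optional) interrupt from the
	 * platform resources; without an irq the driver falls back to
	 * polled operation */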
	for (i = 0; i < dev->num_resources; i++) {
		if (dev->resource[i].flags & IORESOURCE_MEM)
			physaddr = dev->resource[i].start;
		if (dev->resource[i].flags & IORESOURCE_IRQ)
			irq = dev->resource[i].start;
	}

	/* Call the bus-independent setup code */
	return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
}

/*
 * Platform bus remove() method
 */
static int __devexit ace_remove(struct platform_device *dev)
{
	ace_free(&dev->dev);
	return 0;
}

#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id ace_of_match[] __devinitconst = {
	{ .compatible = "xlnx,opb-sysace-1.00.b", },
	{ .compatible = "xlnx,opb-sysace-1.00.c", },
	{ .compatible = "xlnx,xps-sysace-1.00.a", },
	{ .compatible = "xlnx,sysace", },
	{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
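
/* A matching device tree node looks roughly like the sketch below. The
 * addresses and the interrupt specifier are made-up examples; "port-number"
 * and the optional "8-bit" flag are the properties parsed in ace_probe():
 *
 *	sysace@83600000 {
 *		compatible = "xlnx,xps-sysace-1.00.a", "xlnx,sysace";
 *		reg = <0x83600000 0x80>;
 *		interrupts = <1>;
 *		port-number = <0>;
 *	};
 */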
#else /* CONFIG_OF */
#define ace_of_match NULL
#endif /* CONFIG_OF */

static struct platform_driver ace_platform_driver = {
	.probe = ace_probe,
	.remove = __devexit_p(ace_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "xsysace",
		.of_match_table = ace_of_match,
	},
};

/* ---------------------------------------------------------------------
 * Module init/exit routines
 */
static int __init ace_init(void)
{
	int rc;

	ace_major = register_blkdev(ace_major, "xsysace");
	if (ace_major <= 0) {
		rc = -ENOMEM;
		goto err_blk;
	}

	rc = platform_driver_register(&ace_platform_driver);
	if (rc)
		goto err_plat;

	pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
	return 0;

err_plat:
	unregister_blkdev(ace_major, "xsysace");
err_blk:
	printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
	return rc;
}
module_init(ace_init);

static void __exit ace_exit(void)
{
	pr_debug("Unregistering Xilinx SystemACE driver\n");
	platform_driver_unregister(&ace_platform_driver);
	unregister_blkdev(ace_major, "xsysace");
}
module_exit(ace_exit);