/*
 *  drivers/block/mg_disk.c
 *
 *  Support for the mGine m[g]flash IO mode.
 *  Based on legacy hd.c
 *
 * (c) 2008 mGine Co.,LTD
 * (c) 2008 unsik Kim <donari75@gmail.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mg_disk.h>
#include <linux/slab.h>

#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)

/* name for block device */
#define MG_DISK_NAME "mgd"

#define MG_DISK_MAJ 0
#define MG_DISK_MAX_PART 16
#define MG_SECTOR_SIZE 512
#define MG_MAX_SECTS 256

/* Register offsets */
#define MG_BUFF_OFFSET			0x8000
#define MG_REG_OFFSET			0xC000
#define MG_REG_FEATURE			(MG_REG_OFFSET + 2)	/* write case */
#define MG_REG_ERROR			(MG_REG_OFFSET + 2)	/* read case */
#define MG_REG_SECT_CNT			(MG_REG_OFFSET + 4)
#define MG_REG_SECT_NUM			(MG_REG_OFFSET + 6)
#define MG_REG_CYL_LOW			(MG_REG_OFFSET + 8)
#define MG_REG_CYL_HIGH			(MG_REG_OFFSET + 0xA)
#define MG_REG_DRV_HEAD			(MG_REG_OFFSET + 0xC)
#define MG_REG_COMMAND			(MG_REG_OFFSET + 0xE)	/* write case */
#define MG_REG_STATUS			(MG_REG_OFFSET + 0xE)	/* read  case */
#define MG_REG_DRV_CTRL			(MG_REG_OFFSET + 0x10)
#define MG_REG_BURST_CTRL		(MG_REG_OFFSET + 0x12)

/* handy status */
#define MG_STAT_READY	(ATA_DRDY | ATA_DSC)
#define MG_READY_OK(s)	(((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
				 ATA_ERR))) == MG_STAT_READY)

/* error code for others */
#define MG_ERR_NONE		0
#define MG_ERR_TIMEOUT		0x100
#define MG_ERR_INIT_STAT	0x101
#define MG_ERR_TRANSLATION	0x102
#define MG_ERR_CTRL_RST		0x103
#define MG_ERR_INV_STAT		0x104
#define MG_ERR_RSTOUT		0x105

#define MG_MAX_ERRORS	6	/* Max read/write errors */

/* command */
#define MG_CMD_RD 0x20
#define MG_CMD_WR 0x30
#define MG_CMD_SLEEP 0x99
#define MG_CMD_WAKEUP 0xC3
#define MG_CMD_ID 0xEC
#define MG_CMD_WR_CONF 0x3C
#define MG_CMD_RD_CONF 0x40

/* operation mode */
#define MG_OP_CASCADE (1 << 0)
#define MG_OP_CASCADE_SYNC_RD (1 << 1)
#define MG_OP_CASCADE_SYNC_WR (1 << 2)
#define MG_OP_INTERLEAVE (1 << 3)

/* synchronous */
#define MG_BURST_LAT_4 (3 << 4)
#define MG_BURST_LAT_5 (4 << 4)
#define MG_BURST_LAT_6 (5 << 4)
#define MG_BURST_LAT_7 (6 << 4)
#define MG_BURST_LAT_8 (7 << 4)
#define MG_BURST_LEN_4 (1 << 1)
#define MG_BURST_LEN_8 (2 << 1)
#define MG_BURST_LEN_16 (3 << 1)
#define MG_BURST_LEN_32 (4 << 1)
#define MG_BURST_LEN_CONT (0 << 1)

/* timeout value (unit: ms) */
#define MG_TMAX_CONF_TO_CMD	1
#define MG_TMAX_WAIT_RD_DRQ	10
#define MG_TMAX_WAIT_WR_DRQ	500
#define MG_TMAX_RST_TO_BUSY	10
#define MG_TMAX_HDRST_TO_RDY	500
#define MG_TMAX_SWRST_TO_RDY	500
#define MG_TMAX_RSTOUT		3000

#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)

/* main structure for mflash driver */
struct mg_host {
	struct device *dev;

	struct request_queue *breq;
	struct request *req;
	spinlock_t lock;
	struct gendisk *gd;

	struct timer_list timer;
	void (*mg_do_intr) (struct mg_host *);

	u16 id[ATA_ID_WORDS];

	u16 cyls;
	u16 heads;
	u16 sectors;
	u32 n_sectors;
	u32 nres_sectors;

	void __iomem *dev_base;
	unsigned int irq;
	unsigned int rst;
	unsigned int rstout;

	u32 major;
	u32 error;
};

/*
 * Debugging macro and defines
 */
#undef DO_MG_DEBUG
#ifdef DO_MG_DEBUG
#  define MG_DBG(fmt, args...) \
	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
#else /* !DO_MG_DEBUG */
#  define MG_DBG(fmt, args...) do { } while (0)
#endif /* DO_MG_DEBUG */

static void mg_request(struct request_queue *);

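/*
 * Complete @nr_bytes of the current request.  Returns true while the
 * request still has bytes left to transfer; once it is fully completed,
 * host->req is cleared and false is returned.
 */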
static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
{
	if (__blk_end_request(host->req, err, nr_bytes))
		return true;

	host->req = NULL;
	return false;
}

static bool mg_end_request_cur(struct mg_host *host, int err)
{
	return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
}

static void mg_dump_status(const char *msg, unsigned int stat,
		struct mg_host *host)
{
	char *name = MG_DISK_NAME;

	if (host->req)
		name = host->req->rq_disk->disk_name;

	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
	if (stat & ATA_BUSY)
		printk("Busy ");
	if (stat & ATA_DRDY)
		printk("DriveReady ");
	if (stat & ATA_DF)
		printk("WriteFault ");
	if (stat & ATA_DSC)
		printk("SeekComplete ");
	if (stat & ATA_DRQ)
		printk("DataRequest ");
	if (stat & ATA_CORR)
		printk("CorrectedError ");
	if (stat & ATA_ERR)
		printk("Error ");
	printk("}\n");
	if ((stat & ATA_ERR) == 0) {
		host->error = 0;
	} else {
		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
				host->error & 0xff);
		if (host->error & ATA_BBK)
			printk("BadSector ");
		if (host->error & ATA_UNC)
			printk("UncorrectableError ");
		if (host->error & ATA_IDNF)
			printk("SectorIdNotFound ");
		if (host->error & ATA_ABORTED)
			printk("DriveStatusError ");
		if (host->error & ATA_AMNF)
			printk("AddrMarkNotFound ");
		printk("}");
		if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
			if (host->req)
				printk(", sector=%u",
				       (unsigned int)blk_rq_pos(host->req));
		}
		printk("\n");
	}
}

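/*
 * Poll the status register until it matches @expect (ATA_BUSY, ATA_DRQ or
 * MG_STAT_READY) or until @msec milliseconds have elapsed.  Returns
 * MG_ERR_NONE on success, otherwise a nonzero error value which is also
 * stored in host->error.
 */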
static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
	u8 status;
	unsigned long expire, cur_jiffies;
	struct mg_drv_data *prv_data = host->dev->platform_data;

	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	/* Two dummy status reads prevent picking up an invalid status:
	 * a short delay (about three mflash operating clocks) is needed
	 * before the busy bit is set.  Dummy reads are used instead of a
	 * busy wait because the mflash PLL is machine dependent.
	 */
	if (prv_data->use_polling) {
		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	}

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
		cur_jiffies = jiffies;
		if (status & ATA_BUSY) {
			if (expect == ATA_BUSY)
				break;
		} else {
			/* Check the error condition! */
			if (status & ATA_ERR) {
				mg_dump_status("mg_wait", status, host);
				break;
			}

			if (expect == MG_STAT_READY)
				if (MG_READY_OK(status))
					break;

			if (expect == ATA_DRQ)
				if (status & ATA_DRQ)
					break;
		}
		if (!msec) {
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));

	if (time_after_eq(cur_jiffies, expire) && msec)
		host->error = MG_ERR_TIMEOUT;

	return host->error;
}

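/*
 * Wait up to @msec milliseconds for the RSTOUT GPIO to go high, i.e. for
 * the device to finish its power-on reset sequence.
 */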
static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
{
	unsigned long expire;

	expire = jiffies + msecs_to_jiffies(msec);
	while (time_before(jiffies, expire)) {
		if (gpio_get_value(rstout) == 1)
			return MG_ERR_NONE;
		msleep(10);
	}

	return MG_ERR_RSTOUT;
}

static void mg_unexpected_intr(struct mg_host *host)
{
	u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	mg_dump_status("mg_unexpected_intr", status, host);
}

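/*
 * Interrupt handler: dispatch to the completion handler installed by
 * mg_out(), or report an unexpected interrupt if none is pending.
 */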
static irqreturn_t mg_irq(int irq, void *dev_id)
{
	struct mg_host *host = dev_id;
	void (*handler)(struct mg_host *) = host->mg_do_intr;

	spin_lock(&host->lock);

	host->mg_do_intr = NULL;
	del_timer(&host->timer);
	if (!handler)
		handler = mg_unexpected_intr;
	handler(host);

	spin_unlock(&host->lock);

	return IRQ_HANDLED;
}

/* local copy of ata_id_string() */
static void mg_id_string(const u16 *id, unsigned char *s,
			 unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/* local copy of ata_id_c_string() */
static void mg_id_c_string(const u16 *id, unsigned char *s,
			   unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	mg_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

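/*
 * Issue an IDENTIFY DEVICE command, copy the ID data into host->id and
 * derive the CHS geometry and usable capacity (minus the reserved
 * sectors) from it.
 */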
static int mg_get_disk_id(struct mg_host *host)
{
	u32 i;
	s32 err;
	const u16 *id = host->id;
	struct mg_drv_data *prv_data = host->dev->platform_data;
	char fwrev[ATA_ID_FW_REV_LEN + 1];
	char model[ATA_ID_PROD_LEN + 1];
	char serial[ATA_ID_SERNO_LEN + 1];

	if (!prv_data->use_polling)
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
	if (err)
		return err;

	for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
		host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
					MG_BUFF_OFFSET + i * 2));

	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
	if (err)
		return err;

	if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
		return MG_ERR_TRANSLATION;

	host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	host->cyls = id[ATA_ID_CYLS];
	host->heads = id[ATA_ID_HEADS];
	host->sectors = id[ATA_ID_SECTORS];

	if (MG_RES_SEC && host->heads && host->sectors) {
		/* modify cyls, n_sectors */
		host->cyls = (host->n_sectors - MG_RES_SEC) /
			host->heads / host->sectors;
		host->nres_sectors = host->n_sectors - host->cyls *
			host->heads * host->sectors;
		host->n_sectors -= host->nres_sectors;
	}

	mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
	printk(KERN_INFO "mg_disk: model: %s\n", model);
	printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
	printk(KERN_INFO "mg_disk: serial: %s\n", serial);
	printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
			host->n_sectors, host->nres_sectors);

	if (!prv_data->use_polling)
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	return err;
}


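/*
 * Reset the device: toggle the hardware reset line, perform an ATA soft
 * reset, then sanity-check the initial status nibble.
 */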
static int mg_disk_init(struct mg_host *host)
{
	struct mg_drv_data *prv_data = host->dev->platform_data;
	s32 err;
	u8 init_status;

	/* hdd rst low */
	gpio_set_value(host->rst, 0);
	err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* hdd rst high */
	gpio_set_value(host->rst, 1);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
	if (err)
		return err;

	/* soft reset on */
	outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* soft reset off */
	outb(prv_data->use_polling ? ATA_NIEN : 0,
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
	if (err)
		return err;

	init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;

	if (init_status == 0xf)
		return MG_ERR_INIT_STAT;

	return err;
}

static void mg_bad_rw_intr(struct mg_host *host)
{
	if (host->req)
		if (++host->req->errors >= MG_MAX_ERRORS ||
		    host->error == MG_ERR_TIMEOUT)
			mg_end_request_cur(host, -EIO);
}

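/*
 * Program the task-file registers for a transfer of @sect_cnt sectors
 * starting at LBA @sect_num and issue @cmd.  The reserved area at the
 * start of the media is skipped by offsetting @sect_num.  In interrupt
 * mode the completion handler and a 3 second timeout are armed first.
 */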
static unsigned int mg_out(struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt,
		unsigned int cmd,
		void (*intr_addr)(struct mg_host *))
{
	struct mg_drv_data *prv_data = host->dev->platform_data;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return host->error;

	if (!prv_data->use_polling) {
		host->mg_do_intr = intr_addr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}
	if (MG_RES_SEC)
		sect_num += MG_RES_SEC;
	outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
	outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
	outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
			MG_REG_CYL_LOW);
	outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
			MG_REG_CYL_HIGH);
	outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
			(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
	outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
	return MG_ERR_NONE;
}

static void mg_read_one(struct mg_host *host, struct request *req)
{
	u16 *buff = (u16 *)req->buffer;
	u32 i;

	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
			      (i << 1));
}

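/* Polling-mode read: transfer the whole request one sector at a time. */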
static void mg_read(struct request *req)
{
	struct mg_host *host = req->rq_disk->private_data;

	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
		   MG_CMD_RD, NULL) != MG_ERR_NONE)
		mg_bad_rw_intr(host);

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);

	do {
		if (mg_wait(host, ATA_DRQ,
			    MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}

		mg_read_one(host, req);

		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}

static void mg_write_one(struct mg_host *host, struct request *req)
{
	u16 *buff = (u16 *)req->buffer;
	u32 i;

	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
		outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
		     (i << 1));
}

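/* Polling-mode write: transfer the whole request one sector at a time. */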
static void mg_write(struct request *req)
{
	struct mg_host *host = req->rq_disk->private_data;
	unsigned int rem = blk_rq_sectors(req);

	if (mg_out(host, blk_rq_pos(req), rem,
		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
	       rem, blk_rq_pos(req), req->buffer);

	if (mg_wait(host, ATA_DRQ,
		    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	do {
		mg_write_one(host, req);

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);

		rem--;
		if (rem > 1 && mg_wait(host, ATA_DRQ,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		} else if (mg_wait(host, MG_STAT_READY,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}

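/*
 * Interrupt-mode read completion: read one sector from the device buffer,
 * confirm it, then either re-arm the handler for the next sector or move
 * on to the next request.
 */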
static void mg_read_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & ATA_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if (i & ATA_DRQ)
			goto ok_to_read;
	} while (0);
	mg_dump_status("mg_read_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_read:
	mg_read_one(host, req);

	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
	       blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);

	/* send read confirm */
	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
		/* set handler if read remains */
		host->mg_do_intr = mg_read_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	} else /* goto next request */
		mg_request(host->breq);
}

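/*
 * Interrupt-mode write completion: if the request has more sectors, write
 * the next one and re-arm the handler; otherwise confirm the transfer and
 * fetch the next request.
 */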
static void mg_write_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i;
	bool rem;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & ATA_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
			goto ok_to_write;
	} while (0);
	mg_dump_status("mg_write_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_write:
	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
		/* write 1 sector and set handler if remains */
		mg_write_one(host, req);
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
		       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
		host->mg_do_intr = mg_write_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send write confirm */
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (!rem)
		mg_request(host->breq);
}

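/*
 * Timer callback fired when an expected interrupt never arrives: record a
 * timeout error, fail the current chunk and restart the request queue.
 */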
static void mg_times_out(unsigned long data)
{
	struct mg_host *host = (struct mg_host *)data;
	char *name;

	spin_lock_irq(&host->lock);

	if (!host->req)
		goto out_unlock;

	host->mg_do_intr = NULL;

	name = host->req->rq_disk->disk_name;
	printk(KERN_DEBUG "%s: timeout\n", name);

	host->error = MG_ERR_TIMEOUT;
	mg_bad_rw_intr(host);

out_unlock:
	mg_request(host->breq);
	spin_unlock_irq(&host->lock);
}

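/* Request function used in polling mode: process requests synchronously. */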
static void mg_request_poll(struct request_queue *q)
{
	struct mg_host *host = q->queuedata;

	while (1) {
		if (!host->req) {
			host->req = blk_fetch_request(q);
			if (!host->req)
				break;
		}

		if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
			mg_end_request_cur(host, -EIO);
			continue;
		}

		if (rq_data_dir(host->req) == READ)
			mg_read(host->req);
		else
			mg_write(host->req);
	}
}

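/*
 * Start an interrupt-driven transfer for @req.  Reads are fully interrupt
 * driven; writes prime the first sector with interrupts masked and let
 * mg_write_intr() handle the rest.
 */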
static unsigned int mg_issue_req(struct request *req,
		struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		break;
	case WRITE:
		/* TODO : handler */
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		del_timer(&host->timer);
		mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		if (host->error) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		mg_write_one(host, req);
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
		break;
	}
	return MG_ERR_NONE;
}

/* This function is also called from IRQ context */
static void mg_request(struct request_queue *q)
{
	struct mg_host *host = q->queuedata;
	struct request *req;
	u32 sect_num, sect_cnt;

	while (1) {
		if (!host->req) {
			host->req = blk_fetch_request(q);
			if (!host->req)
				break;
		}
		req = host->req;

		/* check unwanted request call */
		if (host->mg_do_intr)
			return;

		del_timer(&host->timer);

		sect_num = blk_rq_pos(req);
		/* deal with whole segments */
		sect_cnt = blk_rq_sectors(req);

		/* sanity check */
		if (sect_num >= get_capacity(req->rq_disk) ||
				((sect_num + sect_cnt) >
				 get_capacity(req->rq_disk))) {
			printk(KERN_WARNING
					"%s: bad access: sector=%d, count=%d\n",
					req->rq_disk->disk_name,
					sect_num, sect_cnt);
			mg_end_request_cur(host, -EIO);
			continue;
		}

		if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
			mg_end_request_cur(host, -EIO);
			continue;
		}

		if (!mg_issue_req(req, host, sect_num, sect_cnt))
			return;
	}
}

static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mg_host *host = bdev->bd_disk->private_data;

	geo->cylinders = (unsigned short)host->cyls;
	geo->heads = (unsigned char)host->heads;
	geo->sectors = (unsigned char)host->sectors;
	return 0;
}

static const struct block_device_operations mg_disk_ops = {
	.getgeo = mg_getgeo
};

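/* Put the mflash into deep sleep on suspend; mg_resume() wakes it back up. */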
static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash enters deep sleep */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
		if (!prv_data->use_polling)
			outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		return -EIO;
	}

	return 0;
}

static int mg_resume(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash wakes up */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	return 0;
}

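/*
 * Probe: map the device registers, set up the reset GPIOs, reset and
 * identify the media, register the block device and request queue, and
 * finally add the gendisk.
 */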
static int mg_probe(struct platform_device *plat_dev)
{
	struct mg_host *host;
	struct resource *rsc;
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	int err = 0;

	if (!prv_data) {
		printk(KERN_ERR	"%s:%d fail (no driver_data)\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err;
	}

	/* alloc mg_host */
	host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err;
	}
	host->major = MG_DISK_MAJ;

	/* link each other */
	prv_data->host = host;
	host->dev = &plat_dev->dev;

	/* io remap */
	rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!rsc) {
		printk(KERN_ERR "%s:%d platform_get_resource fail\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err_2;
	}
	host->dev_base = ioremap(rsc->start, resource_size(rsc));
	if (!host->dev_base) {
		printk(KERN_ERR "%s:%d ioremap fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_2;
	}
	MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);

	/* get reset pin */
	rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
			MG_RST_PIN);
	if (!rsc) {
		printk(KERN_ERR "%s:%d get reset pin fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_3;
	}
	host->rst = rsc->start;

	/* init rst pin */
	err = gpio_request(host->rst, MG_RST_PIN);
	if (err)
		goto probe_err_3;
	gpio_direction_output(host->rst, 1);

	/* reset out pin */
	if (!(prv_data->dev_attr & MG_DEV_MASK))
		goto probe_err_3a;

	if (prv_data->dev_attr != MG_BOOT_DEV) {
		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
				MG_RSTOUT_PIN);
		if (!rsc) {
			printk(KERN_ERR "%s:%d get reset-out pin fail\n",
					__func__, __LINE__);
			err = -EIO;
			goto probe_err_3a;
		}
		host->rstout = rsc->start;
		err = gpio_request(host->rstout, MG_RSTOUT_PIN);
		if (err)
			goto probe_err_3a;
		gpio_direction_input(host->rstout);
	}

	/* disk reset */
	if (prv_data->dev_attr == MG_STORAGE_DEV) {
		/* If the POR sequence has not finished yet, wait */
		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
		if (err)
			goto probe_err_3b;
		err = mg_disk_init(host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (err code : %d)\n",
					__func__, __LINE__, err);
			err = -EIO;
			goto probe_err_3b;
		}
	}

	/* get irq resource */
	if (!prv_data->use_polling) {
		host->irq = platform_get_irq(plat_dev, 0);
		if (host->irq == -ENXIO) {
			err = host->irq;
			goto probe_err_3b;
		}
		err = request_irq(host->irq, mg_irq,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				MG_DEV_NAME, host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
					__func__, __LINE__, err);
			goto probe_err_3b;
		}

	}

	/* get disk id */
	err = mg_get_disk_id(host);
	if (err) {
		printk(KERN_ERR "%s:%d fail (err code : %d)\n",
				__func__, __LINE__, err);
		err = -EIO;
		goto probe_err_4;
	}

	err = register_blkdev(host->major, MG_DISK_NAME);
	if (err < 0) {
		printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
				__func__, __LINE__, err);
		goto probe_err_4;
	}
	if (!host->major)
		host->major = err;

	spin_lock_init(&host->lock);

	if (prv_data->use_polling)
		host->breq = blk_init_queue(mg_request_poll, &host->lock);
	else
		host->breq = blk_init_queue(mg_request, &host->lock);

	if (!host->breq) {
		err = -ENOMEM;
		printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
				__func__, __LINE__);
		goto probe_err_5;
	}
	host->breq->queuedata = host;

	/* mflash is a random access device, so the noop elevator suits it */
	err = elevator_change(host->breq, "noop");
	if (err) {
		printk(KERN_ERR "%s:%d (elevator_init) fail\n",
				__func__, __LINE__);
		goto probe_err_6;
	}
	blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);

	init_timer(&host->timer);
	host->timer.function = mg_times_out;
	host->timer.data = (unsigned long)host;

	host->gd = alloc_disk(MG_DISK_MAX_PART);
	if (!host->gd) {
		printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err_7;
	}
	host->gd->major = host->major;
	host->gd->first_minor = 0;
	host->gd->fops = &mg_disk_ops;
	host->gd->queue = host->breq;
	host->gd->private_data = host;
	sprintf(host->gd->disk_name, MG_DISK_NAME"a");

	set_capacity(host->gd, host->n_sectors);

	add_disk(host->gd);

	return err;

probe_err_7:
	del_timer_sync(&host->timer);
probe_err_6:
	blk_cleanup_queue(host->breq);
probe_err_5:
	unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
probe_err_4:
	if (!prv_data->use_polling)
		free_irq(host->irq, host);
probe_err_3b:
	gpio_free(host->rstout);
probe_err_3a:
	gpio_free(host->rst);
probe_err_3:
	iounmap(host->dev_base);
probe_err_2:
	kfree(host);
probe_err:
	return err;
}

static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}

static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.suspend = mg_suspend,
	.resume = mg_resume,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
	}
};

/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/

static int __init mg_init(void)
{
	printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
	return platform_driver_register(&mg_disk_driver);
}

static void __exit mg_exit(void)
{
	printk(KERN_INFO "mflash driver : bye bye\n");
	platform_driver_unregister(&mg_disk_driver);
}

module_init(mg_init);
module_exit(mg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");