/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/async.h>

/**** Helper functions for div and remainder operations on u64 ****/

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_Calc_Used_Bits
* Inputs:       Power of 2 number
* Outputs:      log2 of a power-of-2 input, i.e. the shift amount
*               that replaces a division by it;
*               0, if the argument is 0
* Description:  Calculate the shift amount corresponding to a given
*               power of 2 number. Number can be up to 32 bit
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
        int tot_bits = 0;

        if (n >= 1 << 16) {
                n >>= 16;
                tot_bits += 16;
        }

        if (n >= 1 << 8) {
                n >>= 8;
                tot_bits += 8;
        }

        if (n >= 1 << 4) {
                n >>= 4;
                tot_bits += 4;
        }

        if (n >= 1 << 2) {
                n >>= 2;
                tot_bits += 2;
        }

        if (n >= 1 << 1)
                tot_bits += 1;

        return (n == 0) ? 0 : tot_bits;
}
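
/*
 * Example: GLOB_Calc_Used_Bits(4096) == 12, since 4096 == 1 << 12.
 * For a power-of-2 divisor the return value is therefore exactly the
 * shift amount that replaces a division (a 4 KiB page: addr >> 12);
 * see GLOB_u64_Div() below.
 */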

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Div
* Inputs:       A u64 dividend
*               A power-of-2 number as the divisor
* Outputs:      Quotient of the division
* Description:  Divides the address by the divisor using a bit shift
*               (essentially without explicitly using "/").
*               The divisor must be a power of 2; the dividend is a u64.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
        return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}
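
/*
 * Example: with divisor 4096, GLOB_u64_Div(10240, 4096) == 10240 >> 12
 * == 2, i.e. the quotient with the fractional part truncated.
 */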

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Remainder
* Inputs:       A u64 number
*               Divisor type (1 - page address, 2 - block address)
* Outputs:      Remainder of the division operation
* Description:  Calculates the remainder of a u64 number divided by a
*               power-of-2 divisor, using bit shift and multiply
*               operations (essentially without explicitly using "/").
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
        u64 result = 0;

        if (divisor_type == 1) { /* Remainder -- Page */
                result = (addr >> DeviceInfo.nBitsInPageDataSize);
                result = result * DeviceInfo.wPageDataSize;
        } else if (divisor_type == 2) { /* Remainder -- Block */
                result = (addr >> DeviceInfo.nBitsInBlockDataSize);
                result = result * DeviceInfo.wBlockDataSize;
        }

        result = addr - result;

        return result;
}
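
/*
 * Example (a sketch, assuming a 2 KiB page, nBitsInPageDataSize == 11):
 * GLOB_u64_Remainder(5000, 1) == 5000 - (5000 >> 11) * 2048
 *                             == 5000 - 4096 == 904,
 * which equals 5000 % 2048 without using the "%" operator on a u64.
 */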

#define NUM_DEVICES             1
#define PARTITIONS              8

#define GLOB_SBD_NAME          "nd"
#define GLOB_SBD_IRQ_NUM       (29)

#define GLOB_SBD_IOCTL_GC                        (0x7701)
#define GLOB_SBD_IOCTL_WL                        (0x7702)
#define GLOB_SBD_IOCTL_FORMAT                    (0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH               (0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE               (0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE            (0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE  (0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO             (0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA                (0x7709)
#define GLOB_SBD_IOCTL_READ_DATA                 (0x770A)

static int reserved_mb;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 0; at least one block is always reserved)");

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level: 0-3 (3 is most verbose)");

MODULE_LICENSE("GPL");

struct spectra_nand_dev {
        struct pci_dev *dev;
        u64 size;
        u16 users;
        spinlock_t qlock;
        void __iomem *ioaddr;  /* Mapped address */
        struct request_queue *queue;
        struct task_struct *thread;
        struct gendisk *gd;
        u8 *tmp_buf;
};

static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

static struct mutex spectra_lock;

static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;

static int force_flush_cache(void)
{
        nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
                       __FILE__, __LINE__, __func__);

        if (ERR == GLOB_FTL_Flush_Cache()) {
                printk(KERN_ERR "Failed to flush FTL cache!\n");
                return -EFAULT;
        }
#if CMD_DMA
        if (glob_ftl_execute_cmds())
                return -EIO;
        else
                return 0;
#endif
        return 0;
}

struct ioctl_rw_page_info {
        u8 *data;
        unsigned int page;
};

static int ioctl_read_page_data(unsigned long arg)
{
        u8 *buf;
        struct ioctl_rw_page_info info;
        int result = PASS;

        if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                return -EFAULT;

        /* ioctl runs in process context, so a sleeping allocation is fine */
        buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_KERNEL);
        if (!buf) {
                printk(KERN_ERR "ioctl_read_page_data: "
                       "failed to allocate memory\n");
                return -ENOMEM;
        }

        mutex_lock(&spectra_lock);
        result = GLOB_FTL_Page_Read(buf,
                (u64)info.page * IdentifyDeviceData.PageDataSize);
        mutex_unlock(&spectra_lock);

        if (copy_to_user((void __user *)info.data, buf,
                         IdentifyDeviceData.PageDataSize)) {
                printk(KERN_ERR "ioctl_read_page_data: "
                       "failed to copy user data\n");
                kfree(buf);
                return -EFAULT;
        }

        kfree(buf);
        return result;
}

static int ioctl_write_page_data(unsigned long arg)
{
        u8 *buf;
        struct ioctl_rw_page_info info;
        int result = PASS;

        if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                return -EFAULT;

        /* ioctl runs in process context, so a sleeping allocation is fine */
        buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_KERNEL);
        if (!buf) {
                printk(KERN_ERR "ioctl_write_page_data: "
                       "failed to allocate memory\n");
                return -ENOMEM;
        }

        if (copy_from_user(buf, (void __user *)info.data,
                           IdentifyDeviceData.PageDataSize)) {
                printk(KERN_ERR "ioctl_write_page_data: "
                       "failed to copy user data\n");
                kfree(buf);
                return -EFAULT;
        }

        mutex_lock(&spectra_lock);
        result = GLOB_FTL_Page_Write(buf,
                (u64)info.page * IdentifyDeviceData.PageDataSize);
        mutex_unlock(&spectra_lock);

        kfree(buf);
        return result;
}

/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
        return IdentifyDeviceData.wDataBlockNum / 10;
}

/* Return how many blocks should be reserved for OS image */
static int get_res_blk_num_os(void)
{
        u32 res_blks, blk_size;

        blk_size = IdentifyDeviceData.PageDataSize *
                IdentifyDeviceData.PagesPerBlock;

        res_blks = (reserved_mb * 1024 * 1024) / blk_size;

        if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
                res_blks = 1; /* Reserved 1 block for block table */

        return res_blks;
}
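
/*
 * Example: with reserved_mb == 25 and a 128 KiB block (2 KiB pages,
 * 64 pages per block), get_res_blk_num_os() reserves
 * (25 * 1024 * 1024) / (128 * 1024) == 200 blocks. The geometry here
 * is illustrative; the real values come from GLOB_FTL_IdentifyDevice().
 */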

/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
        u64 start_addr, addr;
        u32 logical_start_sect, hd_start_sect;
        u32 nsect, hd_sects;
        u32 rsect, tsect = 0;
        char *buf;
        u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

        start_addr = (u64)(blk_rq_pos(req)) << 9;
        /* Add a big enough offset to prevent the OS Image from
         * being accessed or damaged by the file system */
        start_addr += IdentifyDeviceData.PageDataSize *
                IdentifyDeviceData.PagesPerBlock *
                res_blks_os;

        if (req->cmd_flags & REQ_FLUSH) {
                if (force_flush_cache()) /* Failed to flush cache */
                        return -EIO;
                else
                        return 0;
        }

        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
                printk(KERN_ERR "Spectra error: request over the NAND "
                       "capacity! sector %d, current_nr_sectors %d, "
                       "while capacity is %d\n",
                       (int)blk_rq_pos(req),
                       (int)blk_rq_cur_sectors(req),
                       (int)get_capacity(tr->gd));
                return -EIO;
        }

        logical_start_sect = start_addr >> 9;
        hd_start_sect = logical_start_sect / ratio;
        rsect = logical_start_sect - hd_start_sect * ratio;

        addr = (u64)hd_start_sect * ratio * 512;
        buf = req->buffer;
        nsect = blk_rq_cur_sectors(req);

        if (rsect)
                tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;

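        /*
         * Sector-to-page mapping: "ratio" is the number of 512 byte
         * sectors per NAND page. "rsect" is the sector offset into the
         * first page; when non-zero, the first "tsect" sectors are
         * staged through tr->tmp_buf (a partial copy on reads, a
         * read-modify-write on writes). Whole pages in the middle are
         * transferred directly, and a partial tail page is staged
         * through tr->tmp_buf again.
         */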
        switch (rq_data_dir(req)) {
        case READ:
                /* Read the first NAND page */
                if (rsect) {
                        if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                        memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
                        addr += IdentifyDeviceData.PageDataSize;
                        buf += tsect << 9;
                        nsect -= tsect;
                }

                /* Read the other NAND pages */
                for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
                        if (GLOB_FTL_Page_Read(buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                        addr += IdentifyDeviceData.PageDataSize;
                        buf += IdentifyDeviceData.PageDataSize;
                }

                /* Read the last NAND pages */
                if (nsect % ratio) {
                        if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                        memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
                }
#if CMD_DMA
                if (glob_ftl_execute_cmds())
                        return -EIO;
                else
                        return 0;
#endif
                return 0;

        case WRITE:
                /* Write the first NAND page */
                if (rsect) {
                        if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                        memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
                        if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                        addr += IdentifyDeviceData.PageDataSize;
                        buf += tsect << 9;
                        nsect -= tsect;
                }

                /* Write the other NAND pages */
                for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
                        if (GLOB_FTL_Page_Write(buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                        addr += IdentifyDeviceData.PageDataSize;
                        buf += IdentifyDeviceData.PageDataSize;
                }

                /* Write the last NAND pages */
                if (nsect % ratio) {
                        if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                        memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
                        if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
                                printk(KERN_ERR "Error in %s, Line %d\n",
                                       __FILE__, __LINE__);
                                return -EIO;
                        }
                }
#if CMD_DMA
                if (glob_ftl_execute_cmds())
                        return -EIO;
                else
                        return 0;
#endif
                return 0;

        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return -EIO;
        }
}

/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
        struct spectra_nand_dev *tr = arg;
        struct request_queue *rq = tr->queue;
        struct request *req = NULL;

        /* we might get involved when memory gets low, so use PF_MEMALLOC */
        current->flags |= PF_MEMALLOC;

        spin_lock_irq(rq->queue_lock);
        while (!kthread_should_stop()) {
                int res;

                if (!req) {
                        req = blk_fetch_request(rq);
                        if (!req) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                spin_unlock_irq(rq->queue_lock);
                                schedule();
                                spin_lock_irq(rq->queue_lock);
                                continue;
                        }
                }

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&spectra_lock);
                res = do_transfer(tr, req);
                mutex_unlock(&spectra_lock);

                spin_lock_irq(rq->queue_lock);

                if (!__blk_end_request_cur(req, res))
                        req = NULL;
        }

        if (req)
                __blk_end_request_all(req, -EIO);

        spin_unlock_irq(rq->queue_lock);

        return 0;
}

/* The request function runs with the queue spinlock held, so it only
 * wakes the transfer thread; the actual work is done (and may sleep)
 * in spectra_trans_thread() */
static void GLOB_SBD_request(struct request_queue *rq)
{
        struct spectra_nand_dev *pdev = rq->queuedata;
        wake_up_process(pdev->thread);
}

static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
        nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
                       __FILE__, __LINE__, __func__);
        return 0;
}

static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
        nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
                       __FILE__, __LINE__, __func__);

        mutex_lock(&spectra_lock);
        force_flush_cache();
        mutex_unlock(&spectra_lock);

        return 0;
}

static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->heads = 4;
        geo->sectors = 16;
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

        nand_dbg_print(NAND_DBG_DEBUG,
                       "heads: %d, sectors: %d, cylinders: %d\n",
                       geo->heads, geo->sectors, geo->cylinders);

        return 0;
}

int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
                   unsigned int cmd, unsigned long arg)
{
        int ret;

        nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
                       __FILE__, __LINE__, __func__);

        switch (cmd) {
        case GLOB_SBD_IOCTL_GC:
                nand_dbg_print(NAND_DBG_DEBUG,
                               "Spectra IOCTL: Garbage Collection "
                               "being performed\n");
                if (PASS != GLOB_FTL_Garbage_Collection())
                        return -EFAULT;
                return 0;

        case GLOB_SBD_IOCTL_WL:
                nand_dbg_print(NAND_DBG_DEBUG,
                               "Spectra IOCTL: Static Wear Leveling "
                               "being performed\n");
                if (PASS != GLOB_FTL_Wear_Leveling())
                        return -EFAULT;
                return 0;

        case GLOB_SBD_IOCTL_FORMAT:
                nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
                               "being performed\n");
                if (PASS != GLOB_FTL_Flash_Format())
                        return -EFAULT;
                return 0;

        case GLOB_SBD_IOCTL_FLUSH_CACHE:
                nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
                               "being performed\n");
                mutex_lock(&spectra_lock);
                ret = force_flush_cache();
                mutex_unlock(&spectra_lock);
                return ret;

        case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
                nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
                               "Copy block table\n");
                if (copy_to_user((void __user *)arg,
                                 get_blk_table_start_addr(),
                                 get_blk_table_len()))
                        return -EFAULT;
                return 0;

        case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
                nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
                               "Copy wear leveling table\n");
                if (copy_to_user((void __user *)arg,
                                 get_wear_leveling_table_start_addr(),
                                 get_wear_leveling_table_len()))
                        return -EFAULT;
                return 0;

        case GLOB_SBD_IOCTL_GET_NAND_INFO:
                nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
                               "Get NAND info\n");
                if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
                                 sizeof(IdentifyDeviceData)))
                        return -EFAULT;
                return 0;

        case GLOB_SBD_IOCTL_WRITE_DATA:
                nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
                               "Write one page data\n");
                return ioctl_write_page_data(arg);

        case GLOB_SBD_IOCTL_READ_DATA:
                nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
                               "Read one page data\n");
                return ioctl_read_page_data(arg);
        }

        return -ENOTTY;
}

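/*
 * Userspace sketch of the page read ioctl (illustrative only; "fd" is
 * assumed to be an open descriptor for the block device node, and
 * "buf" must hold at least PageDataSize bytes):
 *
 *      struct ioctl_rw_page_info info = { .data = buf, .page = 7 };
 *      if (ioctl(fd, GLOB_SBD_IOCTL_READ_DATA, &info) < 0)
 *              perror("GLOB_SBD_IOCTL_READ_DATA");
 *
 * GLOB_SBD_IOCTL_WRITE_DATA takes the same structure, with info.data
 * pointing at the page contents to be written.
 */
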
static DEFINE_MUTEX(ffsport_mutex);

int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
{
        int ret;

        mutex_lock(&ffsport_mutex);
        ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
        mutex_unlock(&ffsport_mutex);

        return ret;
}

static const struct block_device_operations GLOB_SBD_ops = {
        .owner = THIS_MODULE,
        .open = GLOB_SBD_open,
        .release = GLOB_SBD_release,
        .ioctl = GLOB_SBD_unlocked_ioctl,
        .getgeo = GLOB_SBD_getgeo,
};

static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
        int res_blks;
        u32 sects;

        nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
                       __FILE__, __LINE__, __func__);

        memset(dev, 0, sizeof(struct spectra_nand_dev));

        nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
                "for OS image, %d blocks for bad block replacement.\n",
                get_res_blk_num_os(),
                get_res_blk_num_bad_blk());

        res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();

        dev->size = (u64)IdentifyDeviceData.PageDataSize *
                IdentifyDeviceData.PagesPerBlock *
                (IdentifyDeviceData.wDataBlockNum - res_blks);

        res_blks_os = get_res_blk_num_os();

        spin_lock_init(&dev->qlock);

        /* Setup runs in process context, so a sleeping allocation is fine */
        dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_KERNEL);
        if (!dev->tmp_buf) {
                printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
                       __FILE__, __LINE__);
                return -ENOMEM;
        }

        dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
        if (dev->queue == NULL) {
                printk(KERN_ERR
                       "Spectra: Request queue could not be initialized. Aborting\n");
                goto out_free_buf;
        }
        dev->queue->queuedata = dev;

        /* The Linux block layer doesn't support hardware sector sizes
         * above 4KB, so report a 512 byte hardware sector size to the
         * kernel and translate in do_transfer() */
        blk_queue_logical_block_size(dev->queue, 512);

        blk_queue_flush(dev->queue, REQ_FLUSH);

        dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
        if (IS_ERR(dev->thread)) {
                printk(KERN_ERR
                       "Spectra: Could not start transfer thread. Aborting\n");
                goto out_cleanup_queue;
        }

        dev->gd = alloc_disk(PARTITIONS);
        if (!dev->gd) {
                printk(KERN_ERR
                       "Spectra: Could not allocate disk. Aborting\n");
                kthread_stop(dev->thread);
                goto out_cleanup_queue;
        }
        dev->gd->major = GLOB_SBD_majornum;
        dev->gd->first_minor = which * PARTITIONS;
        dev->gd->fops = &GLOB_SBD_ops;
        dev->gd->queue = dev->queue;
        dev->gd->private_data = dev;
        snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

        sects = dev->size >> 9;
        nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
        set_capacity(dev->gd, sects);

        add_disk(dev->gd);

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(dev->queue);
out_free_buf:
        kfree(dev->tmp_buf);
        return -ENOMEM;
}
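
/*
 * With NUM_DEVICES == 1 the disk registers as /dev/nda (GLOB_SBD_NAME
 * "nd" plus 'a'), with minor numbers reserved for up to PARTITIONS - 1
 * partitions (nda1 ... nda7).
 */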

/*
static ssize_t show_nand_block_num(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n",
                (int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n",
                (int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n",
                (int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);

static void create_sysfs_entry(struct device *dev)
{
        if (device_create_file(dev, &dev_attr_nand_block_num))
                printk(KERN_ERR "Spectra: "
                       "failed to create sysfs entry nand_block_num.\n");
        if (device_create_file(dev, &dev_attr_nand_pages_per_block))
                printk(KERN_ERR "Spectra: "
                       "failed to create sysfs entry nand_pages_per_block.\n");
        if (device_create_file(dev, &dev_attr_nand_page_size))
                printk(KERN_ERR "Spectra: "
                       "failed to create sysfs entry nand_page_size.\n");
}
*/


static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
{
        int i;

        /* create_sysfs_entry(&dev->dev); */

        if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
                printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
                       "Aborting\n");
                return;
        } else {
                nand_dbg_print(NAND_DBG_WARN, "In %s: "
                               "Num blocks=%d, pagesperblock=%d, "
                               "pagedatasize=%d, ECCBytesPerSector=%d\n",
                               __func__,
                               (int)IdentifyDeviceData.NumBlocks,
                               (int)IdentifyDeviceData.PagesPerBlock,
                               (int)IdentifyDeviceData.PageDataSize,
                               (int)IdentifyDeviceData.wECCBytesPerSector);
        }

        printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
        if (GLOB_FTL_Init() != PASS) {
                printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
                       "Aborting\n");
                goto out_ftl_flash_register;
        }
        printk(KERN_ALERT "Spectra: block table has been found.\n");

        GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
        if (GLOB_SBD_majornum <= 0) {
                printk(KERN_ERR "Unable to get the major %d for Spectra\n",
                       GLOB_SBD_majornum);
                goto out_ftl_flash_register;
        }

        for (i = 0; i < NUM_DEVICES; i++)
                if (SBD_setup_device(&nand_device[i], i) < 0)
                        goto out_blk_register;

        nand_dbg_print(NAND_DBG_DEBUG,
                       "Spectra: module loaded with major number %d\n",
                       GLOB_SBD_majornum);

        return;

out_blk_register:
        unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
out_ftl_flash_register:
        GLOB_FTL_Cache_Release();
        printk(KERN_ERR "Spectra: Module load failed.\n");
}

int register_spectra_ftl(void)
{
        async_schedule(register_spectra_ftl_async, NULL);
        return 0;
}
EXPORT_SYMBOL_GPL(register_spectra_ftl);

static int __init GLOB_SBD_init(void)
{
        /* Debug output level (0~3, 3 is most verbose) is set via the
         * nand_debug_level module parameter */
        printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

        mutex_init(&spectra_lock);

        if (PASS != GLOB_FTL_Flash_Init()) {
                printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
                       "Aborting\n");
                return -ENODEV;
        }
        return 0;
}

static void __exit GLOB_SBD_exit(void)
{
        int i;

        nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
                       __FILE__, __LINE__, __func__);

        for (i = 0; i < NUM_DEVICES; i++) {
                struct spectra_nand_dev *dev = &nand_device[i];
                if (dev->gd) {
                        del_gendisk(dev->gd);
                        put_disk(dev->gd);
                }
                /* Stop the transfer thread before tearing down its queue */
                if (dev->thread)
                        kthread_stop(dev->thread);
                if (dev->queue)
                        blk_cleanup_queue(dev->queue);
                kfree(dev->tmp_buf);
        }

        unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

        mutex_lock(&spectra_lock);
        force_flush_cache();
        mutex_unlock(&spectra_lock);

        GLOB_FTL_Cache_Release();

        GLOB_FTL_Flash_Release();

        nand_dbg_print(NAND_DBG_DEBUG,
                       "Spectra FTL module (major number %d) unloaded.\n",
                       GLOB_SBD_majornum);
}

module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);