1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright © 2009 - Maxim Levitsky
4 * SmartMedia/xD translation layer
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/random.h>
10 #include <linux/hdreg.h>
11 #include <linux/kthread.h>
12 #include <linux/freezer.h>
13 #include <linux/sysfs.h>
14 #include <linux/bitops.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/nand-ecc-sw-hamming.h>
17 #include "nand/raw/sm_common.h"
18 #include "sm_ftl.h"
19
20
21
22 static struct workqueue_struct *cache_flush_workqueue;
23
24 static int cache_timeout = 1000;
25 module_param(cache_timeout, int, S_IRUGO);
26 MODULE_PARM_DESC(cache_timeout,
27 "Timeout (in ms) for cache flush (1000 ms default");
28
29 static int debug;
30 module_param(debug, int, S_IRUGO | S_IWUSR);
31 MODULE_PARM_DESC(debug, "Debug level (0-2)");
32
33
34 /* ------------------- sysfs attributes ---------------------------------- */
/*
 * Pairs a device_attribute with the buffer it exposes through sysfs.
 * 'data' is a kmalloc'ed string of 'len' bytes shown by sm_attr_show();
 * both the string and this wrapper are freed in
 * sm_delete_sysfs_attributes().
 */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;	/* attribute payload (owned by this struct) */
	int len;	/* number of valid bytes in 'data' */
};
40
sm_attr_show(struct device * dev,struct device_attribute * attr,char * buf)41 static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
42 char *buf)
43 {
44 struct sm_sysfs_attribute *sm_attr =
45 container_of(attr, struct sm_sysfs_attribute, dev_attr);
46
47 strncpy(buf, sm_attr->data, sm_attr->len);
48 return sm_attr->len;
49 }
50
51
#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
/*
 * Build the sysfs attribute group for the disk: currently a single
 * read-only "vendor" attribute extracted from the CIS buffer.
 * Returns NULL on any allocation failure.
 * Everything allocated here is released by sm_delete_sysfs_attributes().
 */
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;
	char *vendor;

	/* Copy the vendor string out of the CIS; kstrndup bounds the read
	 * to the end of the small page
	 */
	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
	if (!vendor)
		goto error1;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
	if (!vendor_attribute)
		goto error2;

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = strlen(vendor);
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;


	/* Create array of pointers to the attributes (NULL-terminated) */
	attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
			     GFP_KERNEL);
	if (!attributes)
		goto error3;
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
	if (!attr_group)
		goto error4;
	attr_group->attrs = attributes;
	return attr_group;
/* Unwind in reverse order of allocation */
error4:
	kfree(attributes);
error3:
	kfree(vendor_attribute);
error2:
	kfree(vendor);
error1:
	return NULL;
}
103
sm_delete_sysfs_attributes(struct sm_ftl * ftl)104 static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
105 {
106 struct attribute **attributes = ftl->disk_attributes->attrs;
107 int i;
108
109 for (i = 0; attributes[i] ; i++) {
110
111 struct device_attribute *dev_attr = container_of(attributes[i],
112 struct device_attribute, attr);
113
114 struct sm_sysfs_attribute *sm_attr =
115 container_of(dev_attr,
116 struct sm_sysfs_attribute, dev_attr);
117
118 kfree(sm_attr->data);
119 kfree(sm_attr);
120 }
121
122 kfree(ftl->disk_attributes->attrs);
123 kfree(ftl->disk_attributes);
124 }
125
126
127 /* ----------------------- oob helpers -------------------------------------- */
128
/*
 * Decode a 10-bit LBA from its two-byte on-media encoding.
 * Returns -2 when the fixed signature bits or the parity are wrong.
 */
static int sm_get_lba(uint8_t *lba)
{
	int value;

	/* The top five bits of byte 0 must carry the 0b00010 signature */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	/* High three LBA bits live in byte 0, low seven in byte 1 */
	value = (lba[0] & 0x07) << 7;
	value |= lba[1] >> 1;

	return value;
}
141
142
143 /*
144 * Read LBA associated with block
145 * returns -1, if block is erased
146 * returns -2 if error happens
147 */
sm_read_lba(struct sm_oob * oob)148 static int sm_read_lba(struct sm_oob *oob)
149 {
150 static const uint32_t erased_pattern[4] = {
151 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
152
153 uint16_t lba_test;
154 int lba;
155
156 /* First test for erased block */
157 if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
158 return -1;
159
160 /* Now check is both copies of the LBA differ too much */
161 lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
162 if (lba_test && !is_power_of_2(lba_test))
163 return -2;
164
165 /* And read it */
166 lba = sm_get_lba(oob->lba_copy1);
167
168 if (lba == -2)
169 lba = sm_get_lba(oob->lba_copy2);
170
171 return lba;
172 }
173
/*
 * Encode 'lba' into its two-byte on-media format and store two
 * identical copies in the oob (format decoded by sm_get_lba()).
 */
static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t tmp[2];

	WARN_ON(lba >= 1000);

	/* 0b00010 signature plus the three high LBA bits */
	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
	/* Seven low LBA bits; bit 0 is reserved for parity */
	tmp[1] = (lba << 1) & 0xFF;

	/* Set the parity bit so the overall 16-bit popcount is even */
	if (hweight16(*(uint16_t *)tmp) & 0x01)
		tmp[1] |= 1;

	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
}
189
190
/* Make offset from parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	/* Sanity checks: sector-aligned offset, parts within bounds */
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	/* block == -1 denotes an unmapped FTL entry; propagate it */
	if (block == -1)
		return -1;

	/* Zones are laid out SM_MAX_ZONE_SIZE blocks apart on the media */
	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
204
/* Breaks offset into parts */
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
			    int *zone, int *block, int *boffset)
{
	u64 offset = loffset;
	/* do_div() divides 'offset' in place and returns the remainder */
	*boffset = do_div(offset, ftl->block_size);
	/* Logical blocks per zone is max_lba (not zone_size) */
	*block = do_div(offset, ftl->max_lba);
	/* Out-of-range offsets are flagged with zone == -1 */
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
214
215 /* ---------------------- low level IO ------------------------------------- */
216
/*
 * Run software Hamming ECC over both 256-byte halves of a sector and
 * repair correctable errors in place.
 * Returns 0 on success, -EIO when either half is uncorrectable.
 */
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
	/* SmartMedia byte order for the Hamming code, per kernel config */
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	uint8_t ecc[3];

	/* First half: computed ECC is checked against oob->ecc1 */
	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	buffer += SM_SMALL_PAGE;

	/* Second half: checked against oob->ecc2 */
	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;
	return 0;
}
235
/* Reads a sector + oob.
 * Up to three attempts are made; between attempts the media is
 * rechecked via sm_recheck_media(). Returns 0 on success, the last
 * error (initially -EIO) on failure. 'buffer' and 'oob' may each be
 * NULL when the caller doesn't need that part.
 */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops = { };
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		if (buffer)
			memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	/* Small-page NAND is read raw; ECC is applied in software below */
	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		 * won't help anyway
		 */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	 * despite card removal.....
	 */
	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors (bitflip/ecc errors are handled below) */
	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage:
	 * the reserved field must be all-ones or one bit short of it
	 */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);
		goto again;
	}

	/* Test ECC: hardware-reported error, or software Hamming check
	 * for small-page NAND (which was read raw)
	 */
	if (mtd_is_eccerr(ret) ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
320
/* Writes a sector to media.
 * Refuses writes to the CIS block (or block 0 of zone 0) and to
 * unstable media. On an mtd write error, rechecks the media and
 * returns the error; no retry is done at this level.
 */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops = { };
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	/* Small-page NAND is written raw; ECC was computed by the caller */
	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}
365
366 /* ------------------------ block IO ------------------------------------- */
367
/* Write a block using data and lba, and invalid sector bitmap.
 * Each set bit in 'invalid_bitmap' marks a sector whose contents could
 * not be read; those sectors are written with data_status = 0.
 * On a write failure the block is erased once and the whole write is
 * retried; a second failure marks the block bad. Returns 0 or -EIO.
 */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" couldn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			/* data_status == 0 flags the sector as invalid */
			oob.data_status = 0;
		}

		/* Small-page NAND: compute software ECC for both halves */
		if (ftl->smallpagenand) {
			ecc_sw_hamming_calculate(buf + boffset,
						 SM_SMALL_PAGE, oob.ecc1,
						 sm_order);

			ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
						 SM_SMALL_PAGE, oob.ecc2,
						 sm_order);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
				     buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails. try to erase the block */
			/* This is safe, because we never write in blocks
			 * that contain valuable data.
			 * This is intended to repair block that are marked
			 * as erased, but that isn't fully erased
			 */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}
433
434
/* Mark whole block at offset 'offs' as bad.
 * Writes block_status = 0xF0 into the oob of every sector of the block;
 * failures are deliberately ignored (best effort).
 */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	 * any bad blocks till fail completely
	 */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
459
460 /*
461 * Erase a block within a zone
462 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
463 */
sm_erase_block(struct sm_ftl * ftl,int zone_num,uint16_t block,int put_free)464 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
465 int put_free)
466 {
467 struct ftl_zone *zone = &ftl->zones[zone_num];
468 struct mtd_info *mtd = ftl->trans->mtd;
469 struct erase_info erase;
470
471 erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
472 erase.len = ftl->block_size;
473
474 if (ftl->unstable)
475 return -EIO;
476
477 BUG_ON(ftl->readonly);
478
479 if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
480 sm_printk("attempted to erase the CIS!");
481 return -EIO;
482 }
483
484 if (mtd_erase(mtd, &erase)) {
485 sm_printk("erase of block %d in zone %d failed",
486 block, zone_num);
487 goto error;
488 }
489
490 if (put_free)
491 kfifo_in(&zone->free_sectors,
492 (const unsigned char *)&block, sizeof(block));
493
494 return 0;
495 error:
496 sm_mark_block_bad(ftl, zone_num, block);
497 return -EIO;
498 }
499
/* Thoroughly test that block is valid.
 * Returns 0 when the block holds a single consistent LBA,
 * 1 when it was sliced in two parts (and has been erased here),
 * -2 on a read error, -EIO when it looks corrupt (3+ distinct LBAs).
 */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	/* Collect up to three distinct consecutive LBA values; lbas[0]
	 * is a sentinel (-3) that can never match a real sm_read_lba()
	 * result, so the first sector always starts a new run
	 */
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;


	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	 * accepted
	 */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* Record each change in the LBA value along the block */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
539
/* ----------------- media scanning --------------------------------- */
/*
 * CHS geometry lookup table, keyed by media size:
 * { size (MiB), cylinders, heads, sectors }; zero entry terminates.
 * (Field meanings follow the usage in sm_get_media_info().)
 */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },
};
556
557
/* Byte pattern matched at the start of the CIS page (see sm_read_cis()) */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough.
 * Fills in zone/block geometry, read-only and small-page flags, and CHS
 * values. Returns 0 on success, -ENODEV for unsupported devices.
 */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}
	/* NOTE: other small sizes fall through with geometry still zero
	 * (ftl is zero-allocated by the caller) and are rejected by the
	 * erasesize check below
	 */

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	 * sizes. SmartMedia cards exist up to 128 MiB and have same layout
	 */
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write,erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use OOB */
	if (!mtd_has_oob(mtd))
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	/* Unknown size: fall back to the largest geometry in the table */
	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
658
659 /* Validate the CIS */
sm_read_cis(struct sm_ftl * ftl)660 static int sm_read_cis(struct sm_ftl *ftl)
661 {
662 struct sm_oob oob;
663
664 if (sm_read_sector(ftl,
665 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
666 return -EIO;
667
668 if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
669 return -EIO;
670
671 if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
672 cis_signature, sizeof(cis_signature))) {
673 return 0;
674 }
675
676 return -EIO;
677 }
678
/* Scan the media for the CIS.
 * Locates the first valid block and first valid sector within it, then
 * validates the CIS at page offset 0 and, failing that, at
 * SM_SMALL_PAGE. Caches block/offset in the ftl for later re-reads.
 * Returns 0 when found, -EIO otherwise.
 */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block (CIS lives in the spare area
	 * before the first logical block)
	 */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
						boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	/* Cache the location so sm_read_cis()/sm_recheck_media() can
	 * re-read the same sector later
	 */
	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	/* The CIS may also start at the second 256-byte page */
	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
736
737 /* Basic test to determine if underlying mtd device if functional */
sm_recheck_media(struct sm_ftl * ftl)738 static int sm_recheck_media(struct sm_ftl *ftl)
739 {
740 if (sm_read_cis(ftl)) {
741
742 if (!ftl->unstable) {
743 sm_printk("media unstable, not allowing writes");
744 ftl->unstable = 1;
745 }
746 return -EIO;
747 }
748 return 0;
749 }
750
/* Initialize a FTL zone.
 * Builds the LBA -> physical block table and the free-block fifo by
 * scanning every block's first-sector oob. Returns 0, -ENOMEM or -EIO.
 */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table (2 bytes per LBA entry) */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	/* Fill with -1 (0xFFFF) == "unmapped" */
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO (2 bytes per block number) */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
		 * first sector, because erase happens in one shot
		 */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		 * very low probability of failure here
		 */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		 * lets leave that to recovery application
		 */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
		 * just put the sector in the FTL table
		 */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* Two physical blocks claim the same LBA; keep whichever
		 * passes sm_check_block()
		 */
		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
			zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		 * they hold different versions of same data. It not
		 * known which is more recent, thus just erase one of them
		 */
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	 * work, but it can still can be (partially) read
	 */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to: rotate the free list by a
	 * random number of entries (pop head, push tail)
	 */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	while (i--) {
		len = kfifo_out(&zone->free_sectors,
			(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
878
879 /* Get and automatically initialize an FTL mapping for one zone */
sm_get_zone(struct sm_ftl * ftl,int zone_num)880 static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
881 {
882 struct ftl_zone *zone;
883 int error;
884
885 BUG_ON(zone_num >= ftl->zone_count);
886 zone = &ftl->zones[zone_num];
887
888 if (!zone->initialized) {
889 error = sm_init_zone(ftl, zone_num);
890
891 if (error)
892 return ERR_PTR(error);
893 }
894 return zone;
895 }
896
897
898 /* ----------------- cache handling ------------------------------------------*/
899
/* Initialize the one block cache */
static void sm_cache_init(struct sm_ftl *ftl)
{
	/* All sectors start invalid; bits are cleared as data arrives */
	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
	ftl->cache_clean = 1;
	/* -1 means the cache currently holds no block */
	ftl->cache_zone = -1;
	ftl->cache_block = -1;
	/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
}
909
/* Put sector in one block cache */
static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	/* Sector now holds valid data and the cache needs flushing */
	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
	ftl->cache_clean = 0;
}
917
918 /* Read a sector from the cache */
sm_cache_get(struct sm_ftl * ftl,char * buffer,int boffset)919 static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
920 {
921 if (test_bit(boffset / SM_SECTOR_SIZE,
922 &ftl->cache_data_invalid_bitmap))
923 return -1;
924
925 memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
926 return 0;
927 }
928
/* Write the cache to hardware.
 * Fills in any still-invalid cache sectors from the old physical block,
 * writes the whole block to a fresh free block, updates the FTL table
 * and erases/frees the old block. Returns 0 or -EIO.
 */
static int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	 * but for such worn out media it doesn't worth the trouble,
	 * and the dangers
	 */
	if (kfifo_out(&zone->free_sectors,
		(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	/* On failure sm_write_block() has marked the block bad, so loop
	 * back and try the next free block
	 */
	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
			goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write succesfull, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
991
992
/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = from_timer(ftl, t, timer);
	/* Defer to process context: the flush takes the ftl mutex */
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}
999
1000 /* cache flush work, kicked by timer */
sm_cache_flush_work(struct work_struct * work)1001 static void sm_cache_flush_work(struct work_struct *work)
1002 {
1003 struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
1004 mutex_lock(&ftl->mutex);
1005 sm_cache_flush(ftl);
1006 mutex_unlock(&ftl->mutex);
1007 return;
1008 }
1009
1010 /* ---------------- outside interface -------------------------------------- */
1011
/* outside interface: read a sector.
 * Checks the one-block write cache first; on a cache miss reads from
 * the mapped physical block (unmapped LBAs read back as 0xFF) and
 * refills the cache sector when the cached block was hit.
 */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	/* 512-byte sectors: sector number << 9 is the byte offset */
	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	/* The cached block missed this sector - populate it now */
	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}
1057
/* outside interface: write a sector.
 * Writes go through the one-block cache; if the target block differs
 * from the cached one the cache is flushed first. The flush timer is
 * (re)armed on exit so dirty data hits the media within cache_timeout.
 */
static int sm_write(struct mtd_blktrans_dev *dev,
		    unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need in flush thread running now */
	del_timer(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If entry is not in cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	/* Re-arm the deferred flush */
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}
1096
1097 /* outside interface: flush everything */
sm_flush(struct mtd_blktrans_dev * dev)1098 static int sm_flush(struct mtd_blktrans_dev *dev)
1099 {
1100 struct sm_ftl *ftl = dev->priv;
1101 int retval;
1102
1103 mutex_lock(&ftl->mutex);
1104 retval = sm_cache_flush(ftl);
1105 mutex_unlock(&ftl->mutex);
1106 return retval;
1107 }
1108
/* outside interface: device is released */
static void sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	/* Stop the deferred-flush machinery, then do one last
	 * synchronous flush under the lock
	 */
	del_timer_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}
1120
1121 /* outside interface: get geometry */
sm_getgeo(struct mtd_blktrans_dev * dev,struct hd_geometry * geo)1122 static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1123 {
1124 struct sm_ftl *ftl = dev->priv;
1125 geo->heads = ftl->heads;
1126 geo->sectors = ftl->sectors;
1127 geo->cylinders = ftl->cylinders;
1128 return 0;
1129 }
1130
1131 /* external interface: main initialization function */
sm_add_mtd(struct mtd_blktrans_ops * tr,struct mtd_info * mtd)1132 static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1133 {
1134 struct mtd_blktrans_dev *trans;
1135 struct sm_ftl *ftl;
1136
1137 /* Allocate & initialize our private structure */
1138 ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
1139 if (!ftl)
1140 goto error1;
1141
1142
1143 mutex_init(&ftl->mutex);
1144 timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
1145 INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
1146
1147 /* Read media information */
1148 if (sm_get_media_info(ftl, mtd)) {
1149 dbg("found unsupported mtd device, aborting");
1150 goto error2;
1151 }
1152
1153
1154 /* Allocate temporary CIS buffer for read retry support */
1155 ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
1156 if (!ftl->cis_buffer)
1157 goto error2;
1158
1159 /* Allocate zone array, it will be initialized on demand */
1160 ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
1161 GFP_KERNEL);
1162 if (!ftl->zones)
1163 goto error3;
1164
1165 /* Allocate the cache*/
1166 ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
1167
1168 if (!ftl->cache_data)
1169 goto error4;
1170
1171 sm_cache_init(ftl);
1172
1173
1174 /* Allocate upper layer structure and initialize it */
1175 trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
1176 if (!trans)
1177 goto error5;
1178
1179 ftl->trans = trans;
1180 trans->priv = ftl;
1181
1182 trans->tr = tr;
1183 trans->mtd = mtd;
1184 trans->devnum = -1;
1185 trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
1186 trans->readonly = ftl->readonly;
1187
1188 if (sm_find_cis(ftl)) {
1189 dbg("CIS not found on mtd device, aborting");
1190 goto error6;
1191 }
1192
1193 ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
1194 if (!ftl->disk_attributes)
1195 goto error6;
1196 trans->disk_attributes = ftl->disk_attributes;
1197
1198 sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1199 (int)(mtd->size / (1024 * 1024)), mtd->index);
1200
1201 dbg("FTL layout:");
1202 dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1203 ftl->zone_count, ftl->max_lba,
1204 ftl->zone_size - ftl->max_lba);
1205 dbg("each block consists of %d bytes",
1206 ftl->block_size);
1207
1208
1209 /* Register device*/
1210 if (add_mtd_blktrans_dev(trans)) {
1211 dbg("error in mtdblktrans layer");
1212 goto error6;
1213 }
1214 return;
1215 error6:
1216 kfree(trans);
1217 error5:
1218 kfree(ftl->cache_data);
1219 error4:
1220 kfree(ftl->zones);
1221 error3:
1222 kfree(ftl->cis_buffer);
1223 error2:
1224 kfree(ftl);
1225 error1:
1226 return;
1227 }
1228
1229 /* main interface: device {surprise,} removal */
sm_remove_dev(struct mtd_blktrans_dev * dev)1230 static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1231 {
1232 struct sm_ftl *ftl = dev->priv;
1233 int i;
1234
1235 del_mtd_blktrans_dev(dev);
1236 ftl->trans = NULL;
1237
1238 for (i = 0 ; i < ftl->zone_count; i++) {
1239
1240 if (!ftl->zones[i].initialized)
1241 continue;
1242
1243 kfree(ftl->zones[i].lba_to_phys_table);
1244 kfifo_free(&ftl->zones[i].free_sectors);
1245 }
1246
1247 sm_delete_sysfs_attributes(ftl);
1248 kfree(ftl->cis_buffer);
1249 kfree(ftl->zones);
1250 kfree(ftl->cache_data);
1251 kfree(ftl);
1252 }
1253
/* Operations table plugged into the mtd block translation layer */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.name = "smblk",
	.major = 0, /* 0 = let the block layer pick a major dynamically */
	.part_bits = SM_FTL_PARTN_BITS,
	.blksize = SM_SECTOR_SIZE,
	.getgeo = sm_getgeo,

	.add_mtd = sm_add_mtd,
	.remove_dev = sm_remove_dev,

	.readsect = sm_read,
	.writesect = sm_write,

	.flush = sm_flush,
	.release = sm_release,

	.owner = THIS_MODULE,
};
1272
sm_module_init(void)1273 static __init int sm_module_init(void)
1274 {
1275 int error = 0;
1276
1277 cache_flush_workqueue = create_freezable_workqueue("smflush");
1278 if (!cache_flush_workqueue)
1279 return -ENOMEM;
1280
1281 error = register_mtd_blktrans(&sm_ftl_ops);
1282 if (error)
1283 destroy_workqueue(cache_flush_workqueue);
1284 return error;
1285
1286 }
1287
sm_module_exit(void)1288 static void __exit sm_module_exit(void)
1289 {
1290 destroy_workqueue(cache_flush_workqueue);
1291 deregister_mtd_blktrans(&sm_ftl_ops);
1292 }
1293
1294 module_init(sm_module_init);
1295 module_exit(sm_module_exit);
1296
1297 MODULE_LICENSE("GPL");
1298 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1299 MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
1300