/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct iovec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync(struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_suspend(struct mtd_info *);
static void cfi_staa_resume(struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup(struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	probe:		NULL, /* Not usable directly */
	destroy:	cfi_staa_destroy,
	name:		"cfi_cmdset_0020",
	module:		THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
//#define DEBUG_CFI_FEATURES

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register. It must only be accessed through
 * inter_module_get which will bump the use count of this module. The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
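
/* Purely illustrative sketch (not part of this driver): a hypothetical
 * client module would bind to the entry point roughly like this, where the
 * local variable names are made up but inter_module_get/inter_module_put
 * are the real interfaces referred to above:
 *
 *	struct mtd_info *(*probe)(struct map_info *, int);
 *
 *	probe = (struct mtd_info *(*)(struct map_info *, int))
 *			inter_module_get("cfi_cmdset_0020");
 *	if (probe) {
 *		struct mtd_info *mtd = probe(map, 1);
 *		... use mtd ...
 *		inter_module_put("cfi_cmdset_0020");
 *	}
 */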
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;
		int ofs_factor = cfi->interleave * cfi->device_type;

		printk(" ST Microelectronics Extended Query Table at 0x%4.4X\n", adr);
		if (!adr)
			return NULL;

		/* Switch it into Query Mode */
		cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

		extp = kmalloc(sizeof(*extp), GFP_KERNEL);
		if (!extp) {
			printk(KERN_ERR "Failed to allocate memory\n");
			return NULL;
		}

		/* Read in the Extended Query Table */
		for (i=0; i<sizeof(*extp); i++) {
			((unsigned char *)extp)[i] =
				cfi_read_query(map, (base+((adr+i)*ofs_factor)));
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
			printk(KERN_WARNING " Unknown staa Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
	}

	map->fldrv = &cfi_staa_chipdrv;
	MOD_INC_USE_COUNT;

	/* Make sure it's in read mode */
	cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
	return cfi_staa_setup(map);
}

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
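
	/* Worked example (hypothetical CFI data): EraseRegionInfo[i] =
	 * 0x0100001f encodes (0x001f + 1) = 32 blocks of 0x0100 * 256 =
	 * 64KiB each; with two interleaved chips ersize doubles to 128KiB
	 * while ernum stays 32. */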

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Select the correct geometry setup */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
	mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	map->fldrv = &cfi_staa_chipdrv;
	MOD_INC_USE_COUNT;
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	__u32 status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
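
	/* For illustration (hypothetical geometry): with two x16 chips
	 * interleaved on a 32-bit bus, CMD(0x80) expands to 0x00800080, so
	 * the (status & status_OK) == status_OK tests below succeed only
	 * when every interleaved chip reports SR.7 (WSM ready). */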

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		cfi_write(map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode. Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		cfi_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = cfi_read(map, cmd_addr);
			if ((status & status_OK) == status_OK)
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				cfi_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				cfi_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				spin_unlock_bh(chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%x\n", status);
				return -EIO;
			}

			spin_unlock_bh(chip->mutex);
			cfi_udelay(1);
			spin_lock_bh(chip->mutex);
		}

		suspended = 1;
		cfi_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		cfi_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* fall through */
	case FL_STATUS:
		status = cfi_read(map, cmd_addr);
		if ((status & status_OK) == status_OK) {
			cfi_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %x\n", status);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map->copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		cfi_write(map, CMD(0xd0), cmd_addr);
		cfi_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
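
	/* Example (assuming cfi->chipshift == 23, i.e. 8MiB chips): a read
	 * at from = 0xa00000 gives chipnum = 1 and ofs = 0x200000, i.e. it
	 * starts 2MiB into the second chip. */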

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (CFIDEV_BUSWIDTH-1))
		return -EINVAL;

	wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);
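
	/* Example (hypothetical values): MaxBufWriteSize = 5 with interleave
	 * 2 gives wbufsize = 2 << 5 = 64, so adr = 0x1234 yields cmd_adr =
	 * 0x1200, the base of the enclosing 64-byte write buffer. */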

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
#endif
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		cfi_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%x]\n", __FUNCTION__, cfi_read(map, cmd_adr));
#endif
		/* fall through */
	case FL_STATUS:
		status = cfi_read(map, cmd_adr);
		if ((status & status_OK) == status_OK)
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %x, status = %x\n",
			       status, cfi_read(map, cmd_adr));
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	cfi_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = cfi_read(map, cmd_adr);
		if ((status & status_OK) == status_OK)
			break;

		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			cfi_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %x\n", status);
			return -EIO;
		}
	}

	/* Write length of data to come */
	cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr);
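
	/* e.g. a 32-byte transfer on a 4-byte-wide bus is announced as
	 * CMD(32/4 - 1) == CMD(7): the word count minus one, replicated to
	 * every interleaved device. */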

	/* Write data */
	for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
		if (cfi_buswidth_is_1()) {
			map->write8(map, *(__u8*)buf, adr+z);
			buf += sizeof(__u8);
		} else if (cfi_buswidth_is_2()) {
			map->write16(map, *(__u16*)buf, adr+z);
			buf += sizeof(__u16);
		} else if (cfi_buswidth_is_4()) {
			map->write32(map, *(__u32*)buf, adr+z);
			buf += sizeof(__u32);
		} else {
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EINVAL;
		}
	}
	/* GO GO GO */
	cfi_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = cfi_read(map, cmd_adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			cfi_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock_bh(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if ((status & CMD(0x02)) || (status & CMD(0x08)) ||
	    (status & CMD(0x10)) || (status & CMD(0x20))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%x]\n", __FUNCTION__, status);
#endif
		/* clear status */
		cfi_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		cfi_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		return (status & CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers(struct mtd_info *mtd, loff_t to,
				  size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: CFIDEV_BUSWIDTH[%x]\n", __FUNCTION__, CFIDEV_BUSWIDTH);
	printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%lx]\n", __FUNCTION__, ofs, (unsigned long)len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
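
		/* Example (hypothetical numbers): with wbufsize = 64, a write
		 * of len = 100 starting at ofs = 0x1030 is issued as 16 bytes
		 * (up to the 64-byte boundary), then 64 bytes, then the
		 * remaining 20. */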

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->eccsize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
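
/* Worked example (assuming mtd->eccsize == 8): for a 13-byte iovec element,
 * ECCBUF_DIV(13) == 8 bytes can be written straight out, while
 * ECCBUF_MOD(13) == 5 bytes are held back in the bounce buffer until the
 * next element arrives (or are flushed, padded with 0xff, at the end). */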
static int
cfi_staa_writev(struct mtd_info *mtd, const struct iovec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen = 0;
	int ret = 0;
	size_t buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			memcpy(buffer, elem_base + thislen, buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd->write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */
	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	cfi_write(map, CMD(0x50), adr);

	/* Now erase */
	cfi_write(map, CMD(0x20), adr);
	cfi_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	spin_unlock_bh(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	cfi_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = cfi_read(map, adr);

	/* check for lock bit */
	if (status & CMD(0x3a)) {
		unsigned char chipstatus = status;
		if (status != CMD(status & 0xff)) {
			int i;
			for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
				chipstatus |= status >> (cfi->device_type * 8 * i);
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%x. Merging to give 0x%02x\n", status, chipstatus);
		}
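
		/* Example (hypothetical): two interleaved x8 chips that
		 * return status = 0x2080 -- lane 0 reads 0x80 (ready) and
		 * lane 1 reads 0x20 (erase failure) -- merge to chipstatus
		 * == 0xa0, so the erase failure is still caught below. */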
		/* Reset the error bits */
		cfi_write(map, CMD(0x50), adr);
		cfi_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", status);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", status);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, status);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock_bh(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, status);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;
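
	/* Worked example (hypothetical layout): two regions, 8 x 8KiB blocks
	 * at 0x0000 and then 64KiB blocks from 0x10000. For instr->addr =
	 * 0x20000 the scan stops at i == 2 and the i-- above leaves i == 1,
	 * the 64KiB region containing the start of the erase. */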

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1 << cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}

static void cfi_staa_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */
	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	cfi_write(map, CMD(0x60), adr);
	cfi_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	spin_unlock_bh(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while (len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */
	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	cfi_write(map, CMD(0x60), adr);
	cfi_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	spin_unlock_bh(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock_bh(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock_bh(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock_bh(chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			cfi_write(map, CMD(0xFF), chip->start);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock_bh(chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
#define cfi_staa_init init_module
#define cfi_staa_exit cleanup_module
#endif

static char im_name[] = "cfi_cmdset_0020";

mod_init_t cfi_staa_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
	return 0;
}

mod_exit_t cfi_staa_exit(void)
{
	inter_module_unregister(im_name);
}

module_init(cfi_staa_init);
module_exit(cfi_staa_exit);