/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.62 2003/01/24 23:30:13 dwmw2 Exp $
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

#define AMD_BOOTLOC_BUG

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct map_info *);


static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	probe:		NULL, /* Not usable directly */
	destroy:	cfi_amdstd_destroy,
	name:		"cfi_cmdset_0002",
	module:		THIS_MODULE
};
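/*
 * Primary entry point, invoked by the CFI probe code once a chip has
 * identified itself as using command set 0x0002.  Reads the vendor
 * extended query table, works around the broken boot-block location
 * field on early AMD parts, fixes up the unlock addresses for the
 * device/bus geometry in use, and hands over to cfi_amdstd_setup().
 */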
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned char bootloc;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	__u8 major, minor;
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;

		cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

		major = cfi_read_query(map, base + (adr+3)*ofs_factor);
		minor = cfi_read_query(map, base + (adr+4)*ofs_factor);

		printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
		       major, minor, adr);
		cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);

		cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
		cfi->mfr = cfi_read_query(map, base);
		cfi->id = cfi_read_query(map, base + ofs_factor);

		/* Wheee. Bring me the head of someone at AMD. */
#ifdef AMD_BOOTLOC_BUG
		if (((major << 8) | minor) < 0x3131) {
			/* CFI version 1.0 => don't trust bootloc */
			if (cfi->id & 0x80) {
				printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
				bootloc = 3;	/* top boot */
			} else {
				bootloc = 2;	/* bottom boot */
			}
		} else
#endif
		{
			cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
			bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
		}
		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		switch (cfi->device_type) {
		case CFI_DEVICETYPE_X8:
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
			break;
		case CFI_DEVICETYPE_X16:
			cfi->addr_unlock1 = 0xaaa;
			if (map->buswidth == cfi->interleave) {
				/* X16 chip(s) in X8 mode */
				cfi->addr_unlock2 = 0x555;
			} else {
				cfi->addr_unlock2 = 0x554;
			}
			break;
		case CFI_DEVICETYPE_X32:
			cfi->addr_unlock1 = 0x1555;
			cfi->addr_unlock2 = 0xaaa;
			break;
		default:
			printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
			return NULL;
		}
	} /* CFI mode */

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
	return cfi_amdstd_setup(map);
}

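/*
 * Allocate and populate the struct mtd_info: total size, erase
 * region geometry (replicated per chip and scaled by the interleave),
 * and the read/write/erase entry points appropriate to the bus width.
 */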
static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);

	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		goto setup_err;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	if (cfi->cfiq->NumEraseRegions == 1) {
		/* No need to muck about with multiple erase sizes */
		mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
	} else {
		unsigned long offset = 0;
		int i,j;

		mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
		mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
		if (!mtd->eraseregions) {
			printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
			goto setup_err;
		}

		for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
			unsigned long ernum, ersize;
			ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
			ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

			if (mtd->erasesize < ersize) {
				mtd->erasesize = ersize;
			}
			for (j=0; j<cfi->numchips; j++) {
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			}
			offset += (ersize * ernum);
		}
		if (offset != devsize) {
			/* Argh */
			printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
			goto setup_err;
		}
#if 0
		/* debug */
		for (i=0; i<mtd->numeraseregions;i++){
			printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
			       i,mtd->eraseregions[i].offset,
			       mtd->eraseregions[i].erasesize,
			       mtd->eraseregions[i].numblocks);
		}
#endif
	}

	switch (CFIDEV_BUSWIDTH)
	{
	case 1:
	case 2:
	case 4:
#if 1
		if (mtd->numeraseregions > 1)
			mtd->erase = cfi_amdstd_erase_varsize;
		else
#endif
		if (((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1)
			mtd->erase = cfi_amdstd_erase_chip;
		else
			mtd->erase = cfi_amdstd_erase_onesize;
		mtd->read = cfi_amdstd_read;
		mtd->write = cfi_amdstd_write;
		break;

	default:
		printk(KERN_WARNING "Unsupported buswidth\n");
		goto setup_err;
		break;
	}
	if (cfi->fast_prog) {
		/* In cfi_amdstd_write() we frob the protection stuff
		   without paying any attention to the state machine.
		   This upsets in-progress erases. So we turn this flag
		   off for now till the code gets fixed. */
		printk(KERN_NOTICE "cfi_cmdset_0002: Disabling fast programming due to code brokenness.\n");
		cfi->fast_prog = 0;
	}


	/* Does this chip have a SecSi area? */
	if (cfi->mfr==1) {

		switch(cfi->id){
		case 0x50:
		case 0x53:
		case 0x55:
		case 0x56:
		case 0x5C:
		case 0x5F:
			/* Yes */
			mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
			mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
		default:
			;
		}
	}


	mtd->sync = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume = cfi_amdstd_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	map->fldrv = &cfi_amdstd_chipdrv;
	mtd->name = map->name;
	MOD_INC_USE_COUNT;
	return mtd;

 setup_err:
	if (mtd) {
		if (mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

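/*
 * Read from a single chip.  If the chip is busy (erasing or writing),
 * sleep on its wait queue and retry from the top once woken; the chip
 * mutex is dropped while we sleep so that the operation in progress
 * can complete.
 */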
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	map->copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}

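/*
 * Top-level read.  Splits the request at chip boundaries: on each
 * iteration, thislen is clamped so the transfer never crosses the end
 * of the current chip, then we continue from offset 0 of the next one.
 */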
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

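/*
 * Read from the SecSi (Secured Silicon) sector of a single chip.
 * The 0xAA/0x55/0x88 sequence enters SecSi mode; after the copy, the
 * 0xAA/0x55/0x90 sequence followed by 0x00 exits it again.
 */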
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map->copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

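/*
 * Program one bus-width word at the given address.  After issuing the
 * program command (preceded by the unlock cycles, unless the chip is
 * already in unlock bypass mode) we wait word_write_time and then
 * poll DQ6: it toggles on successive reads while programming is in
 * progress and stops toggling on completion.  DQ5 going high signals
 * that the operation has exceeded its internal time limit.
 */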
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
{
	unsigned long timeo = jiffies + HZ;
	unsigned int oldstatus, status;
	unsigned int dq6, dq5;
	struct cfi_private *cfi = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		printk(KERN_DEBUG "Wake up to write:\n");
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;
	ENABLE_VPP(map);
	if (fast) { /* Unlock bypass */
		cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL);
	}
	else {
		cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
		cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	}

	cfi_write(map, datum, adr);

	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->word_write_time);
	cfi_spin_lock(chip->mutex);

	/* Poll the toggle bits instead of reading back many times.
	   This ensures that the write operation has really completed,
	   or tells us why it failed. */
	dq6 = CMD(1<<6);
	dq5 = CMD(1<<5);
	timeo = jiffies + (HZ/1000) + 1; /* setting timeout to 1ms for now */

	oldstatus = cfi_read(map, adr);
	status = cfi_read(map, adr);

	while( (status & dq6) != (oldstatus & dq6) &&
	       (status & dq5) != dq5 &&
	       !time_after(jiffies, timeo) ) {

		if (need_resched()) {
			cfi_spin_unlock(chip->mutex);
			yield();
			cfi_spin_lock(chip->mutex);
		} else
			udelay(1);

		oldstatus = cfi_read( map, adr );
		status = cfi_read( map, adr );
	}

	if( (status & dq6) != (oldstatus & dq6) ) {
		/* The toggling didn't stop?? */
		if( (status & dq5) == dq5 ) {
			/* When DQ5 goes high, we must check once more whether
			   DQ6 is still toggling. If it has stopped, the write
			   completed OK. If it is still toggling, reset the chip. */
			oldstatus = cfi_read(map, adr);
			status = cfi_read(map, adr);

			if ( (oldstatus & 0x00FF) == (status & 0x00FF) ) {
				printk(KERN_WARNING "Warning: DQ5 raised while program operation was in progress, however operation completed OK\n" );
			} else {
				/* DQ5 is active so we can do a reset and stop the write */
				cfi_write(map, CMD(0xF0), chip->start);
				printk(KERN_WARNING "Internal flash device timeout occurred or write operation was performed while flash was programming.\n" );
			}
		} else {
			printk(KERN_WARNING "Waiting for write to complete timed out in do_write_oneword.\n");

			chip->state = FL_READY;
			wake_up(&chip->wq);
			cfi_spin_unlock(chip->mutex);
			DISABLE_VPP(map);
			return -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}

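/*
 * Top-level write.  Three phases: a read-modify-write of the leading
 * partial word if the start is not bus-aligned, a bus-width-at-a-time
 * loop over the aligned body (using unlock bypass mode when fast
 * programming is enabled, and re-entering it whenever we cross onto a
 * new chip), and finally a read-modify-write of any trailing bytes.
 */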
static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first partial word write */
	if (ofs & (CFIDEV_BUSWIDTH-1)) {
		unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
		int i = ofs - bus_ofs;
		int n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
		while (len && i < CFIDEV_BUSWIDTH)
			tmp_buf[i++] = buf[n++], len--;

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, 0);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (cfi->fast_prog) {
		/* Go into unlock bypass mode */
		cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
		cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
	}

	/* We are now aligned, write as much as possible */
	while(len >= CFIDEV_BUSWIDTH) {
		__u32 datum;

		if (cfi_buswidth_is_1()) {
			datum = *(__u8*)buf;
		} else if (cfi_buswidth_is_2()) {
			datum = *(__u16*)buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}
		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, cfi->fast_prog);
		if (ret) {
			if (cfi->fast_prog){
				/* Get out of unlock bypass mode */
				cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
				cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
			}
			return ret;
		}

		ofs += CFIDEV_BUSWIDTH;
		buf += CFIDEV_BUSWIDTH;
		(*retlen) += CFIDEV_BUSWIDTH;
		len -= CFIDEV_BUSWIDTH;

		if (ofs >> cfi->chipshift) {
			if (cfi->fast_prog){
				/* Get out of unlock bypass mode */
				cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
				cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
			}

			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
			if (cfi->fast_prog){
				/* Go into unlock bypass mode for next set of chips */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
				cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
			}
		}
	}

	if (cfi->fast_prog){
		/* Get out of unlock bypass mode */
		cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
	}

	/* Write the trailing bytes if any */
	if (len & (CFIDEV_BUSWIDTH-1)) {
		int i = 0, n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
		while (len--)
			tmp_buf[i++] = buf[n++];

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, 0);
		if (ret)
			return ret;

		(*retlen) += n;
	}

	return 0;
}

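/*
 * Erase an entire chip, for devices with a single erase region that
 * only implement the chip erase command (unlock, 0x80, unlock, 0x10).
 * Completion is detected with the same DQ6 toggle / DQ5 timeout
 * polling as the block erase path below.
 */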
static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
{
	unsigned int oldstatus, status;
	unsigned int dq6, dq5;
	unsigned long timeo = jiffies + HZ;
	unsigned int adr;
	struct cfi_private *cfi = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	/* Handle devices with one erase region, that only implement
	 * the chip erase command.
	 */
	ENABLE_VPP(map);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	timeo = jiffies + (HZ*20);
	adr = cfi->addr_unlock1;

	/* Wait for the end of programming/erasure by using the toggle method.
	 * As long as there is a programming procedure going on, bit 6 of the
	 * last written byte toggles its state with each consecutive read.
	 * The toggling stops as soon as the procedure is completed.
	 *
	 * If the process has gone on for too long, the chip sets bit 5.
	 * Once bit 5 is set, the operation can be killed by sending a reset
	 * command to the chip.
	 */
	dq6 = CMD(1<<6);
	dq5 = CMD(1<<5);

	oldstatus = cfi_read(map, adr);
	status = cfi_read(map, adr);
	while( ((status & dq6) != (oldstatus & dq6)) &&
	       ((status & dq5) != dq5) &&
	       !time_after(jiffies, timeo)) {
		int wait_reps;

		/* an initial short sleep */
		cfi_spin_unlock(chip->mutex);
		schedule_timeout(HZ/100);
		cfi_spin_lock(chip->mutex);

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);
			printk(KERN_DEBUG "erase suspended. Sleeping\n");

			schedule();
			remove_wait_queue(&chip->wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			timeo = jiffies + (HZ*2); /* FIXME */
			cfi_spin_lock(chip->mutex);
			continue;
		}

		/* Busy wait for 1/10 of a millisecond */
		for(wait_reps = 0;
		    (wait_reps < 100) &&
		    ((status & dq6) != (oldstatus & dq6)) &&
		    ((status & dq5) != dq5);
		    wait_reps++) {

			/* Latency issues. Drop the lock, wait a while and retry */
			cfi_spin_unlock(chip->mutex);

			cfi_udelay(1);

			cfi_spin_lock(chip->mutex);
			oldstatus = cfi_read(map, adr);
			status = cfi_read(map, adr);
		}
		oldstatus = cfi_read(map, adr);
		status = cfi_read(map, adr);
	}
	if ((status & dq6) != (oldstatus & dq6)) {
		/* The erasing didn't stop?? */
		if ((status & dq5) == dq5) {
			/* dq5 is active so we can do a reset and stop the erase */
			cfi_write(map, CMD(0xF0), chip->start);
		}
		chip->state = FL_READY;
		wake_up(&chip->wq);
		cfi_spin_unlock(chip->mutex);
		printk(KERN_WARNING "waiting for erase to complete timed out.\n");
		DISABLE_VPP(map);
		return -EIO;
	}
	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}

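/*
 * Erase a single sector: the same unlock and 0x80 setup as chip
 * erase, but finished with a 0x30 command written to the sector
 * address itself.  Polling and erase-suspend handling mirror
 * do_erase_chip() above.
 */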
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	unsigned int oldstatus, status;
	unsigned int dq6, dq5;
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;
	ENABLE_VPP(map);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_write(map, CMD(0x30), adr);

	timeo = jiffies + (HZ*20);

	/* Wait for the end of programming/erasure by using the toggle method.
	 * As long as there is a programming procedure going on, bit 6 of the
	 * last written byte toggles its state with each consecutive read.
	 * The toggling stops as soon as the procedure is completed.
	 *
	 * If the process has gone on for too long, the chip sets bit 5.
	 * Once bit 5 is set, the operation can be killed by sending a reset
	 * command to the chip.
	 */
	dq6 = CMD(1<<6);
	dq5 = CMD(1<<5);

	oldstatus = cfi_read(map, adr);
	status = cfi_read(map, adr);
	while( ((status & dq6) != (oldstatus & dq6)) &&
	       ((status & dq5) != dq5) &&
	       !time_after(jiffies, timeo)) {
		int wait_reps;

		/* an initial short sleep */
		cfi_spin_unlock(chip->mutex);
		schedule_timeout(HZ/100);
		cfi_spin_lock(chip->mutex);

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);
			printk(KERN_DEBUG "erase suspended. Sleeping\n");

			schedule();
			remove_wait_queue(&chip->wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			timeo = jiffies + (HZ*2); /* FIXME */
			cfi_spin_lock(chip->mutex);
			continue;
		}

		/* Busy wait for 1/10 of a millisecond */
		for(wait_reps = 0;
		    (wait_reps < 100) &&
		    ((status & dq6) != (oldstatus & dq6)) &&
		    ((status & dq5) != dq5);
		    wait_reps++) {

			/* Latency issues. Drop the lock, wait a while and retry */
			cfi_spin_unlock(chip->mutex);

			cfi_udelay(1);

			cfi_spin_lock(chip->mutex);
			oldstatus = cfi_read(map, adr);
			status = cfi_read(map, adr);
		}
		oldstatus = cfi_read(map, adr);
		status = cfi_read(map, adr);
	}
	if ((status & dq6) != (oldstatus & dq6)) {
		/* The erasing didn't stop?? */
		if ((status & dq5) == dq5) {
			/* When DQ5 goes high, we must check once more whether
			   DQ6 is still toggling. If it has stopped, the erase
			   completed OK. If it is still toggling, reset the chip. */
			oldstatus = cfi_read(map, adr);
			status = cfi_read(map, adr);

			if ((oldstatus & 0x00FF) == (status & 0x00FF)) {
				printk(KERN_WARNING "Warning: DQ5 raised while erase operation was in progress, but erase completed OK\n");
			} else {
				/* DQ5 is active so we can do a reset and stop the erase */
				cfi_write(map, CMD(0xF0), chip->start);
				printk(KERN_WARNING "Internal flash device timeout occurred or write operation was performed while flash was erasing\n");
			}
		} else {
			printk(KERN_WARNING "Waiting for erase to complete timed out in do_erase_oneblock.\n");

			chip->state = FL_READY;
			wake_up(&chip->wq);
			cfi_spin_unlock(chip->mutex);
			DISABLE_VPP(map);
			return -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);
	return 0;
}

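/*
 * Erase handler for devices with multiple erase regions.  Both ends
 * of the requested range must be aligned to the erase size of the
 * region they fall in, so we first locate those regions and validate
 * the alignment before erasing block by block, switching erase sizes
 * as the address crosses region boundaries.  For example, on a
 * hypothetical layout of 64KiB main blocks followed by 8KiB boot
 * blocks, a range starting in the main region must be 64KiB aligned
 * even if it ends on an 8KiB boundary in the boot region.
 */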
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}

static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;

	if (instr->addr & (mtd->erasesize - 1))
		return -EINVAL;

	if (instr->len & (mtd->erasesize -1))
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}

static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			cfi_spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		cfi_spin_unlock(chip->mutex);
	}
}


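/*
 * Power-management suspend.  Idle chips are marked FL_PM_SUSPENDED;
 * unlike sync() we cannot sleep waiting for a busy chip, so a chip in
 * any other state fails the whole suspend with -EAGAIN, and the chips
 * already suspended are rolled back to their previous state.
 */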
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		cfi_spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			cfi_spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			cfi_spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			cfi_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		cfi_spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name[]="cfi_cmdset_0002";

int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}

static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}

module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
