/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 * 	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are BCD bytes: high nibble volts, low nibble tenths */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(map,
						extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

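	/*
	 * Rough default operation timings. buffer_write_time is consumed
	 * by cfi_udelay() in do_write_buffer(), so these look like
	 * microsecond values; they are only starting points, and
	 * buffer_write_time is adapted at runtime after each write.
	 */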
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions) {
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
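		/*
		 * Per the CFI spec, each EraseRegionInfo entry packs two
		 * fields: bits 31:16 hold the block size in units of 256
		 * bytes, bits 15:0 the block count minus one, so the
		 * shift/mask below computes (info >> 16) * 256, scaled by
		 * the interleave. Illustrative example: a region of 64
		 * blocks of 64KiB is encoded as 0x0100003f, which gives
		 * ersize = 0x10000 and ernum = 64; with two interleaved
		 * chips ersize becomes 0x20000.
		 */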
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Select the correct geometry setup */
	mtd->_erase = cfi_staa_erase_varsize;
	mtd->_read = cfi_staa_read;
	mtd->_write = cfi_staa_write_buffers;
	mtd->_writev = cfi_staa_writev;
	mtd->_sync = cfi_staa_sync;
	mtd->_lock = cfi_staa_lock;
	mtd->_unlock = cfi_staa_unlock;
	mtd->_suspend = cfi_staa_suspend;
	mtd->_resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
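	/*
	 * CMD() builds a map_word with the 8-bit command replicated for
	 * every interleaved device on the bus. 0x80 is SR.7, the
	 * WSM-ready bit of the Intel/ST status register, so e.g. two x16
	 * chips on a 32-bit bus yield status_OK = 0x00800080, and
	 * map_word_andequal() below insists every chip reports ready.
	 */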
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write(map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);
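	/*
	 * Worked example with made-up geometry: chipshift = 21 (2MiB per
	 * interleaved chip set) and from = 0x250000 give chipnum = 1 and
	 * ofs = 0x50000, i.e. the read starts 0x50000 bytes into the
	 * second chip.
	 */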

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

419 
do_write_buffer(struct map_info * map,struct flchip * chip,unsigned long adr,const u_char * buf,int len)420 static int do_write_buffer(struct map_info *map, struct flchip *chip,
421 				  unsigned long adr, const u_char *buf, int len)
422 {
423 	struct cfi_private *cfi = map->fldrv_priv;
424 	map_word status, status_OK;
425 	unsigned long cmd_adr, timeo;
426 	DECLARE_WAITQUEUE(wait, current);
427 	int wbufsize, z;
428 
429         /* M58LW064A requires bus alignment for buffer wriets -- saw */
430         if (adr & (map_bankwidth(map)-1))
431             return -EINVAL;
432 
433         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
434         adr += chip->start;
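	/*
	 * The write-buffer commands must go to the start of the buffer
	 * window. With illustrative values MaxBufWriteSize = 5 (a 32-byte
	 * buffer per chip) and a two-way interleave, wbufsize is 64, so
	 * adr = 0x1030 rounds down to cmd_adr = 0x1000.
	 */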
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
#endif
		fallthrough;
	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
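	/*
	 * The count is encoded as (number of bus words - 1), again
	 * replicated per interleaved chip by CMD(). For instance, a
	 * 64-byte write on a 4-byte bankwidth is sent as CMD(15).
	 */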
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}
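	/*
	 * Simple feedback loop on the polling delay: if the write was
	 * already complete after the initial cfi_udelay() (z == 0),
	 * shorten buffer_write_time (with a floor of 1); if we had to
	 * poll more than once (z > 1), lengthen it for next time.
	 */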
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

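	/*
	 * Per the Intel/ST status register layout, mask 0x3a covers
	 * SR.5 (erase error), SR.4 (program error), SR.3 (VPP low) and
	 * SR.1 (block locked); SR.1 alone maps to -EROFS below.
	 */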
	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
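/*
 * ECCBUF_DIV/ECCBUF_MOD split a length into its page-aligned part and
 * the remainder, relying on ECCBUF_SIZE being a power of 2. With the
 * writesize of 8 set in cfi_staa_setup(), a 27-byte element splits into
 * ECCBUF_DIV(27) = 24 bytes written directly and ECCBUF_MOD(27) = 3
 * bytes carried over in the bounce buffer.
 */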
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
					buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
					&thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			memcpy(buffer, elem_base + thislen, buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != ECCBUF_SIZE)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
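	/*
	 * Block erase is a two-cycle sequence: 0x20 (erase setup) then
	 * 0xD0 (erase confirm) to the block address. Requiring the
	 * confirm cycle is what keeps a stray bus write from wiping a
	 * block by accident.
	 */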
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
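			/*
			 * The per-device status lanes disagree, so OR
			 * them into one summary byte: an error bit set
			 * by any interleaved chip is acted on.
			 */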
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

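		/*
		 * Move on to the next erase region once adr reaches the
		 * end of region i. Both sides are reduced modulo the
		 * chip size because the region table repeats for every
		 * chip in the set while adr is a per-chip offset.
		 */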
		if (adr % (1 << cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			fallthrough;
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
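	/*
	 * Block lock is a two-cycle sequence as well: 0x60 (lock setup)
	 * followed by 0x01 (lock confirm) at the block address, per the
	 * ST/Intel block-locking command definitions.
	 */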
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize -1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
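	/*
	 * Unlock mirrors the lock sequence: 0x60 (setup) followed by
	 * 0xD0 (unlock confirm). On these parts 0xD0 doubles as the
	 * generic confirm cycle also used for erase and buffer writes.
	 */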
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			break;

		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
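		/* Writing 0xFF (Read Array) puts the whole device back
		 * into plain read mode, which is as close to a known
		 * state as we can get after a possible power cycle.
		 */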
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");