1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  *
8  * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
9  * 	- completely revamped method functions so they are aware and
10  * 	  independent of the flash geometry (buswidth, interleave, etc.)
11  * 	- scalability vs code size is completely set at compile-time
12  * 	  (see include/linux/mtd/cfi.h for selection)
13  *	- optimized write buffer method
14  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15  *	- reworked lock/unlock/erase support for var size flash
16  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
17  * 	- auto unlock sectors on resume for auto locking flash on power up
18  */
19 
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <asm/io.h>
25 #include <asm/byteorder.h>
26 
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/reboot.h>
32 #include <linux/bitmap.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
37 
38 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
40 
41 // debugging, turns off buffer write mode if set to 1
42 #define FORCE_WORD_WRITE 0
43 
44 /* Intel chips */
45 #define I82802AB	0x00ad
46 #define I82802AC	0x00ac
47 #define PF38F4476	0x881c
48 #define M28F00AP30	0x8963
49 /* STMicroelectronics chips */
50 #define M50LPW080       0x002F
51 #define M50FLW080A	0x0080
52 #define M50FLW080B	0x0081
53 /* Atmel chips */
54 #define AT49BV640D	0x02de
55 #define AT49BV640DT	0x02db
56 /* Sharp chips */
57 #define LH28F640BFHE_PTTL90	0x00b0
58 #define LH28F640BFHE_PBTL90	0x00b1
59 #define LH28F640BFHE_PTTL70A	0x00b2
60 #define LH28F640BFHE_PBTL70A	0x00b3
61 
62 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
64 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
66 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
67 static void cfi_intelext_sync (struct mtd_info *);
68 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
69 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
70 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
71 				  uint64_t len);
72 #ifdef CONFIG_MTD_OTP
73 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
74 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
75 static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
76 					    size_t *, const u_char *);
77 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
78 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
79 					   size_t *, struct otp_info *);
80 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
81 					   size_t *, struct otp_info *);
82 #endif
83 static int cfi_intelext_suspend (struct mtd_info *);
84 static void cfi_intelext_resume (struct mtd_info *);
85 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
86 
87 static void cfi_intelext_destroy(struct mtd_info *);
88 
89 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
90 
91 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
92 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
93 
94 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
95 		     size_t *retlen, void **virt, resource_size_t *phys);
96 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
97 
98 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
99 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
100 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
101 #include "fwh_lock.h"
102 
103 
104 
105 /*
106  *  *********** SETUP AND PROBE BITS  ***********
107  */
108 
109 static struct mtd_chip_driver cfi_intelext_chipdrv = {
110 	.probe		= NULL, /* Not usable directly */
111 	.destroy	= cfi_intelext_destroy,
112 	.name		= "cfi_cmdset_0001",
113 	.module		= THIS_MODULE
114 };
115 
116 /* #define DEBUG_LOCK_BITS */
117 /* #define DEBUG_CFI_FEATURES */
118 
119 #ifdef DEBUG_CFI_FEATURES
cfi_tell_features(struct cfi_pri_intelext * extp)120 static void cfi_tell_features(struct cfi_pri_intelext *extp)
121 {
122 	int i;
123 	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
124 	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
125 	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
126 	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
127 	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
128 	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
129 	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
130 	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
131 	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
132 	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
133 	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
134 	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
135 	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
136 	for (i=11; i<32; i++) {
137 		if (extp->FeatureSupport & (1<<i))
138 			printk("     - Unknown Bit %X:      supported\n", i);
139 	}
140 
141 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
142 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
143 	for (i=1; i<8; i++) {
144 		if (extp->SuspendCmdSupport & (1<<i))
145 			printk("     - Unknown Bit %X:               supported\n", i);
146 	}
147 
148 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
149 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
150 	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
151 	for (i=2; i<3; i++) {
152 		if (extp->BlkStatusRegMask & (1<<i))
153 			printk("     - Unknown Bit %X Active: yes\n",i);
154 	}
155 	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
156 	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
157 	for (i=6; i<16; i++) {
158 		if (extp->BlkStatusRegMask & (1<<i))
159 			printk("     - Unknown Bit %X Active: yes\n",i);
160 	}
161 
162 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
163 	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
164 	if (extp->VppOptimal)
165 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
166 		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
167 }
168 #endif
169 
170 /* Atmel chips don't use the same PRI format as Intel chips */
fixup_convert_atmel_pri(struct mtd_info * mtd)171 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
172 {
173 	struct map_info *map = mtd->priv;
174 	struct cfi_private *cfi = map->fldrv_priv;
175 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
176 	struct cfi_pri_atmel atmel_pri;
177 	uint32_t features = 0;
178 
179 	/* Reverse byteswapping */
180 	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
181 	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
182 	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
183 
184 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
185 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
186 
187 	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
188 
189 	if (atmel_pri.Features & 0x01) /* chip erase supported */
190 		features |= (1<<0);
191 	if (atmel_pri.Features & 0x02) /* erase suspend supported */
192 		features |= (1<<1);
193 	if (atmel_pri.Features & 0x04) /* program suspend supported */
194 		features |= (1<<2);
195 	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
196 		features |= (1<<9);
197 	if (atmel_pri.Features & 0x20) /* page mode read supported */
198 		features |= (1<<7);
199 	if (atmel_pri.Features & 0x40) /* queued erase supported */
200 		features |= (1<<4);
201 	if (atmel_pri.Features & 0x80) /* Protection bits supported */
202 		features |= (1<<6);
203 
204 	extp->FeatureSupport = features;
205 
206 	/* burst write mode not supported */
207 	cfi->cfiq->BufWriteTimeoutTyp = 0;
208 	cfi->cfiq->BufWriteTimeoutMax = 0;
209 }
210 
fixup_at49bv640dx_lock(struct mtd_info * mtd)211 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
212 {
213 	struct map_info *map = mtd->priv;
214 	struct cfi_private *cfi = map->fldrv_priv;
215 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
216 
217 	cfip->FeatureSupport |= (1 << 5);
218 	mtd->flags |= MTD_POWERUP_LOCK;
219 }
220 
221 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
222 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
fixup_intel_strataflash(struct mtd_info * mtd)223 static void fixup_intel_strataflash(struct mtd_info *mtd)
224 {
225 	struct map_info *map = mtd->priv;
226 	struct cfi_private *cfi = map->fldrv_priv;
227 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
228 
229 	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
230 	                    "erase on write disabled.\n");
231 	extp->SuspendCmdSupport &= ~1;
232 }
233 #endif
234 
235 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
fixup_no_write_suspend(struct mtd_info * mtd)236 static void fixup_no_write_suspend(struct mtd_info *mtd)
237 {
238 	struct map_info *map = mtd->priv;
239 	struct cfi_private *cfi = map->fldrv_priv;
240 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
241 
242 	if (cfip && (cfip->FeatureSupport&4)) {
243 		cfip->FeatureSupport &= ~4;
244 		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
245 	}
246 }
247 #endif
248 
fixup_st_m28w320ct(struct mtd_info * mtd)249 static void fixup_st_m28w320ct(struct mtd_info *mtd)
250 {
251 	struct map_info *map = mtd->priv;
252 	struct cfi_private *cfi = map->fldrv_priv;
253 
254 	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
255 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
256 }
257 
fixup_st_m28w320cb(struct mtd_info * mtd)258 static void fixup_st_m28w320cb(struct mtd_info *mtd)
259 {
260 	struct map_info *map = mtd->priv;
261 	struct cfi_private *cfi = map->fldrv_priv;
262 
263 	/* Note this is done after the region info is endian swapped */
264 	cfi->cfiq->EraseRegionInfo[1] =
265 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
266 };
267 
is_LH28F640BF(struct cfi_private * cfi)268 static int is_LH28F640BF(struct cfi_private *cfi)
269 {
270 	/* Sharp LH28F640BF Family */
271 	if (cfi->mfr == CFI_MFR_SHARP && (
272 	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
273 	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
274 		return 1;
275 	return 0;
276 }
277 
fixup_LH28F640BF(struct mtd_info * mtd)278 static void fixup_LH28F640BF(struct mtd_info *mtd)
279 {
280 	struct map_info *map = mtd->priv;
281 	struct cfi_private *cfi = map->fldrv_priv;
282 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
283 
284 	/* Reset the Partition Configuration Register on LH28F640BF
285 	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
286 	if (is_LH28F640BF(cfi)) {
287 		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
288 		map_write(map, CMD(0x60), 0);
289 		map_write(map, CMD(0x04), 0);
290 
291 		/* We have set one single partition thus
292 		 * Simultaneous Operations are not allowed */
293 		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
294 		extp->FeatureSupport &= ~512;
295 	}
296 }
297 
fixup_use_point(struct mtd_info * mtd)298 static void fixup_use_point(struct mtd_info *mtd)
299 {
300 	struct map_info *map = mtd->priv;
301 	if (!mtd->_point && map_is_linear(map)) {
302 		mtd->_point   = cfi_intelext_point;
303 		mtd->_unpoint = cfi_intelext_unpoint;
304 	}
305 }
306 
fixup_use_write_buffers(struct mtd_info * mtd)307 static void fixup_use_write_buffers(struct mtd_info *mtd)
308 {
309 	struct map_info *map = mtd->priv;
310 	struct cfi_private *cfi = map->fldrv_priv;
311 	if (cfi->cfiq->BufWriteTimeoutTyp) {
312 		printk(KERN_INFO "Using buffer write method\n" );
313 		mtd->_write = cfi_intelext_write_buffers;
314 		mtd->_writev = cfi_intelext_writev;
315 	}
316 }
317 
318 /*
319  * Some chips power-up with all sectors locked by default.
320  */
fixup_unlock_powerup_lock(struct mtd_info * mtd)321 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
322 {
323 	struct map_info *map = mtd->priv;
324 	struct cfi_private *cfi = map->fldrv_priv;
325 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
326 
327 	if (cfip->FeatureSupport&32) {
328 		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
329 		mtd->flags |= MTD_POWERUP_LOCK;
330 	}
331 }
332 
333 static struct cfi_fixup cfi_fixup_table[] = {
334 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
335 	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
336 	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
337 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
338 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
339 #endif
340 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
341 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
342 #endif
343 #if !FORCE_WORD_WRITE
344 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
345 #endif
346 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
347 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
348 	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
349 	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
350 	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
351 	{ 0, 0, NULL }
352 };
353 
354 static struct cfi_fixup jedec_fixup_table[] = {
355 	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
356 	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
357 	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
358 	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
359 	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
360 	{ 0, 0, NULL }
361 };
362 static struct cfi_fixup fixup_table[] = {
363 	/* The CFI vendor ids and the JEDEC vendor IDs appear
364 	 * to be common.  It is like the devices id's are as
365 	 * well.  This table is to pick all cases where
366 	 * we know that is the case.
367 	 */
368 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
369 	{ 0, 0, NULL }
370 };
371 
cfi_fixup_major_minor(struct cfi_private * cfi,struct cfi_pri_intelext * extp)372 static void cfi_fixup_major_minor(struct cfi_private *cfi,
373 						struct cfi_pri_intelext *extp)
374 {
375 	if (cfi->mfr == CFI_MFR_INTEL &&
376 			cfi->id == PF38F4476 && extp->MinorVersion == '3')
377 		extp->MinorVersion = '1';
378 }
379 
cfi_is_micron_28F00AP30(struct cfi_private * cfi,struct flchip * chip)380 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
381 {
382 	/*
383 	 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
384 	 * Erase Supend for their small Erase Blocks(0x8000)
385 	 */
386 	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
387 		return 1;
388 	return 0;
389 }
390 
391 static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info * map,__u16 adr)392 read_pri_intelext(struct map_info *map, __u16 adr)
393 {
394 	struct cfi_private *cfi = map->fldrv_priv;
395 	struct cfi_pri_intelext *extp;
396 	unsigned int extra_size = 0;
397 	unsigned int extp_size = sizeof(*extp);
398 
399  again:
400 	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
401 	if (!extp)
402 		return NULL;
403 
404 	cfi_fixup_major_minor(cfi, extp);
405 
406 	if (extp->MajorVersion != '1' ||
407 	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
408 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
409 		       "version %c.%c.\n",  extp->MajorVersion,
410 		       extp->MinorVersion);
411 		kfree(extp);
412 		return NULL;
413 	}
414 
415 	/* Do some byteswapping if necessary */
416 	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
417 	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
418 	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
419 
420 	if (extp->MinorVersion >= '0') {
421 		extra_size = 0;
422 
423 		/* Protection Register info */
424 		if (extp->NumProtectionFields)
425 			extra_size += (extp->NumProtectionFields - 1) *
426 				      sizeof(struct cfi_intelext_otpinfo);
427 	}
428 
429 	if (extp->MinorVersion >= '1') {
430 		/* Burst Read info */
431 		extra_size += 2;
432 		if (extp_size < sizeof(*extp) + extra_size)
433 			goto need_more;
434 		extra_size += extp->extra[extra_size - 1];
435 	}
436 
437 	if (extp->MinorVersion >= '3') {
438 		int nb_parts, i;
439 
440 		/* Number of hardware-partitions */
441 		extra_size += 1;
442 		if (extp_size < sizeof(*extp) + extra_size)
443 			goto need_more;
444 		nb_parts = extp->extra[extra_size - 1];
445 
446 		/* skip the sizeof(partregion) field in CFI 1.4 */
447 		if (extp->MinorVersion >= '4')
448 			extra_size += 2;
449 
450 		for (i = 0; i < nb_parts; i++) {
451 			struct cfi_intelext_regioninfo *rinfo;
452 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
453 			extra_size += sizeof(*rinfo);
454 			if (extp_size < sizeof(*extp) + extra_size)
455 				goto need_more;
456 			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
457 			extra_size += (rinfo->NumBlockTypes - 1)
458 				      * sizeof(struct cfi_intelext_blockinfo);
459 		}
460 
461 		if (extp->MinorVersion >= '4')
462 			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
463 
464 		if (extp_size < sizeof(*extp) + extra_size) {
465 			need_more:
466 			extp_size = sizeof(*extp) + extra_size;
467 			kfree(extp);
468 			if (extp_size > 4096) {
469 				printk(KERN_ERR
470 					"%s: cfi_pri_intelext is too fat\n",
471 					__func__);
472 				return NULL;
473 			}
474 			goto again;
475 		}
476 	}
477 
478 	return extp;
479 }
480 
cfi_cmdset_0001(struct map_info * map,int primary)481 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
482 {
483 	struct cfi_private *cfi = map->fldrv_priv;
484 	struct mtd_info *mtd;
485 	int i;
486 
487 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
488 	if (!mtd)
489 		return NULL;
490 	mtd->priv = map;
491 	mtd->type = MTD_NORFLASH;
492 
493 	/* Fill in the default mtd operations */
494 	mtd->_erase   = cfi_intelext_erase_varsize;
495 	mtd->_read    = cfi_intelext_read;
496 	mtd->_write   = cfi_intelext_write_words;
497 	mtd->_sync    = cfi_intelext_sync;
498 	mtd->_lock    = cfi_intelext_lock;
499 	mtd->_unlock  = cfi_intelext_unlock;
500 	mtd->_is_locked = cfi_intelext_is_locked;
501 	mtd->_suspend = cfi_intelext_suspend;
502 	mtd->_resume  = cfi_intelext_resume;
503 	mtd->flags   = MTD_CAP_NORFLASH;
504 	mtd->name    = map->name;
505 	mtd->writesize = 1;
506 	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
507 
508 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
509 
510 	if (cfi->cfi_mode == CFI_MODE_CFI) {
511 		/*
512 		 * It's a real CFI chip, not one for which the probe
513 		 * routine faked a CFI structure. So we read the feature
514 		 * table from it.
515 		 */
516 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
517 		struct cfi_pri_intelext *extp;
518 
519 		extp = read_pri_intelext(map, adr);
520 		if (!extp) {
521 			kfree(mtd);
522 			return NULL;
523 		}
524 
525 		/* Install our own private info structure */
526 		cfi->cmdset_priv = extp;
527 
528 		cfi_fixup(mtd, cfi_fixup_table);
529 
530 #ifdef DEBUG_CFI_FEATURES
531 		/* Tell the user about it in lots of lovely detail */
532 		cfi_tell_features(extp);
533 #endif
534 
535 		if(extp->SuspendCmdSupport & 1) {
536 			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
537 		}
538 	}
539 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
540 		/* Apply jedec specific fixups */
541 		cfi_fixup(mtd, jedec_fixup_table);
542 	}
543 	/* Apply generic fixups */
544 	cfi_fixup(mtd, fixup_table);
545 
546 	for (i=0; i< cfi->numchips; i++) {
547 		if (cfi->cfiq->WordWriteTimeoutTyp)
548 			cfi->chips[i].word_write_time =
549 				1<<cfi->cfiq->WordWriteTimeoutTyp;
550 		else
551 			cfi->chips[i].word_write_time = 50000;
552 
553 		if (cfi->cfiq->BufWriteTimeoutTyp)
554 			cfi->chips[i].buffer_write_time =
555 				1<<cfi->cfiq->BufWriteTimeoutTyp;
556 		/* No default; if it isn't specified, we won't use it */
557 
558 		if (cfi->cfiq->BlockEraseTimeoutTyp)
559 			cfi->chips[i].erase_time =
560 				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
561 		else
562 			cfi->chips[i].erase_time = 2000000;
563 
564 		if (cfi->cfiq->WordWriteTimeoutTyp &&
565 		    cfi->cfiq->WordWriteTimeoutMax)
566 			cfi->chips[i].word_write_time_max =
567 				1<<(cfi->cfiq->WordWriteTimeoutTyp +
568 				    cfi->cfiq->WordWriteTimeoutMax);
569 		else
570 			cfi->chips[i].word_write_time_max = 50000 * 8;
571 
572 		if (cfi->cfiq->BufWriteTimeoutTyp &&
573 		    cfi->cfiq->BufWriteTimeoutMax)
574 			cfi->chips[i].buffer_write_time_max =
575 				1<<(cfi->cfiq->BufWriteTimeoutTyp +
576 				    cfi->cfiq->BufWriteTimeoutMax);
577 
578 		if (cfi->cfiq->BlockEraseTimeoutTyp &&
579 		    cfi->cfiq->BlockEraseTimeoutMax)
580 			cfi->chips[i].erase_time_max =
581 				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
582 				       cfi->cfiq->BlockEraseTimeoutMax);
583 		else
584 			cfi->chips[i].erase_time_max = 2000000 * 8;
585 
586 		cfi->chips[i].ref_point_counter = 0;
587 		init_waitqueue_head(&(cfi->chips[i].wq));
588 	}
589 
590 	map->fldrv = &cfi_intelext_chipdrv;
591 
592 	return cfi_intelext_setup(mtd);
593 }
594 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
595 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
596 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
597 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
598 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
599 
cfi_intelext_setup(struct mtd_info * mtd)600 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
601 {
602 	struct map_info *map = mtd->priv;
603 	struct cfi_private *cfi = map->fldrv_priv;
604 	unsigned long offset = 0;
605 	int i,j;
606 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
607 
608 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
609 
610 	mtd->size = devsize * cfi->numchips;
611 
612 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
613 	mtd->eraseregions = kcalloc(mtd->numeraseregions,
614 				    sizeof(struct mtd_erase_region_info),
615 				    GFP_KERNEL);
616 	if (!mtd->eraseregions)
617 		goto setup_err;
618 
619 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
620 		unsigned long ernum, ersize;
621 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
622 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
623 
624 		if (mtd->erasesize < ersize) {
625 			mtd->erasesize = ersize;
626 		}
627 		for (j=0; j<cfi->numchips; j++) {
628 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
629 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
630 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
631 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
632 			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
633 				goto setup_err;
634 		}
635 		offset += (ersize * ernum);
636 	}
637 
638 	if (offset != devsize) {
639 		/* Argh */
640 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
641 		goto setup_err;
642 	}
643 
644 	for (i=0; i<mtd->numeraseregions;i++){
645 		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
646 		       i,(unsigned long long)mtd->eraseregions[i].offset,
647 		       mtd->eraseregions[i].erasesize,
648 		       mtd->eraseregions[i].numblocks);
649 	}
650 
651 #ifdef CONFIG_MTD_OTP
652 	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
653 	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
654 	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
655 	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
656 	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
657 	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
658 #endif
659 
660 	/* This function has the potential to distort the reality
661 	   a bit and therefore should be called last. */
662 	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
663 		goto setup_err;
664 
665 	__module_get(THIS_MODULE);
666 	register_reboot_notifier(&mtd->reboot_notifier);
667 	return mtd;
668 
669  setup_err:
670 	if (mtd->eraseregions)
671 		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
672 			for (j=0; j<cfi->numchips; j++)
673 				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
674 	kfree(mtd->eraseregions);
675 	kfree(mtd);
676 	kfree(cfi->cmdset_priv);
677 	return NULL;
678 }
679 
cfi_intelext_partition_fixup(struct mtd_info * mtd,struct cfi_private ** pcfi)680 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
681 					struct cfi_private **pcfi)
682 {
683 	struct map_info *map = mtd->priv;
684 	struct cfi_private *cfi = *pcfi;
685 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
686 
687 	/*
688 	 * Probing of multi-partition flash chips.
689 	 *
690 	 * To support multiple partitions when available, we simply arrange
691 	 * for each of them to have their own flchip structure even if they
692 	 * are on the same physical chip.  This means completely recreating
693 	 * a new cfi_private structure right here which is a blatent code
694 	 * layering violation, but this is still the least intrusive
695 	 * arrangement at this point. This can be rearranged in the future
696 	 * if someone feels motivated enough.  --nico
697 	 */
698 	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
699 	    && extp->FeatureSupport & (1 << 9)) {
700 		int offs = 0;
701 		struct cfi_private *newcfi;
702 		struct flchip *chip;
703 		struct flchip_shared *shared;
704 		int numregions, numparts, partshift, numvirtchips, i, j;
705 
706 		/* Protection Register info */
707 		if (extp->NumProtectionFields)
708 			offs = (extp->NumProtectionFields - 1) *
709 			       sizeof(struct cfi_intelext_otpinfo);
710 
711 		/* Burst Read info */
712 		offs += extp->extra[offs+1]+2;
713 
714 		/* Number of partition regions */
715 		numregions = extp->extra[offs];
716 		offs += 1;
717 
718 		/* skip the sizeof(partregion) field in CFI 1.4 */
719 		if (extp->MinorVersion >= '4')
720 			offs += 2;
721 
722 		/* Number of hardware partitions */
723 		numparts = 0;
724 		for (i = 0; i < numregions; i++) {
725 			struct cfi_intelext_regioninfo *rinfo;
726 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
727 			numparts += rinfo->NumIdentPartitions;
728 			offs += sizeof(*rinfo)
729 				+ (rinfo->NumBlockTypes - 1) *
730 				  sizeof(struct cfi_intelext_blockinfo);
731 		}
732 
733 		if (!numparts)
734 			numparts = 1;
735 
736 		/* Programming Region info */
737 		if (extp->MinorVersion >= '4') {
738 			struct cfi_intelext_programming_regioninfo *prinfo;
739 			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
740 			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
741 			mtd->flags &= ~MTD_BIT_WRITEABLE;
742 			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
743 			       map->name, mtd->writesize,
744 			       cfi->interleave * prinfo->ControlValid,
745 			       cfi->interleave * prinfo->ControlInvalid);
746 		}
747 
748 		/*
749 		 * All functions below currently rely on all chips having
750 		 * the same geometry so we'll just assume that all hardware
751 		 * partitions are of the same size too.
752 		 */
753 		partshift = cfi->chipshift - __ffs(numparts);
754 
755 		if ((1 << partshift) < mtd->erasesize) {
756 			printk( KERN_ERR
757 				"%s: bad number of hw partitions (%d)\n",
758 				__func__, numparts);
759 			return -EINVAL;
760 		}
761 
762 		numvirtchips = cfi->numchips * numparts;
763 		newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
764 				 GFP_KERNEL);
765 		if (!newcfi)
766 			return -ENOMEM;
767 		shared = kmalloc_array(cfi->numchips,
768 				       sizeof(struct flchip_shared),
769 				       GFP_KERNEL);
770 		if (!shared) {
771 			kfree(newcfi);
772 			return -ENOMEM;
773 		}
774 		memcpy(newcfi, cfi, sizeof(struct cfi_private));
775 		newcfi->numchips = numvirtchips;
776 		newcfi->chipshift = partshift;
777 
778 		chip = &newcfi->chips[0];
779 		for (i = 0; i < cfi->numchips; i++) {
780 			shared[i].writing = shared[i].erasing = NULL;
781 			mutex_init(&shared[i].lock);
782 			for (j = 0; j < numparts; j++) {
783 				*chip = cfi->chips[i];
784 				chip->start += j << partshift;
785 				chip->priv = &shared[i];
786 				/* those should be reset too since
787 				   they create memory references. */
788 				init_waitqueue_head(&chip->wq);
789 				mutex_init(&chip->mutex);
790 				chip++;
791 			}
792 		}
793 
794 		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
795 				  "--> %d partitions of %d KiB\n",
796 				  map->name, cfi->numchips, cfi->interleave,
797 				  newcfi->numchips, 1<<(newcfi->chipshift-10));
798 
799 		map->fldrv_priv = newcfi;
800 		*pcfi = newcfi;
801 		kfree(cfi);
802 	}
803 
804 	return 0;
805 }
806 
807 /*
808  *  *********** CHIP ACCESS FUNCTIONS ***********
809  */
chip_ready(struct map_info * map,struct flchip * chip,unsigned long adr,int mode)810 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
811 {
812 	DECLARE_WAITQUEUE(wait, current);
813 	struct cfi_private *cfi = map->fldrv_priv;
814 	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
815 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
816 	unsigned long timeo = jiffies + HZ;
817 
818 	/* Prevent setting state FL_SYNCING for chip in suspended state. */
819 	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
820 		goto sleep;
821 
822 	switch (chip->state) {
823 
824 	case FL_STATUS:
825 		for (;;) {
826 			status = map_read(map, adr);
827 			if (map_word_andequal(map, status, status_OK, status_OK))
828 				break;
829 
830 			/* At this point we're fine with write operations
831 			   in other partitions as they don't conflict. */
832 			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
833 				break;
834 
835 			mutex_unlock(&chip->mutex);
836 			cfi_udelay(1);
837 			mutex_lock(&chip->mutex);
838 			/* Someone else might have been playing with it. */
839 			return -EAGAIN;
840 		}
841 		fallthrough;
842 	case FL_READY:
843 	case FL_CFI_QUERY:
844 	case FL_JEDEC_QUERY:
845 		return 0;
846 
847 	case FL_ERASING:
848 		if (!cfip ||
849 		    !(cfip->FeatureSupport & 2) ||
850 		    !(mode == FL_READY || mode == FL_POINT ||
851 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
852 			goto sleep;
853 
854 		/* Do not allow suspend iff read/write to EB address */
855 		if ((adr & chip->in_progress_block_mask) ==
856 		    chip->in_progress_block_addr)
857 			goto sleep;
858 
859 		/* do not suspend small EBs, buggy Micron Chips */
860 		if (cfi_is_micron_28F00AP30(cfi, chip) &&
861 		    (chip->in_progress_block_mask == ~(0x8000-1)))
862 			goto sleep;
863 
864 		/* Erase suspend */
865 		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
866 
867 		/* If the flash has finished erasing, then 'erase suspend'
868 		 * appears to make some (28F320) flash devices switch to
869 		 * 'read' mode.  Make sure that we switch to 'read status'
870 		 * mode so we get the right data. --rmk
871 		 */
872 		map_write(map, CMD(0x70), chip->in_progress_block_addr);
873 		chip->oldstate = FL_ERASING;
874 		chip->state = FL_ERASE_SUSPENDING;
875 		chip->erase_suspended = 1;
876 		for (;;) {
877 			status = map_read(map, chip->in_progress_block_addr);
878 			if (map_word_andequal(map, status, status_OK, status_OK))
879 			        break;
880 
881 			if (time_after(jiffies, timeo)) {
882 				/* Urgh. Resume and pretend we weren't here.
883 				 * Make sure we're in 'read status' mode if it had finished */
884 				put_chip(map, chip, adr);
885 				printk(KERN_ERR "%s: Chip not ready after erase "
886 				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
887 				return -EIO;
888 			}
889 
890 			mutex_unlock(&chip->mutex);
891 			cfi_udelay(1);
892 			mutex_lock(&chip->mutex);
893 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
894 			   So we can just loop here. */
895 		}
896 		chip->state = FL_STATUS;
897 		return 0;
898 
899 	case FL_XIP_WHILE_ERASING:
900 		if (mode != FL_READY && mode != FL_POINT &&
901 		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
902 			goto sleep;
903 		chip->oldstate = chip->state;
904 		chip->state = FL_READY;
905 		return 0;
906 
907 	case FL_SHUTDOWN:
908 		/* The machine is rebooting now,so no one can get chip anymore */
909 		return -EIO;
910 	case FL_POINT:
911 		/* Only if there's no operation suspended... */
912 		if (mode == FL_READY && chip->oldstate == FL_READY)
913 			return 0;
914 		fallthrough;
915 	default:
916 	sleep:
917 		set_current_state(TASK_UNINTERRUPTIBLE);
918 		add_wait_queue(&chip->wq, &wait);
919 		mutex_unlock(&chip->mutex);
920 		schedule();
921 		remove_wait_queue(&chip->wq, &wait);
922 		mutex_lock(&chip->mutex);
923 		return -EAGAIN;
924 	}
925 }
926 
get_chip(struct map_info * map,struct flchip * chip,unsigned long adr,int mode)927 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
928 {
929 	int ret;
930 	DECLARE_WAITQUEUE(wait, current);
931 
932  retry:
933 	if (chip->priv &&
934 	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
935 	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
936 		/*
937 		 * OK. We have possibility for contention on the write/erase
938 		 * operations which are global to the real chip and not per
939 		 * partition.  So let's fight it over in the partition which
940 		 * currently has authority on the operation.
941 		 *
942 		 * The rules are as follows:
943 		 *
944 		 * - any write operation must own shared->writing.
945 		 *
946 		 * - any erase operation must own _both_ shared->writing and
947 		 *   shared->erasing.
948 		 *
949 		 * - contention arbitration is handled in the owner's context.
950 		 *
951 		 * The 'shared' struct can be read and/or written only when
952 		 * its lock is taken.
953 		 */
954 		struct flchip_shared *shared = chip->priv;
955 		struct flchip *contender;
956 		mutex_lock(&shared->lock);
957 		contender = shared->writing;
958 		if (contender && contender != chip) {
959 			/*
960 			 * The engine to perform desired operation on this
961 			 * partition is already in use by someone else.
962 			 * Let's fight over it in the context of the chip
963 			 * currently using it.  If it is possible to suspend,
964 			 * that other partition will do just that, otherwise
965 			 * it'll happily send us to sleep.  In any case, when
966 			 * get_chip returns success we're clear to go ahead.
967 			 */
968 			ret = mutex_trylock(&contender->mutex);
969 			mutex_unlock(&shared->lock);
970 			if (!ret)
971 				goto retry;
972 			mutex_unlock(&chip->mutex);
973 			ret = chip_ready(map, contender, contender->start, mode);
974 			mutex_lock(&chip->mutex);
975 
976 			if (ret == -EAGAIN) {
977 				mutex_unlock(&contender->mutex);
978 				goto retry;
979 			}
980 			if (ret) {
981 				mutex_unlock(&contender->mutex);
982 				return ret;
983 			}
984 			mutex_lock(&shared->lock);
985 
986 			/* We should not own chip if it is already
987 			 * in FL_SYNCING state. Put contender and retry. */
988 			if (chip->state == FL_SYNCING) {
989 				put_chip(map, contender, contender->start);
990 				mutex_unlock(&contender->mutex);
991 				goto retry;
992 			}
993 			mutex_unlock(&contender->mutex);
994 		}
995 
996 		/* Check if we already have suspended erase
997 		 * on this chip. Sleep. */
998 		if (mode == FL_ERASING && shared->erasing
999 		    && shared->erasing->oldstate == FL_ERASING) {
1000 			mutex_unlock(&shared->lock);
1001 			set_current_state(TASK_UNINTERRUPTIBLE);
1002 			add_wait_queue(&chip->wq, &wait);
1003 			mutex_unlock(&chip->mutex);
1004 			schedule();
1005 			remove_wait_queue(&chip->wq, &wait);
1006 			mutex_lock(&chip->mutex);
1007 			goto retry;
1008 		}
1009 
1010 		/* We now own it */
1011 		shared->writing = chip;
1012 		if (mode == FL_ERASING)
1013 			shared->erasing = chip;
1014 		mutex_unlock(&shared->lock);
1015 	}
1016 	ret = chip_ready(map, chip, adr, mode);
1017 	if (ret == -EAGAIN)
1018 		goto retry;
1019 
1020 	return ret;
1021 }
1022 
put_chip(struct map_info * map,struct flchip * chip,unsigned long adr)1023 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
1024 {
1025 	struct cfi_private *cfi = map->fldrv_priv;
1026 
1027 	if (chip->priv) {
1028 		struct flchip_shared *shared = chip->priv;
1029 		mutex_lock(&shared->lock);
1030 		if (shared->writing == chip && chip->oldstate == FL_READY) {
1031 			/* We own the ability to write, but we're done */
1032 			shared->writing = shared->erasing;
1033 			if (shared->writing && shared->writing != chip) {
1034 				/* give back ownership to who we loaned it from */
1035 				struct flchip *loaner = shared->writing;
1036 				mutex_lock(&loaner->mutex);
1037 				mutex_unlock(&shared->lock);
1038 				mutex_unlock(&chip->mutex);
1039 				put_chip(map, loaner, loaner->start);
1040 				mutex_lock(&chip->mutex);
1041 				mutex_unlock(&loaner->mutex);
1042 				wake_up(&chip->wq);
1043 				return;
1044 			}
1045 			shared->erasing = NULL;
1046 			shared->writing = NULL;
1047 		} else if (shared->erasing == chip && shared->writing != chip) {
1048 			/*
1049 			 * We own the ability to erase without the ability
1050 			 * to write, which means the erase was suspended
1051 			 * and some other partition is currently writing.
1052 			 * Don't let the switch below mess things up since
1053 			 * we don't have ownership to resume anything.
1054 			 */
1055 			mutex_unlock(&shared->lock);
1056 			wake_up(&chip->wq);
1057 			return;
1058 		}
1059 		mutex_unlock(&shared->lock);
1060 	}
1061 
1062 	switch(chip->oldstate) {
1063 	case FL_ERASING:
1064 		/* What if one interleaved chip has finished and the
1065 		   other hasn't? The old code would leave the finished
1066 		   one in READY mode. That's bad, and caused -EROFS
1067 		   errors to be returned from do_erase_oneblock because
1068 		   that's the only bit it checked for at the time.
1069 		   As the state machine appears to explicitly allow
1070 		   sending the 0x70 (Read Status) command to an erasing
1071 		   chip and expecting it to be ignored, that's what we
1072 		   do. */
1073 		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1074 		map_write(map, CMD(0x70), chip->in_progress_block_addr);
1075 		chip->oldstate = FL_READY;
1076 		chip->state = FL_ERASING;
1077 		break;
1078 
1079 	case FL_XIP_WHILE_ERASING:
1080 		chip->state = chip->oldstate;
1081 		chip->oldstate = FL_READY;
1082 		break;
1083 
1084 	case FL_READY:
1085 	case FL_STATUS:
1086 	case FL_JEDEC_QUERY:
1087 		break;
1088 	default:
1089 		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1090 	}
1091 	wake_up(&chip->wq);
1092 }
1093 
1094 #ifdef CONFIG_MTD_XIP
1095 
1096 /*
1097  * No interrupt what so ever can be serviced while the flash isn't in array
1098  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1099  * enclosing any code path where the flash is known not to be in array mode.
1100  * And within a XIP disabled code path, only functions marked with __xipram
1101  * may be called and nothing else (it's a good thing to inspect generated
1102  * assembly to make sure inline functions were actually inlined and that gcc
1103  * didn't emit calls to its own support functions). Also configuring MTD CFI
1104  * support to a single buswidth and a single interleave is also recommended.
1105  */
1106 
xip_disable(struct map_info * map,struct flchip * chip,unsigned long adr)1107 static void xip_disable(struct map_info *map, struct flchip *chip,
1108 			unsigned long adr)
1109 {
1110 	/* TODO: chips with no XIP use should ignore and return */
1111 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1112 	local_irq_disable();
1113 }
1114 
xip_enable(struct map_info * map,struct flchip * chip,unsigned long adr)1115 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1116 				unsigned long adr)
1117 {
1118 	struct cfi_private *cfi = map->fldrv_priv;
1119 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1120 		map_write(map, CMD(0xff), adr);
1121 		chip->state = FL_READY;
1122 	}
1123 	(void) map_read(map, adr);
1124 	xip_iprefetch();
1125 	local_irq_enable();
1126 }
1127 
1128 /*
1129  * When a delay is required for the flash operation to complete, the
1130  * xip_wait_for_operation() function is polling for both the given timeout
1131  * and pending (but still masked) hardware interrupts.  Whenever there is an
1132  * interrupt pending then the flash erase or write operation is suspended,
1133  * array mode restored and interrupts unmasked.  Task scheduling might also
1134  * happen at that point.  The CPU eventually returns from the interrupt or
1135  * the call to schedule() and the suspended flash operation is resumed for
1136  * the remaining of the delay period.
1137  *
1138  * Warning: this function _will_ fool interrupt latency tracing tools.
1139  */
1140 
xip_wait_for_operation(struct map_info * map,struct flchip * chip,unsigned long adr,unsigned int chip_op_time_max)1141 static int __xipram xip_wait_for_operation(
1142 		struct map_info *map, struct flchip *chip,
1143 		unsigned long adr, unsigned int chip_op_time_max)
1144 {
1145 	struct cfi_private *cfi = map->fldrv_priv;
1146 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1147 	map_word status, OK = CMD(0x80);
1148 	unsigned long usec, suspended, start, done;
1149 	flstate_t oldstate, newstate;
1150 
1151        	start = xip_currtime();
1152 	usec = chip_op_time_max;
1153 	if (usec == 0)
1154 		usec = 500000;
1155 	done = 0;
1156 
1157 	do {
1158 		cpu_relax();
1159 		if (xip_irqpending() && cfip &&
1160 		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1161 		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1162 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1163 			/*
1164 			 * Let's suspend the erase or write operation when
1165 			 * supported.  Note that we currently don't try to
1166 			 * suspend interleaved chips if there is already
1167 			 * another operation suspended (imagine what happens
1168 			 * when one chip was already done with the current
1169 			 * operation while another chip suspended it, then
1170 			 * we resume the whole thing at once).  Yes, it
1171 			 * can happen!
1172 			 */
1173 			usec -= done;
1174 			map_write(map, CMD(0xb0), adr);
1175 			map_write(map, CMD(0x70), adr);
1176 			suspended = xip_currtime();
1177 			do {
1178 				if (xip_elapsed_since(suspended) > 100000) {
1179 					/*
1180 					 * The chip doesn't want to suspend
1181 					 * after waiting for 100 msecs.
1182 					 * This is a critical error but there
1183 					 * is not much we can do here.
1184 					 */
1185 					return -EIO;
1186 				}
1187 				status = map_read(map, adr);
1188 			} while (!map_word_andequal(map, status, OK, OK));
1189 
1190 			/* Suspend succeeded */
1191 			oldstate = chip->state;
1192 			if (oldstate == FL_ERASING) {
1193 				if (!map_word_bitsset(map, status, CMD(0x40)))
1194 					break;
1195 				newstate = FL_XIP_WHILE_ERASING;
1196 				chip->erase_suspended = 1;
1197 			} else {
1198 				if (!map_word_bitsset(map, status, CMD(0x04)))
1199 					break;
1200 				newstate = FL_XIP_WHILE_WRITING;
1201 				chip->write_suspended = 1;
1202 			}
1203 			chip->state = newstate;
1204 			map_write(map, CMD(0xff), adr);
1205 			(void) map_read(map, adr);
1206 			xip_iprefetch();
1207 			local_irq_enable();
1208 			mutex_unlock(&chip->mutex);
1209 			xip_iprefetch();
1210 			cond_resched();
1211 
1212 			/*
1213 			 * We're back.  However someone else might have
1214 			 * decided to go write to the chip if we are in
1215 			 * a suspended erase state.  If so let's wait
1216 			 * until it's done.
1217 			 */
1218 			mutex_lock(&chip->mutex);
1219 			while (chip->state != newstate) {
1220 				DECLARE_WAITQUEUE(wait, current);
1221 				set_current_state(TASK_UNINTERRUPTIBLE);
1222 				add_wait_queue(&chip->wq, &wait);
1223 				mutex_unlock(&chip->mutex);
1224 				schedule();
1225 				remove_wait_queue(&chip->wq, &wait);
1226 				mutex_lock(&chip->mutex);
1227 			}
1228 			/* Disallow XIP again */
1229 			local_irq_disable();
1230 
1231 			/* Resume the write or erase operation */
1232 			map_write(map, CMD(0xd0), adr);
1233 			map_write(map, CMD(0x70), adr);
1234 			chip->state = oldstate;
1235 			start = xip_currtime();
1236 		} else if (usec >= 1000000/HZ) {
1237 			/*
1238 			 * Try to save on CPU power when waiting delay
1239 			 * is at least a system timer tick period.
1240 			 * No need to be extremely accurate here.
1241 			 */
1242 			xip_cpu_idle();
1243 		}
1244 		status = map_read(map, adr);
1245 		done = xip_elapsed_since(start);
1246 	} while (!map_word_andequal(map, status, OK, OK)
1247 		 && done < usec);
1248 
1249 	return (done >= usec) ? -ETIME : 0;
1250 }
1251 
1252 /*
1253  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1254  * the flash is actively programming or erasing since we have to poll for
1255  * the operation to complete anyway.  We can't do that in a generic way with
1256  * a XIP setup so do it before the actual flash operation in this case
1257  * and stub it out from INVAL_CACHE_AND_WAIT.
1258  */
1259 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1260 	INVALIDATE_CACHED_RANGE(map, from, size)
1261 
1262 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1263 	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1264 
1265 #else
1266 
1267 #define xip_disable(map, chip, adr)
1268 #define xip_enable(map, chip, adr)
1269 #define XIP_INVAL_CACHED_RANGE(x...)
1270 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1271 
inval_cache_and_wait_for_operation(struct map_info * map,struct flchip * chip,unsigned long cmd_adr,unsigned long inval_adr,int inval_len,unsigned int chip_op_time,unsigned int chip_op_time_max)1272 static int inval_cache_and_wait_for_operation(
1273 		struct map_info *map, struct flchip *chip,
1274 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1275 		unsigned int chip_op_time, unsigned int chip_op_time_max)
1276 {
1277 	struct cfi_private *cfi = map->fldrv_priv;
1278 	map_word status, status_OK = CMD(0x80);
1279 	int chip_state = chip->state;
1280 	unsigned int timeo, sleep_time, reset_timeo;
1281 
1282 	mutex_unlock(&chip->mutex);
1283 	if (inval_len)
1284 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1285 	mutex_lock(&chip->mutex);
1286 
1287 	timeo = chip_op_time_max;
1288 	if (!timeo)
1289 		timeo = 500000;
1290 	reset_timeo = timeo;
1291 	sleep_time = chip_op_time / 2;
1292 
1293 	for (;;) {
1294 		if (chip->state != chip_state) {
1295 			/* Someone's suspended the operation: sleep */
1296 			DECLARE_WAITQUEUE(wait, current);
1297 			set_current_state(TASK_UNINTERRUPTIBLE);
1298 			add_wait_queue(&chip->wq, &wait);
1299 			mutex_unlock(&chip->mutex);
1300 			schedule();
1301 			remove_wait_queue(&chip->wq, &wait);
1302 			mutex_lock(&chip->mutex);
1303 			continue;
1304 		}
1305 
1306 		status = map_read(map, cmd_adr);
1307 		if (map_word_andequal(map, status, status_OK, status_OK))
1308 			break;
1309 
1310 		if (chip->erase_suspended && chip_state == FL_ERASING)  {
1311 			/* Erase suspend occurred while sleep: reset timeout */
1312 			timeo = reset_timeo;
1313 			chip->erase_suspended = 0;
1314 		}
1315 		if (chip->write_suspended && chip_state == FL_WRITING)  {
1316 			/* Write suspend occurred while sleep: reset timeout */
1317 			timeo = reset_timeo;
1318 			chip->write_suspended = 0;
1319 		}
1320 		if (!timeo) {
1321 			map_write(map, CMD(0x70), cmd_adr);
1322 			chip->state = FL_STATUS;
1323 			return -ETIME;
1324 		}
1325 
1326 		/* OK Still waiting. Drop the lock, wait a while and retry. */
1327 		mutex_unlock(&chip->mutex);
1328 		if (sleep_time >= 1000000/HZ) {
1329 			/*
1330 			 * Half of the normal delay still remaining
1331 			 * can be performed with a sleeping delay instead
1332 			 * of busy waiting.
1333 			 */
1334 			msleep(sleep_time/1000);
1335 			timeo -= sleep_time;
1336 			sleep_time = 1000000/HZ;
1337 		} else {
1338 			udelay(1);
1339 			cond_resched();
1340 			timeo--;
1341 		}
1342 		mutex_lock(&chip->mutex);
1343 	}
1344 
1345 	/* Done and happy. */
1346  	chip->state = FL_STATUS;
1347 	return 0;
1348 }
1349 
1350 #endif
1351 
1352 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1353 	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1354 
1355 
do_point_onechip(struct map_info * map,struct flchip * chip,loff_t adr,size_t len)1356 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1357 {
1358 	unsigned long cmd_addr;
1359 	struct cfi_private *cfi = map->fldrv_priv;
1360 	int ret;
1361 
1362 	adr += chip->start;
1363 
1364 	/* Ensure cmd read/writes are aligned. */
1365 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1366 
1367 	mutex_lock(&chip->mutex);
1368 
1369 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1370 
1371 	if (!ret) {
1372 		if (chip->state != FL_POINT && chip->state != FL_READY)
1373 			map_write(map, CMD(0xff), cmd_addr);
1374 
1375 		chip->state = FL_POINT;
1376 		chip->ref_point_counter++;
1377 	}
1378 	mutex_unlock(&chip->mutex);
1379 
1380 	return ret;
1381 }
1382 
cfi_intelext_point(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,void ** virt,resource_size_t * phys)1383 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1384 		size_t *retlen, void **virt, resource_size_t *phys)
1385 {
1386 	struct map_info *map = mtd->priv;
1387 	struct cfi_private *cfi = map->fldrv_priv;
1388 	unsigned long ofs, last_end = 0;
1389 	int chipnum;
1390 	int ret;
1391 
1392 	if (!map->virt)
1393 		return -EINVAL;
1394 
1395 	/* Now lock the chip(s) to POINT state */
1396 
1397 	/* ofs: offset within the first chip that the first read should start */
1398 	chipnum = (from >> cfi->chipshift);
1399 	ofs = from - (chipnum << cfi->chipshift);
1400 
1401 	*virt = map->virt + cfi->chips[chipnum].start + ofs;
1402 	if (phys)
1403 		*phys = map->phys + cfi->chips[chipnum].start + ofs;
1404 
1405 	while (len) {
1406 		unsigned long thislen;
1407 
1408 		if (chipnum >= cfi->numchips)
1409 			break;
1410 
1411 		/* We cannot point across chips that are virtually disjoint */
1412 		if (!last_end)
1413 			last_end = cfi->chips[chipnum].start;
1414 		else if (cfi->chips[chipnum].start != last_end)
1415 			break;
1416 
1417 		if ((len + ofs -1) >> cfi->chipshift)
1418 			thislen = (1<<cfi->chipshift) - ofs;
1419 		else
1420 			thislen = len;
1421 
1422 		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1423 		if (ret)
1424 			break;
1425 
1426 		*retlen += thislen;
1427 		len -= thislen;
1428 
1429 		ofs = 0;
1430 		last_end += 1 << cfi->chipshift;
1431 		chipnum++;
1432 	}
1433 	return 0;
1434 }
1435 
cfi_intelext_unpoint(struct mtd_info * mtd,loff_t from,size_t len)1436 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1437 {
1438 	struct map_info *map = mtd->priv;
1439 	struct cfi_private *cfi = map->fldrv_priv;
1440 	unsigned long ofs;
1441 	int chipnum, err = 0;
1442 
1443 	/* Now unlock the chip(s) POINT state */
1444 
1445 	/* ofs: offset within the first chip that the first read should start */
1446 	chipnum = (from >> cfi->chipshift);
1447 	ofs = from - (chipnum <<  cfi->chipshift);
1448 
1449 	while (len && !err) {
1450 		unsigned long thislen;
1451 		struct flchip *chip;
1452 
1453 		chip = &cfi->chips[chipnum];
1454 		if (chipnum >= cfi->numchips)
1455 			break;
1456 
1457 		if ((len + ofs -1) >> cfi->chipshift)
1458 			thislen = (1<<cfi->chipshift) - ofs;
1459 		else
1460 			thislen = len;
1461 
1462 		mutex_lock(&chip->mutex);
1463 		if (chip->state == FL_POINT) {
1464 			chip->ref_point_counter--;
1465 			if(chip->ref_point_counter == 0)
1466 				chip->state = FL_READY;
1467 		} else {
1468 			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1469 			err = -EINVAL;
1470 		}
1471 
1472 		put_chip(map, chip, chip->start);
1473 		mutex_unlock(&chip->mutex);
1474 
1475 		len -= thislen;
1476 		ofs = 0;
1477 		chipnum++;
1478 	}
1479 
1480 	return err;
1481 }
1482 
do_read_onechip(struct map_info * map,struct flchip * chip,loff_t adr,size_t len,u_char * buf)1483 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1484 {
1485 	unsigned long cmd_addr;
1486 	struct cfi_private *cfi = map->fldrv_priv;
1487 	int ret;
1488 
1489 	adr += chip->start;
1490 
1491 	/* Ensure cmd read/writes are aligned. */
1492 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1493 
1494 	mutex_lock(&chip->mutex);
1495 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1496 	if (ret) {
1497 		mutex_unlock(&chip->mutex);
1498 		return ret;
1499 	}
1500 
1501 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1502 		map_write(map, CMD(0xff), cmd_addr);
1503 
1504 		chip->state = FL_READY;
1505 	}
1506 
1507 	map_copy_from(map, buf, adr, len);
1508 
1509 	put_chip(map, chip, cmd_addr);
1510 
1511 	mutex_unlock(&chip->mutex);
1512 	return 0;
1513 }
1514 
cfi_intelext_read(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)1515 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1516 {
1517 	struct map_info *map = mtd->priv;
1518 	struct cfi_private *cfi = map->fldrv_priv;
1519 	unsigned long ofs;
1520 	int chipnum;
1521 	int ret = 0;
1522 
1523 	/* ofs: offset within the first chip that the first read should start */
1524 	chipnum = (from >> cfi->chipshift);
1525 	ofs = from - (chipnum <<  cfi->chipshift);
1526 
1527 	while (len) {
1528 		unsigned long thislen;
1529 
1530 		if (chipnum >= cfi->numchips)
1531 			break;
1532 
1533 		if ((len + ofs -1) >> cfi->chipshift)
1534 			thislen = (1<<cfi->chipshift) - ofs;
1535 		else
1536 			thislen = len;
1537 
1538 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1539 		if (ret)
1540 			break;
1541 
1542 		*retlen += thislen;
1543 		len -= thislen;
1544 		buf += thislen;
1545 
1546 		ofs = 0;
1547 		chipnum++;
1548 	}
1549 	return ret;
1550 }
1551 
do_write_oneword(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum,int mode)1552 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1553 				     unsigned long adr, map_word datum, int mode)
1554 {
1555 	struct cfi_private *cfi = map->fldrv_priv;
1556 	map_word status, write_cmd;
1557 	int ret;
1558 
1559 	adr += chip->start;
1560 
1561 	switch (mode) {
1562 	case FL_WRITING:
1563 		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1564 		break;
1565 	case FL_OTP_WRITE:
1566 		write_cmd = CMD(0xc0);
1567 		break;
1568 	default:
1569 		return -EINVAL;
1570 	}
1571 
1572 	mutex_lock(&chip->mutex);
1573 	ret = get_chip(map, chip, adr, mode);
1574 	if (ret) {
1575 		mutex_unlock(&chip->mutex);
1576 		return ret;
1577 	}
1578 
1579 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1580 	ENABLE_VPP(map);
1581 	xip_disable(map, chip, adr);
1582 	map_write(map, write_cmd, adr);
1583 	map_write(map, datum, adr);
1584 	chip->state = mode;
1585 
1586 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1587 				   adr, map_bankwidth(map),
1588 				   chip->word_write_time,
1589 				   chip->word_write_time_max);
1590 	if (ret) {
1591 		xip_enable(map, chip, adr);
1592 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1593 		goto out;
1594 	}
1595 
1596 	/* check for errors */
1597 	status = map_read(map, adr);
1598 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1599 		unsigned long chipstatus = MERGESTATUS(status);
1600 
1601 		/* reset status */
1602 		map_write(map, CMD(0x50), adr);
1603 		map_write(map, CMD(0x70), adr);
1604 		xip_enable(map, chip, adr);
1605 
1606 		if (chipstatus & 0x02) {
1607 			ret = -EROFS;
1608 		} else if (chipstatus & 0x08) {
1609 			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1610 			ret = -EIO;
1611 		} else {
1612 			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1613 			ret = -EINVAL;
1614 		}
1615 
1616 		goto out;
1617 	}
1618 
1619 	xip_enable(map, chip, adr);
1620  out:	DISABLE_VPP(map);
1621 	put_chip(map, chip, adr);
1622 	mutex_unlock(&chip->mutex);
1623 	return ret;
1624 }
1625 
1626 
cfi_intelext_write_words(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)1627 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1628 {
1629 	struct map_info *map = mtd->priv;
1630 	struct cfi_private *cfi = map->fldrv_priv;
1631 	int ret;
1632 	int chipnum;
1633 	unsigned long ofs;
1634 
1635 	chipnum = to >> cfi->chipshift;
1636 	ofs = to  - (chipnum << cfi->chipshift);
1637 
1638 	/* If it's not bus-aligned, do the first byte write */
1639 	if (ofs & (map_bankwidth(map)-1)) {
1640 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1641 		int gap = ofs - bus_ofs;
1642 		int n;
1643 		map_word datum;
1644 
1645 		n = min_t(int, len, map_bankwidth(map)-gap);
1646 		datum = map_word_ff(map);
1647 		datum = map_word_load_partial(map, datum, buf, gap, n);
1648 
1649 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1650 					       bus_ofs, datum, FL_WRITING);
1651 		if (ret)
1652 			return ret;
1653 
1654 		len -= n;
1655 		ofs += n;
1656 		buf += n;
1657 		(*retlen) += n;
1658 
1659 		if (ofs >> cfi->chipshift) {
1660 			chipnum ++;
1661 			ofs = 0;
1662 			if (chipnum == cfi->numchips)
1663 				return 0;
1664 		}
1665 	}
1666 
1667 	while(len >= map_bankwidth(map)) {
1668 		map_word datum = map_word_load(map, buf);
1669 
1670 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1671 				       ofs, datum, FL_WRITING);
1672 		if (ret)
1673 			return ret;
1674 
1675 		ofs += map_bankwidth(map);
1676 		buf += map_bankwidth(map);
1677 		(*retlen) += map_bankwidth(map);
1678 		len -= map_bankwidth(map);
1679 
1680 		if (ofs >> cfi->chipshift) {
1681 			chipnum ++;
1682 			ofs = 0;
1683 			if (chipnum == cfi->numchips)
1684 				return 0;
1685 		}
1686 	}
1687 
1688 	if (len & (map_bankwidth(map)-1)) {
1689 		map_word datum;
1690 
1691 		datum = map_word_ff(map);
1692 		datum = map_word_load_partial(map, datum, buf, 0, len);
1693 
1694 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1695 				       ofs, datum, FL_WRITING);
1696 		if (ret)
1697 			return ret;
1698 
1699 		(*retlen) += len;
1700 	}
1701 
1702 	return 0;
1703 }
1704 
1705 
do_write_buffer(struct map_info * map,struct flchip * chip,unsigned long adr,const struct kvec ** pvec,unsigned long * pvec_seek,int len)1706 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1707 				    unsigned long adr, const struct kvec **pvec,
1708 				    unsigned long *pvec_seek, int len)
1709 {
1710 	struct cfi_private *cfi = map->fldrv_priv;
1711 	map_word status, write_cmd, datum;
1712 	unsigned long cmd_adr;
1713 	int ret, wbufsize, word_gap, words;
1714 	const struct kvec *vec;
1715 	unsigned long vec_seek;
1716 	unsigned long initial_adr;
1717 	int initial_len = len;
1718 
1719 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1720 	adr += chip->start;
1721 	initial_adr = adr;
1722 	cmd_adr = adr & ~(wbufsize-1);
1723 
1724 	/* Sharp LH28F640BF chips need the first address for the
1725 	 * Page Buffer Program command. See Table 5 of
1726 	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1727 	if (is_LH28F640BF(cfi))
1728 		cmd_adr = adr;
1729 
1730 	/* Let's determine this according to the interleave only once */
1731 	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1732 
1733 	mutex_lock(&chip->mutex);
1734 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1735 	if (ret) {
1736 		mutex_unlock(&chip->mutex);
1737 		return ret;
1738 	}
1739 
1740 	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1741 	ENABLE_VPP(map);
1742 	xip_disable(map, chip, cmd_adr);
1743 
1744 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1745 	   [...], the device will not accept any more Write to Buffer commands".
1746 	   So we must check here and reset those bits if they're set. Otherwise
1747 	   we're just pissing in the wind */
1748 	if (chip->state != FL_STATUS) {
1749 		map_write(map, CMD(0x70), cmd_adr);
1750 		chip->state = FL_STATUS;
1751 	}
1752 	status = map_read(map, cmd_adr);
1753 	if (map_word_bitsset(map, status, CMD(0x30))) {
1754 		xip_enable(map, chip, cmd_adr);
1755 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1756 		xip_disable(map, chip, cmd_adr);
1757 		map_write(map, CMD(0x50), cmd_adr);
1758 		map_write(map, CMD(0x70), cmd_adr);
1759 	}
1760 
1761 	chip->state = FL_WRITING_TO_BUFFER;
1762 	map_write(map, write_cmd, cmd_adr);
1763 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1764 	if (ret) {
1765 		/* Argh. Not ready for write to buffer */
1766 		map_word Xstatus = map_read(map, cmd_adr);
1767 		map_write(map, CMD(0x70), cmd_adr);
1768 		chip->state = FL_STATUS;
1769 		status = map_read(map, cmd_adr);
1770 		map_write(map, CMD(0x50), cmd_adr);
1771 		map_write(map, CMD(0x70), cmd_adr);
1772 		xip_enable(map, chip, cmd_adr);
1773 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1774 				map->name, Xstatus.x[0], status.x[0]);
1775 		goto out;
1776 	}
1777 
1778 	/* Figure out the number of words to write */
1779 	word_gap = (-adr & (map_bankwidth(map)-1));
1780 	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1781 	if (!word_gap) {
1782 		words--;
1783 	} else {
1784 		word_gap = map_bankwidth(map) - word_gap;
1785 		adr -= word_gap;
1786 		datum = map_word_ff(map);
1787 	}
1788 
1789 	/* Write length of data to come */
1790 	map_write(map, CMD(words), cmd_adr );
1791 
1792 	/* Write data */
1793 	vec = *pvec;
1794 	vec_seek = *pvec_seek;
1795 	do {
1796 		int n = map_bankwidth(map) - word_gap;
1797 		if (n > vec->iov_len - vec_seek)
1798 			n = vec->iov_len - vec_seek;
1799 		if (n > len)
1800 			n = len;
1801 
1802 		if (!word_gap && len < map_bankwidth(map))
1803 			datum = map_word_ff(map);
1804 
1805 		datum = map_word_load_partial(map, datum,
1806 					      vec->iov_base + vec_seek,
1807 					      word_gap, n);
1808 
1809 		len -= n;
1810 		word_gap += n;
1811 		if (!len || word_gap == map_bankwidth(map)) {
1812 			map_write(map, datum, adr);
1813 			adr += map_bankwidth(map);
1814 			word_gap = 0;
1815 		}
1816 
1817 		vec_seek += n;
1818 		if (vec_seek == vec->iov_len) {
1819 			vec++;
1820 			vec_seek = 0;
1821 		}
1822 	} while (len);
1823 	*pvec = vec;
1824 	*pvec_seek = vec_seek;
1825 
1826 	/* GO GO GO */
1827 	map_write(map, CMD(0xd0), cmd_adr);
1828 	chip->state = FL_WRITING;
1829 
1830 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1831 				   initial_adr, initial_len,
1832 				   chip->buffer_write_time,
1833 				   chip->buffer_write_time_max);
1834 	if (ret) {
1835 		map_write(map, CMD(0x70), cmd_adr);
1836 		chip->state = FL_STATUS;
1837 		xip_enable(map, chip, cmd_adr);
1838 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1839 		goto out;
1840 	}
1841 
1842 	/* check for errors */
1843 	status = map_read(map, cmd_adr);
1844 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1845 		unsigned long chipstatus = MERGESTATUS(status);
1846 
1847 		/* reset status */
1848 		map_write(map, CMD(0x50), cmd_adr);
1849 		map_write(map, CMD(0x70), cmd_adr);
1850 		xip_enable(map, chip, cmd_adr);
1851 
1852 		if (chipstatus & 0x02) {
1853 			ret = -EROFS;
1854 		} else if (chipstatus & 0x08) {
1855 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1856 			ret = -EIO;
1857 		} else {
1858 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1859 			ret = -EINVAL;
1860 		}
1861 
1862 		goto out;
1863 	}
1864 
1865 	xip_enable(map, chip, cmd_adr);
1866  out:	DISABLE_VPP(map);
1867 	put_chip(map, chip, cmd_adr);
1868 	mutex_unlock(&chip->mutex);
1869 	return ret;
1870 }
1871 
cfi_intelext_writev(struct mtd_info * mtd,const struct kvec * vecs,unsigned long count,loff_t to,size_t * retlen)1872 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1873 				unsigned long count, loff_t to, size_t *retlen)
1874 {
1875 	struct map_info *map = mtd->priv;
1876 	struct cfi_private *cfi = map->fldrv_priv;
1877 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1878 	int ret;
1879 	int chipnum;
1880 	unsigned long ofs, vec_seek, i;
1881 	size_t len = 0;
1882 
1883 	for (i = 0; i < count; i++)
1884 		len += vecs[i].iov_len;
1885 
1886 	if (!len)
1887 		return 0;
1888 
1889 	chipnum = to >> cfi->chipshift;
1890 	ofs = to - (chipnum << cfi->chipshift);
1891 	vec_seek = 0;
1892 
1893 	do {
1894 		/* We must not cross write block boundaries */
1895 		int size = wbufsize - (ofs & (wbufsize-1));
1896 
1897 		if (size > len)
1898 			size = len;
1899 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1900 				      ofs, &vecs, &vec_seek, size);
1901 		if (ret)
1902 			return ret;
1903 
1904 		ofs += size;
1905 		(*retlen) += size;
1906 		len -= size;
1907 
1908 		if (ofs >> cfi->chipshift) {
1909 			chipnum ++;
1910 			ofs = 0;
1911 			if (chipnum == cfi->numchips)
1912 				return 0;
1913 		}
1914 
1915 		/* Be nice and reschedule with the chip in a usable state for other
1916 		   processes. */
1917 		cond_resched();
1918 
1919 	} while (len);
1920 
1921 	return 0;
1922 }
1923 
cfi_intelext_write_buffers(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)1924 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1925 				       size_t len, size_t *retlen, const u_char *buf)
1926 {
1927 	struct kvec vec;
1928 
1929 	vec.iov_base = (void *) buf;
1930 	vec.iov_len = len;
1931 
1932 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1933 }
1934 
do_erase_oneblock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)1935 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1936 				      unsigned long adr, int len, void *thunk)
1937 {
1938 	struct cfi_private *cfi = map->fldrv_priv;
1939 	map_word status;
1940 	int retries = 3;
1941 	int ret;
1942 
1943 	adr += chip->start;
1944 
1945  retry:
1946 	mutex_lock(&chip->mutex);
1947 	ret = get_chip(map, chip, adr, FL_ERASING);
1948 	if (ret) {
1949 		mutex_unlock(&chip->mutex);
1950 		return ret;
1951 	}
1952 
1953 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1954 	ENABLE_VPP(map);
1955 	xip_disable(map, chip, adr);
1956 
1957 	/* Clear the status register first */
1958 	map_write(map, CMD(0x50), adr);
1959 
1960 	/* Now erase */
1961 	map_write(map, CMD(0x20), adr);
1962 	map_write(map, CMD(0xD0), adr);
1963 	chip->state = FL_ERASING;
1964 	chip->erase_suspended = 0;
1965 	chip->in_progress_block_addr = adr;
1966 	chip->in_progress_block_mask = ~(len - 1);
1967 
1968 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1969 				   adr, len,
1970 				   chip->erase_time,
1971 				   chip->erase_time_max);
1972 	if (ret) {
1973 		map_write(map, CMD(0x70), adr);
1974 		chip->state = FL_STATUS;
1975 		xip_enable(map, chip, adr);
1976 		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1977 		goto out;
1978 	}
1979 
1980 	/* We've broken this before. It doesn't hurt to be safe */
1981 	map_write(map, CMD(0x70), adr);
1982 	chip->state = FL_STATUS;
1983 	status = map_read(map, adr);
1984 
1985 	/* check for errors */
1986 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1987 		unsigned long chipstatus = MERGESTATUS(status);
1988 
1989 		/* Reset the error bits */
1990 		map_write(map, CMD(0x50), adr);
1991 		map_write(map, CMD(0x70), adr);
1992 		xip_enable(map, chip, adr);
1993 
1994 		if ((chipstatus & 0x30) == 0x30) {
1995 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1996 			ret = -EINVAL;
1997 		} else if (chipstatus & 0x02) {
1998 			/* Protection bit set */
1999 			ret = -EROFS;
2000 		} else if (chipstatus & 0x8) {
2001 			/* Voltage */
2002 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2003 			ret = -EIO;
2004 		} else if (chipstatus & 0x20 && retries--) {
2005 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2006 			DISABLE_VPP(map);
2007 			put_chip(map, chip, adr);
2008 			mutex_unlock(&chip->mutex);
2009 			goto retry;
2010 		} else {
2011 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2012 			ret = -EIO;
2013 		}
2014 
2015 		goto out;
2016 	}
2017 
2018 	xip_enable(map, chip, adr);
2019  out:	DISABLE_VPP(map);
2020 	put_chip(map, chip, adr);
2021 	mutex_unlock(&chip->mutex);
2022 	return ret;
2023 }
2024 
cfi_intelext_erase_varsize(struct mtd_info * mtd,struct erase_info * instr)2025 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2026 {
2027 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2028 				instr->len, NULL);
2029 }
2030 
cfi_intelext_sync(struct mtd_info * mtd)2031 static void cfi_intelext_sync (struct mtd_info *mtd)
2032 {
2033 	struct map_info *map = mtd->priv;
2034 	struct cfi_private *cfi = map->fldrv_priv;
2035 	int i;
2036 	struct flchip *chip;
2037 	int ret = 0;
2038 
2039 	for (i=0; !ret && i<cfi->numchips; i++) {
2040 		chip = &cfi->chips[i];
2041 
2042 		mutex_lock(&chip->mutex);
2043 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2044 
2045 		if (!ret) {
2046 			chip->oldstate = chip->state;
2047 			chip->state = FL_SYNCING;
2048 			/* No need to wake_up() on this state change -
2049 			 * as the whole point is that nobody can do anything
2050 			 * with the chip now anyway.
2051 			 */
2052 		}
2053 		mutex_unlock(&chip->mutex);
2054 	}
2055 
2056 	/* Unlock the chips again */
2057 
2058 	for (i--; i >=0; i--) {
2059 		chip = &cfi->chips[i];
2060 
2061 		mutex_lock(&chip->mutex);
2062 
2063 		if (chip->state == FL_SYNCING) {
2064 			chip->state = chip->oldstate;
2065 			chip->oldstate = FL_READY;
2066 			wake_up(&chip->wq);
2067 		}
2068 		mutex_unlock(&chip->mutex);
2069 	}
2070 }
2071 
do_getlockstatus_oneblock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2072 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2073 						struct flchip *chip,
2074 						unsigned long adr,
2075 						int len, void *thunk)
2076 {
2077 	struct cfi_private *cfi = map->fldrv_priv;
2078 	int status, ofs_factor = cfi->interleave * cfi->device_type;
2079 
2080 	adr += chip->start;
2081 	xip_disable(map, chip, adr+(2*ofs_factor));
2082 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
2083 	chip->state = FL_JEDEC_QUERY;
2084 	status = cfi_read_query(map, adr+(2*ofs_factor));
2085 	xip_enable(map, chip, 0);
2086 	return status;
2087 }
2088 
2089 #ifdef DEBUG_LOCK_BITS
do_printlockstatus_oneblock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2090 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2091 						struct flchip *chip,
2092 						unsigned long adr,
2093 						int len, void *thunk)
2094 {
2095 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2096 	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2097 	return 0;
2098 }
2099 #endif
2100 
2101 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2102 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2103 
do_xxlock_oneblock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2104 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2105 				       unsigned long adr, int len, void *thunk)
2106 {
2107 	struct cfi_private *cfi = map->fldrv_priv;
2108 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2109 	int mdelay;
2110 	int ret;
2111 
2112 	adr += chip->start;
2113 
2114 	mutex_lock(&chip->mutex);
2115 	ret = get_chip(map, chip, adr, FL_LOCKING);
2116 	if (ret) {
2117 		mutex_unlock(&chip->mutex);
2118 		return ret;
2119 	}
2120 
2121 	ENABLE_VPP(map);
2122 	xip_disable(map, chip, adr);
2123 
2124 	map_write(map, CMD(0x60), adr);
2125 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2126 		map_write(map, CMD(0x01), adr);
2127 		chip->state = FL_LOCKING;
2128 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2129 		map_write(map, CMD(0xD0), adr);
2130 		chip->state = FL_UNLOCKING;
2131 	} else
2132 		BUG();
2133 
2134 	/*
2135 	 * If Instant Individual Block Locking supported then no need
2136 	 * to delay.
2137 	 */
2138 	/*
2139 	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2140 	 * lets use a max of 1.5 seconds (1500ms) as timeout.
2141 	 *
2142 	 * See "Clear Block Lock-Bits Time" on page 40 in
2143 	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2144 	 * from February 2003
2145 	 */
2146 	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2147 
2148 	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2149 	if (ret) {
2150 		map_write(map, CMD(0x70), adr);
2151 		chip->state = FL_STATUS;
2152 		xip_enable(map, chip, adr);
2153 		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2154 		goto out;
2155 	}
2156 
2157 	xip_enable(map, chip, adr);
2158  out:	DISABLE_VPP(map);
2159 	put_chip(map, chip, adr);
2160 	mutex_unlock(&chip->mutex);
2161 	return ret;
2162 }
2163 
cfi_intelext_lock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2164 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2165 {
2166 	int ret;
2167 
2168 #ifdef DEBUG_LOCK_BITS
2169 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2170 	       __func__, ofs, len);
2171 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2172 		ofs, len, NULL);
2173 #endif
2174 
2175 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2176 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2177 
2178 #ifdef DEBUG_LOCK_BITS
2179 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2180 	       __func__, ret);
2181 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2182 		ofs, len, NULL);
2183 #endif
2184 
2185 	return ret;
2186 }
2187 
cfi_intelext_unlock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2188 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2189 {
2190 	int ret;
2191 
2192 #ifdef DEBUG_LOCK_BITS
2193 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2194 	       __func__, ofs, len);
2195 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2196 		ofs, len, NULL);
2197 #endif
2198 
2199 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2200 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2201 
2202 #ifdef DEBUG_LOCK_BITS
2203 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2204 	       __func__, ret);
2205 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2206 		ofs, len, NULL);
2207 #endif
2208 
2209 	return ret;
2210 }
2211 
cfi_intelext_is_locked(struct mtd_info * mtd,loff_t ofs,uint64_t len)2212 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2213 				  uint64_t len)
2214 {
2215 	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2216 				ofs, len, NULL) ? 1 : 0;
2217 }
2218 
2219 #ifdef CONFIG_MTD_OTP
2220 
2221 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2222 			u_long data_offset, u_char *buf, u_int size,
2223 			u_long prot_offset, u_int groupno, u_int groupsize);
2224 
2225 static int __xipram
do_otp_read(struct map_info * map,struct flchip * chip,u_long offset,u_char * buf,u_int size,u_long prot,u_int grpno,u_int grpsz)2226 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2227 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2228 {
2229 	struct cfi_private *cfi = map->fldrv_priv;
2230 	int ret;
2231 
2232 	mutex_lock(&chip->mutex);
2233 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2234 	if (ret) {
2235 		mutex_unlock(&chip->mutex);
2236 		return ret;
2237 	}
2238 
2239 	/* let's ensure we're not reading back cached data from array mode */
2240 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2241 
2242 	xip_disable(map, chip, chip->start);
2243 	if (chip->state != FL_JEDEC_QUERY) {
2244 		map_write(map, CMD(0x90), chip->start);
2245 		chip->state = FL_JEDEC_QUERY;
2246 	}
2247 	map_copy_from(map, buf, chip->start + offset, size);
2248 	xip_enable(map, chip, chip->start);
2249 
2250 	/* then ensure we don't keep OTP data in the cache */
2251 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2252 
2253 	put_chip(map, chip, chip->start);
2254 	mutex_unlock(&chip->mutex);
2255 	return 0;
2256 }
2257 
2258 static int
do_otp_write(struct map_info * map,struct flchip * chip,u_long offset,u_char * buf,u_int size,u_long prot,u_int grpno,u_int grpsz)2259 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2260 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2261 {
2262 	int ret;
2263 
2264 	while (size) {
2265 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2266 		int gap = offset - bus_ofs;
2267 		int n = min_t(int, size, map_bankwidth(map)-gap);
2268 		map_word datum = map_word_ff(map);
2269 
2270 		datum = map_word_load_partial(map, datum, buf, gap, n);
2271 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2272 		if (ret)
2273 			return ret;
2274 
2275 		offset += n;
2276 		buf += n;
2277 		size -= n;
2278 	}
2279 
2280 	return 0;
2281 }
2282 
2283 static int
do_otp_lock(struct map_info * map,struct flchip * chip,u_long offset,u_char * buf,u_int size,u_long prot,u_int grpno,u_int grpsz)2284 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2285 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2286 {
2287 	struct cfi_private *cfi = map->fldrv_priv;
2288 	map_word datum;
2289 
2290 	/* make sure area matches group boundaries */
2291 	if (size != grpsz)
2292 		return -EXDEV;
2293 
2294 	datum = map_word_ff(map);
2295 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2296 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2297 }
2298 
cfi_intelext_otp_walk(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf,otp_op_t action,int user_regs)2299 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2300 				 size_t *retlen, u_char *buf,
2301 				 otp_op_t action, int user_regs)
2302 {
2303 	struct map_info *map = mtd->priv;
2304 	struct cfi_private *cfi = map->fldrv_priv;
2305 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2306 	struct flchip *chip;
2307 	struct cfi_intelext_otpinfo *otp;
2308 	u_long devsize, reg_prot_offset, data_offset;
2309 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2310 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2311 	int ret;
2312 
2313 	*retlen = 0;
2314 
2315 	/* Check that we actually have some OTP registers */
2316 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2317 		return -ENODATA;
2318 
2319 	/* we need real chips here not virtual ones */
2320 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2321 	chip_step = devsize >> cfi->chipshift;
2322 	chip_num = 0;
2323 
2324 	/* Some chips have OTP located in the _top_ partition only.
2325 	   For example: Intel 28F256L18T (T means top-parameter device) */
2326 	if (cfi->mfr == CFI_MFR_INTEL) {
2327 		switch (cfi->id) {
2328 		case 0x880b:
2329 		case 0x880c:
2330 		case 0x880d:
2331 			chip_num = chip_step - 1;
2332 		}
2333 	}
2334 
2335 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2336 		chip = &cfi->chips[chip_num];
2337 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2338 
2339 		/* first OTP region */
2340 		field = 0;
2341 		reg_prot_offset = extp->ProtRegAddr;
2342 		reg_fact_groups = 1;
2343 		reg_fact_size = 1 << extp->FactProtRegSize;
2344 		reg_user_groups = 1;
2345 		reg_user_size = 1 << extp->UserProtRegSize;
2346 
2347 		while (len > 0) {
2348 			/* flash geometry fixup */
2349 			data_offset = reg_prot_offset + 1;
2350 			data_offset *= cfi->interleave * cfi->device_type;
2351 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2352 			reg_fact_size *= cfi->interleave;
2353 			reg_user_size *= cfi->interleave;
2354 
2355 			if (user_regs) {
2356 				groups = reg_user_groups;
2357 				groupsize = reg_user_size;
2358 				/* skip over factory reg area */
2359 				groupno = reg_fact_groups;
2360 				data_offset += reg_fact_groups * reg_fact_size;
2361 			} else {
2362 				groups = reg_fact_groups;
2363 				groupsize = reg_fact_size;
2364 				groupno = 0;
2365 			}
2366 
2367 			while (len > 0 && groups > 0) {
2368 				if (!action) {
2369 					/*
2370 					 * Special case: if action is NULL
2371 					 * we fill buf with otp_info records.
2372 					 */
2373 					struct otp_info *otpinfo;
2374 					map_word lockword;
2375 					len -= sizeof(struct otp_info);
2376 					if (len <= 0)
2377 						return -ENOSPC;
2378 					ret = do_otp_read(map, chip,
2379 							  reg_prot_offset,
2380 							  (u_char *)&lockword,
2381 							  map_bankwidth(map),
2382 							  0, 0,  0);
2383 					if (ret)
2384 						return ret;
2385 					otpinfo = (struct otp_info *)buf;
2386 					otpinfo->start = from;
2387 					otpinfo->length = groupsize;
2388 					otpinfo->locked =
2389 					   !map_word_bitsset(map, lockword,
2390 							     CMD(1 << groupno));
2391 					from += groupsize;
2392 					buf += sizeof(*otpinfo);
2393 					*retlen += sizeof(*otpinfo);
2394 				} else if (from >= groupsize) {
2395 					from -= groupsize;
2396 					data_offset += groupsize;
2397 				} else {
2398 					int size = groupsize;
2399 					data_offset += from;
2400 					size -= from;
2401 					from = 0;
2402 					if (size > len)
2403 						size = len;
2404 					ret = action(map, chip, data_offset,
2405 						     buf, size, reg_prot_offset,
2406 						     groupno, groupsize);
2407 					if (ret < 0)
2408 						return ret;
2409 					buf += size;
2410 					len -= size;
2411 					*retlen += size;
2412 					data_offset += size;
2413 				}
2414 				groupno++;
2415 				groups--;
2416 			}
2417 
2418 			/* next OTP region */
2419 			if (++field == extp->NumProtectionFields)
2420 				break;
2421 			reg_prot_offset = otp->ProtRegAddr;
2422 			reg_fact_groups = otp->FactGroups;
2423 			reg_fact_size = 1 << otp->FactProtRegSize;
2424 			reg_user_groups = otp->UserGroups;
2425 			reg_user_size = 1 << otp->UserProtRegSize;
2426 			otp++;
2427 		}
2428 	}
2429 
2430 	return 0;
2431 }
2432 
cfi_intelext_read_fact_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)2433 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2434 					   size_t len, size_t *retlen,
2435 					    u_char *buf)
2436 {
2437 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2438 				     buf, do_otp_read, 0);
2439 }
2440 
cfi_intelext_read_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)2441 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2442 					   size_t len, size_t *retlen,
2443 					    u_char *buf)
2444 {
2445 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2446 				     buf, do_otp_read, 1);
2447 }
2448 
cfi_intelext_write_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,const u_char * buf)2449 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2450 					    size_t len, size_t *retlen,
2451 					    const u_char *buf)
2452 {
2453 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2454 				     (u_char *)buf, do_otp_write, 1);
2455 }
2456 
cfi_intelext_lock_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len)2457 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2458 					   loff_t from, size_t len)
2459 {
2460 	size_t retlen;
2461 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2462 				     NULL, do_otp_lock, 1);
2463 }
2464 
cfi_intelext_get_fact_prot_info(struct mtd_info * mtd,size_t len,size_t * retlen,struct otp_info * buf)2465 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2466 					   size_t *retlen, struct otp_info *buf)
2467 
2468 {
2469 	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2470 				     NULL, 0);
2471 }
2472 
cfi_intelext_get_user_prot_info(struct mtd_info * mtd,size_t len,size_t * retlen,struct otp_info * buf)2473 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2474 					   size_t *retlen, struct otp_info *buf)
2475 {
2476 	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2477 				     NULL, 1);
2478 }
2479 
2480 #endif
2481 
cfi_intelext_save_locks(struct mtd_info * mtd)2482 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2483 {
2484 	struct mtd_erase_region_info *region;
2485 	int block, status, i;
2486 	unsigned long adr;
2487 	size_t len;
2488 
2489 	for (i = 0; i < mtd->numeraseregions; i++) {
2490 		region = &mtd->eraseregions[i];
2491 		if (!region->lockmap)
2492 			continue;
2493 
2494 		for (block = 0; block < region->numblocks; block++){
2495 			len = region->erasesize;
2496 			adr = region->offset + block * len;
2497 
2498 			status = cfi_varsize_frob(mtd,
2499 					do_getlockstatus_oneblock, adr, len, NULL);
2500 			if (status)
2501 				set_bit(block, region->lockmap);
2502 			else
2503 				clear_bit(block, region->lockmap);
2504 		}
2505 	}
2506 }
2507 
cfi_intelext_suspend(struct mtd_info * mtd)2508 static int cfi_intelext_suspend(struct mtd_info *mtd)
2509 {
2510 	struct map_info *map = mtd->priv;
2511 	struct cfi_private *cfi = map->fldrv_priv;
2512 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2513 	int i;
2514 	struct flchip *chip;
2515 	int ret = 0;
2516 
2517 	if ((mtd->flags & MTD_POWERUP_LOCK)
2518 	    && extp && (extp->FeatureSupport & (1 << 5)))
2519 		cfi_intelext_save_locks(mtd);
2520 
2521 	for (i=0; !ret && i<cfi->numchips; i++) {
2522 		chip = &cfi->chips[i];
2523 
2524 		mutex_lock(&chip->mutex);
2525 
2526 		switch (chip->state) {
2527 		case FL_READY:
2528 		case FL_STATUS:
2529 		case FL_CFI_QUERY:
2530 		case FL_JEDEC_QUERY:
2531 			if (chip->oldstate == FL_READY) {
2532 				/* place the chip in a known state before suspend */
2533 				map_write(map, CMD(0xFF), cfi->chips[i].start);
2534 				chip->oldstate = chip->state;
2535 				chip->state = FL_PM_SUSPENDED;
2536 				/* No need to wake_up() on this state change -
2537 				 * as the whole point is that nobody can do anything
2538 				 * with the chip now anyway.
2539 				 */
2540 			} else {
2541 				/* There seems to be an operation pending. We must wait for it. */
2542 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2543 				ret = -EAGAIN;
2544 			}
2545 			break;
2546 		default:
2547 			/* Should we actually wait? Once upon a time these routines weren't
2548 			   allowed to. Or should we return -EAGAIN, because the upper layers
2549 			   ought to have already shut down anything which was using the device
2550 			   anyway? The latter for now. */
2551 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2552 			ret = -EAGAIN;
2553 			break;
2554 		case FL_PM_SUSPENDED:
2555 			break;
2556 		}
2557 		mutex_unlock(&chip->mutex);
2558 	}
2559 
2560 	/* Unlock the chips again */
2561 
2562 	if (ret) {
2563 		for (i--; i >=0; i--) {
2564 			chip = &cfi->chips[i];
2565 
2566 			mutex_lock(&chip->mutex);
2567 
2568 			if (chip->state == FL_PM_SUSPENDED) {
2569 				/* No need to force it into a known state here,
2570 				   because we're returning failure, and it didn't
2571 				   get power cycled */
2572 				chip->state = chip->oldstate;
2573 				chip->oldstate = FL_READY;
2574 				wake_up(&chip->wq);
2575 			}
2576 			mutex_unlock(&chip->mutex);
2577 		}
2578 	}
2579 
2580 	return ret;
2581 }
2582 
cfi_intelext_restore_locks(struct mtd_info * mtd)2583 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2584 {
2585 	struct mtd_erase_region_info *region;
2586 	int block, i;
2587 	unsigned long adr;
2588 	size_t len;
2589 
2590 	for (i = 0; i < mtd->numeraseregions; i++) {
2591 		region = &mtd->eraseregions[i];
2592 		if (!region->lockmap)
2593 			continue;
2594 
2595 		for_each_clear_bit(block, region->lockmap, region->numblocks) {
2596 			len = region->erasesize;
2597 			adr = region->offset + block * len;
2598 			cfi_intelext_unlock(mtd, adr, len);
2599 		}
2600 	}
2601 }
2602 
cfi_intelext_resume(struct mtd_info * mtd)2603 static void cfi_intelext_resume(struct mtd_info *mtd)
2604 {
2605 	struct map_info *map = mtd->priv;
2606 	struct cfi_private *cfi = map->fldrv_priv;
2607 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2608 	int i;
2609 	struct flchip *chip;
2610 
2611 	for (i=0; i<cfi->numchips; i++) {
2612 
2613 		chip = &cfi->chips[i];
2614 
2615 		mutex_lock(&chip->mutex);
2616 
2617 		/* Go to known state. Chip may have been power cycled */
2618 		if (chip->state == FL_PM_SUSPENDED) {
2619 			/* Refresh LH28F640BF Partition Config. Register */
2620 			fixup_LH28F640BF(mtd);
2621 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2622 			chip->oldstate = chip->state = FL_READY;
2623 			wake_up(&chip->wq);
2624 		}
2625 
2626 		mutex_unlock(&chip->mutex);
2627 	}
2628 
2629 	if ((mtd->flags & MTD_POWERUP_LOCK)
2630 	    && extp && (extp->FeatureSupport & (1 << 5)))
2631 		cfi_intelext_restore_locks(mtd);
2632 }
2633 
cfi_intelext_reset(struct mtd_info * mtd)2634 static int cfi_intelext_reset(struct mtd_info *mtd)
2635 {
2636 	struct map_info *map = mtd->priv;
2637 	struct cfi_private *cfi = map->fldrv_priv;
2638 	int i, ret;
2639 
2640 	for (i=0; i < cfi->numchips; i++) {
2641 		struct flchip *chip = &cfi->chips[i];
2642 
2643 		/* force the completion of any ongoing operation
2644 		   and switch to array mode so any bootloader in
2645 		   flash is accessible for soft reboot. */
2646 		mutex_lock(&chip->mutex);
2647 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2648 		if (!ret) {
2649 			map_write(map, CMD(0xff), chip->start);
2650 			chip->state = FL_SHUTDOWN;
2651 			put_chip(map, chip, chip->start);
2652 		}
2653 		mutex_unlock(&chip->mutex);
2654 	}
2655 
2656 	return 0;
2657 }
2658 
cfi_intelext_reboot(struct notifier_block * nb,unsigned long val,void * v)2659 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2660 			       void *v)
2661 {
2662 	struct mtd_info *mtd;
2663 
2664 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2665 	cfi_intelext_reset(mtd);
2666 	return NOTIFY_DONE;
2667 }
2668 
cfi_intelext_destroy(struct mtd_info * mtd)2669 static void cfi_intelext_destroy(struct mtd_info *mtd)
2670 {
2671 	struct map_info *map = mtd->priv;
2672 	struct cfi_private *cfi = map->fldrv_priv;
2673 	struct mtd_erase_region_info *region;
2674 	int i;
2675 	cfi_intelext_reset(mtd);
2676 	unregister_reboot_notifier(&mtd->reboot_notifier);
2677 	kfree(cfi->cmdset_priv);
2678 	kfree(cfi->cfiq);
2679 	kfree(cfi->chips[0].priv);
2680 	kfree(cfi);
2681 	for (i = 0; i < mtd->numeraseregions; i++) {
2682 		region = &mtd->eraseregions[i];
2683 		kfree(region->lockmap);
2684 	}
2685 	kfree(mtd->eraseregions);
2686 }
2687 
2688 MODULE_LICENSE("GPL");
2689 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2690 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2691 MODULE_ALIAS("cfi_cmdset_0003");
2692 MODULE_ALIAS("cfi_cmdset_0200");
2693