/* Common Flash Interface structures
 * See http://support.intel.com/design/flash/technote/index.htm
 * $Id: cfi.h,v 1.32 2002/09/05 05:15:32 acurtis Exp $
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/config.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/cfi_endian.h>

/*
 * You can optimize the code size and performance by defining only
 * the geometry(ies) available on your hardware.
 * CFIDEV_INTERLEAVE_n, where n is the interleave (the number of chips needed to fill the bus width)
 * CFIDEV_BUSWIDTH_n, where n is the bus width in bytes (1, 2, 4 or 8 bytes)
 *
 * By default, all (known) geometries are supported.
 */
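
/*
 * Example (illustrative only, not part of the original header): a board with
 * two x16 chips filling a 32-bit bus corresponds to interleave 2 and
 * buswidth 4, so with CONFIG_MTD_CFI_GEOMETRY it would enable just
 *
 *	CONFIG_MTD_CFI_I2=y
 *	CONFIG_MTD_CFI_B4=y
 *
 * which makes every other cfi_interleave_is_*() / cfi_buswidth_is_*() test
 * below expand to a constant 0 and get optimized away.
 */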

#ifndef CONFIG_MTD_CFI_GEOMETRY

/* The default case - support all but 64-bit, which has
   a performance penalty */

#define CFIDEV_INTERLEAVE_1 (1)
#define CFIDEV_INTERLEAVE_2 (2)
#define CFIDEV_INTERLEAVE_4 (4)

#define CFIDEV_BUSWIDTH_1 (1)
#define CFIDEV_BUSWIDTH_2 (2)
#define CFIDEV_BUSWIDTH_4 (4)

typedef __u32 cfi_word;

#else

/* Explicitly configured buswidth/interleave support */

#ifdef CONFIG_MTD_CFI_I1
#define CFIDEV_INTERLEAVE_1 (1)
#endif
#ifdef CONFIG_MTD_CFI_I2
#define CFIDEV_INTERLEAVE_2 (2)
#endif
#ifdef CONFIG_MTD_CFI_I4
#define CFIDEV_INTERLEAVE_4 (4)
#endif
#ifdef CONFIG_MTD_CFI_I8
#define CFIDEV_INTERLEAVE_8 (8)
#endif

#ifdef CONFIG_MTD_CFI_B1
#define CFIDEV_BUSWIDTH_1 (1)
#endif
#ifdef CONFIG_MTD_CFI_B2
#define CFIDEV_BUSWIDTH_2 (2)
#endif
#ifdef CONFIG_MTD_CFI_B4
#define CFIDEV_BUSWIDTH_4 (4)
#endif
#ifdef CONFIG_MTD_CFI_B8
#define CFIDEV_BUSWIDTH_8 (8)
#endif

/* pick the largest necessary */
#ifdef CONFIG_MTD_CFI_B8
typedef __u64 cfi_word;

/* This only works if asm/io.h is included first */
#ifndef __raw_readll
#define __raw_readll(addr)	(*(volatile __u64 *)(addr))
#endif
#ifndef __raw_writell
#define __raw_writell(v, addr)	(*(volatile __u64 *)(addr) = (v))
#endif
#define CFI_WORD_64
#else  /* CONFIG_MTD_CFI_B8 */
/* All others can use 32-bits. It's probably more efficient than
   the smaller types anyway */
typedef __u32 cfi_word;
#endif /* CONFIG_MTD_CFI_B8 */

#endif /* CONFIG_MTD_CFI_GEOMETRY */

/*
 * The following macros are used to select the code to execute:
 *   cfi_buswidth_is_*()
 *   cfi_interleave_is_*()
 *   [where * is either 1, 2, 4, or 8]
 * These macros should be used in 'if' statements.  If only one or a few
 * geometry arrangements are selected, they expand to constants (most of
 * them being 0), allowing the compiler to optimize away all the unneeded
 * code while still validating the syntax (which is not possible with
 * embedded #if ... #endif constructs).
 * The exception is the 64-bit case, which needs an extension to the
 * cfi_word type and would otherwise cause compiler warnings about shifts
 * being out of range.
 */
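
/*
 * A minimal sketch (illustrative, not part of this header) of how the
 * selector macros are meant to be used; 'map' stands for the driver's
 * struct map_info, as in the helpers further down:
 *
 *	if (cfi_buswidth_is_2()) {
 *		map->write16(map, cpu_to_cfi16(0xF0), 0);	// 16-bit bus path
 *	} else if (cfi_buswidth_is_4()) {
 *		map->write32(map, cpu_to_cfi32(0xF0), 0);	// 32-bit bus path
 *	}
 *
 * When only CFIDEV_BUSWIDTH_2 is defined, the second branch is a constant 0
 * and is dropped by the compiler, yet it is still parsed and type-checked.
 */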

#ifdef CFIDEV_INTERLEAVE_1
# ifdef CFIDEV_INTERLEAVE
#  undef CFIDEV_INTERLEAVE
#  define CFIDEV_INTERLEAVE (cfi->interleave)
# else
#  define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_1
# endif
# define cfi_interleave_is_1() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_1)
#else
# define cfi_interleave_is_1() (0)
#endif

#ifdef CFIDEV_INTERLEAVE_2
# ifdef CFIDEV_INTERLEAVE
#  undef CFIDEV_INTERLEAVE
#  define CFIDEV_INTERLEAVE (cfi->interleave)
# else
#  define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_2
# endif
# define cfi_interleave_is_2() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_2)
#else
# define cfi_interleave_is_2() (0)
#endif

#ifdef CFIDEV_INTERLEAVE_4
# ifdef CFIDEV_INTERLEAVE
#  undef CFIDEV_INTERLEAVE
#  define CFIDEV_INTERLEAVE (cfi->interleave)
# else
#  define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_4
# endif
# define cfi_interleave_is_4() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_4)
#else
# define cfi_interleave_is_4() (0)
#endif

#ifdef CFIDEV_INTERLEAVE_8
# ifdef CFIDEV_INTERLEAVE
#  undef CFIDEV_INTERLEAVE
#  define CFIDEV_INTERLEAVE (cfi->interleave)
# else
#  define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_8
# endif
# define cfi_interleave_is_8() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_8)
#else
# define cfi_interleave_is_8() (0)
#endif

#ifndef CFIDEV_INTERLEAVE
#error You must define at least one interleave to support!
#endif

#ifdef CFIDEV_BUSWIDTH_1
# ifdef CFIDEV_BUSWIDTH
#  undef CFIDEV_BUSWIDTH
#  define CFIDEV_BUSWIDTH (map->buswidth)
# else
#  define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_1
# endif
# define cfi_buswidth_is_1() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_1)
#else
# define cfi_buswidth_is_1() (0)
#endif

#ifdef CFIDEV_BUSWIDTH_2
# ifdef CFIDEV_BUSWIDTH
#  undef CFIDEV_BUSWIDTH
#  define CFIDEV_BUSWIDTH (map->buswidth)
# else
#  define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_2
# endif
# define cfi_buswidth_is_2() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_2)
#else
# define cfi_buswidth_is_2() (0)
#endif

#ifdef CFIDEV_BUSWIDTH_4
# ifdef CFIDEV_BUSWIDTH
#  undef CFIDEV_BUSWIDTH
#  define CFIDEV_BUSWIDTH (map->buswidth)
# else
#  define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_4
# endif
# define cfi_buswidth_is_4() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_4)
#else
# define cfi_buswidth_is_4() (0)
#endif

#ifdef CFIDEV_BUSWIDTH_8
# ifdef CFIDEV_BUSWIDTH
#  undef CFIDEV_BUSWIDTH
#  define CFIDEV_BUSWIDTH (map->buswidth)
# else
#  define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_8
# endif
# define cfi_buswidth_is_8() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_8)
#else
# define cfi_buswidth_is_8() (0)
#endif

#ifndef CFIDEV_BUSWIDTH
#error You must define at least one bus width to support!
#endif

/* NB: these values must represent the number of bytes needed to meet the
 *     device type (x8, x16, x32).  E.g. a 32-bit device is 4 bytes wide
 *     (4 x 8 bits).  These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
  __u8  qry[3];
  __u16 P_ID;
  __u16 P_ADR;
  __u16 A_ID;
  __u16 A_ADR;
  __u8  VccMin;
  __u8  VccMax;
  __u8  VppMin;
  __u8  VppMax;
  __u8  WordWriteTimeoutTyp;
  __u8  BufWriteTimeoutTyp;
  __u8  BlockEraseTimeoutTyp;
  __u8  ChipEraseTimeoutTyp;
  __u8  WordWriteTimeoutMax;
  __u8  BufWriteTimeoutMax;
  __u8  BlockEraseTimeoutMax;
  __u8  ChipEraseTimeoutMax;
  __u8  DevSize;
  __u16 InterfaceDesc;
  __u16 MaxBufWriteSize;
  __u8  NumEraseRegions;
  __u32 EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
  __u8  pri[3];
  __u8  MajorVersion;
  __u8  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
  __u8  pri[3];
  __u8  MajorVersion;
  __u8  MinorVersion;
  __u32 FeatureSupport;
  __u8  SuspendCmdSupport;
  __u16 BlkStatusRegMask;
  __u8  VccOptimal;
  __u8  VppOptimal;
  __u8  NumProtectionFields;
  __u16 ProtRegAddr;
  __u8  FactProtRegSize;
  __u8  UserProtRegSize;
} __attribute__((packed));

struct cfi_pri_query {
  __u8  NumFields;
  __u32 ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
  __u8  PageModeReadCap;
  __u8  NumFields;
  __u32 ConfField[1]; /* Not host ordered */
} __attribute__((packed));

#define P_ID_NONE 0
#define P_ID_INTEL_EXT 1
#define P_ID_AMD_STD 2
#define P_ID_INTEL_STD 3
#define P_ID_AMD_EXT 4
#define P_ID_MITSUBISHI_STD 256
#define P_ID_MITSUBISHI_EXT 257
#define P_ID_RESERVED 65535


#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0

struct cfi_private {
	__u16 cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	int fast_prog;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				  must be of the same type. */
	int mfr, id;
	int numchips;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];  /* per-chip data structure for each chip */
};

#define MAX_CFI_CHIPS 8 /* Entirely arbitrary to avoid realloc() */

/*
 * Returns the command address according to the given geometry.
 */
static inline __u32 cfi_build_cmd_addr(__u32 cmd_ofs, int interleave, int type)
{
	return (cmd_ofs * type) * interleave;
}
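
/*
 * Worked example (illustrative, not part of the original header): for two
 * interleaved x16 chips (interleave = 2, type = CFI_DEVICETYPE_X16 = 2),
 * the chip-relative command offset 0x555 lands at
 *
 *	cfi_build_cmd_addr(0x555, 2, CFI_DEVICETYPE_X16) == 0x555 * 2 * 2 == 0x1554
 *
 * i.e. the per-chip word offset is scaled by the device width in bytes and
 * then by the number of interleaved chips to give a bus address offset.
 */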

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 */
static inline cfi_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
{
	cfi_word val = 0;

	if (cfi_buswidth_is_1()) {
		/* 1 x8 device */
		val = cmd;
	} else if (cfi_buswidth_is_2()) {
		if (cfi_interleave_is_1()) {
			/* 1 x16 device in x16 mode */
			val = cpu_to_cfi16(cmd);
		} else if (cfi_interleave_is_2()) {
			/* 2 (x8, x16 or x32) devices in x8 mode */
			val = cpu_to_cfi16((cmd << 8) | cmd);
		}
	} else if (cfi_buswidth_is_4()) {
		if (cfi_interleave_is_1()) {
			/* 1 x32 device in x32 mode */
			val = cpu_to_cfi32(cmd);
		} else if (cfi_interleave_is_2()) {
			/* 2 x16 devices in x16 mode */
			val = cpu_to_cfi32((cmd << 16) | cmd);
		} else if (cfi_interleave_is_4()) {
			/* 4 (x8, x16 or x32) devices in x8 mode */
			val = (cmd << 16) | cmd;
			val = cpu_to_cfi32((val << 8) | val);
		}
#ifdef CFI_WORD_64
	} else if (cfi_buswidth_is_8()) {
		if (cfi_interleave_is_1()) {
			/* 1 x64 device in x64 mode */
			val = cpu_to_cfi64(cmd);
		} else if (cfi_interleave_is_2()) {
			/* 2 x32 devices in x32 mode */
			val = cmd;
			val = cpu_to_cfi64((val << 32) | val);
		} else if (cfi_interleave_is_4()) {
			/* 4 (x16, x32 or x64) devices in x16 mode */
			val = (cmd << 16) | cmd;
			val = cpu_to_cfi64((val << 32) | val);
		} else if (cfi_interleave_is_8()) {
			/* 8 (x8, x16 or x32) devices in x8 mode */
			val = (cmd << 8) | cmd;
			val = (val << 16) | val;
			val = (val << 32) | val;
			val = cpu_to_cfi64(val);
		}
#endif /* CFI_WORD_64 */
	}
	return val;
}
#define CMD(x)  cfi_build_cmd((x), map, cfi)
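
/*
 * Worked example (illustrative, not part of the original header): with two
 * interleaved x16 chips on a 32-bit bus (cfi_buswidth_is_4() &&
 * cfi_interleave_is_2()), the command byte is replicated once per chip, so
 *
 *	CMD(0x98) == cpu_to_cfi32((0x98 << 16) | 0x98) == cpu_to_cfi32(0x00980098)
 *
 * and writing it presents the same command (here CFI Query, 0x98) to both
 * chips in a single 32-bit bus cycle.
 */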

/*
 * Read a value according to the bus width.
 */

static inline cfi_word cfi_read(struct map_info *map, __u32 addr)
{
	if (cfi_buswidth_is_1()) {
		return map->read8(map, addr);
	} else if (cfi_buswidth_is_2()) {
		return map->read16(map, addr);
	} else if (cfi_buswidth_is_4()) {
		return map->read32(map, addr);
	} else if (cfi_buswidth_is_8()) {
		return map->read64(map, addr);
	} else {
		return 0;
	}
}

/*
 * Write a value according to the bus width.
 */

static inline void cfi_write(struct map_info *map, cfi_word val, __u32 addr)
{
	if (cfi_buswidth_is_1()) {
		map->write8(map, val, addr);
	} else if (cfi_buswidth_is_2()) {
		map->write16(map, val, addr);
	} else if (cfi_buswidth_is_4()) {
		map->write32(map, val, addr);
	} else if (cfi_buswidth_is_8()) {
		map->write64(map, val, addr);
	}
}

/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
static inline __u32 cfi_send_gen_cmd(u_char cmd, __u32 cmd_addr, __u32 base,
				struct map_info *map, struct cfi_private *cfi,
				int type, cfi_word *prev_val)
{
	cfi_word val;
	__u32 addr = base + cfi_build_cmd_addr(cmd_addr, CFIDEV_INTERLEAVE, type);

	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = cfi_read(map, addr);

	cfi_write(map, val, addr);

	return addr - base;
}
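
/*
 * Usage sketch (illustrative, not part of the original header): the chip
 * drivers issue command sequences through this helper.  An AMD-style unlock
 * followed by an autoselect (read ID) command could look like:
 *
 *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
 *
 * where 'base' is the chip's start offset within the map and the NULL means
 * the previous bus value is not needed.
 */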

static inline __u8 cfi_read_query(struct map_info *map, __u32 addr)
{
	if (cfi_buswidth_is_1()) {
		return map->read8(map, addr);
	} else if (cfi_buswidth_is_2()) {
		return cfi16_to_cpu(map->read16(map, addr));
	} else if (cfi_buswidth_is_4()) {
		return cfi32_to_cpu(map->read32(map, addr));
	} else if (cfi_buswidth_is_8()) {
		return cfi64_to_cpu(map->read64(map, addr));
	} else {
		return 0;
	}
}
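
/*
 * Usage sketch (illustrative, not part of the original header): once a chip
 * is in query mode, the "QRY" signature starts at CFI query offset 0x10, so
 * a probe might check the first byte with something like:
 *
 *	if (cfi_read_query(map, base + cfi_build_cmd_addr(0x10, CFIDEV_INTERLEAVE, cfi->device_type)) == 'Q')
 *		...
 *
 * The value read from the bus is converted from CFI (little-endian) order to
 * host order and truncated to a single byte.
 */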

static inline void cfi_udelay(int us)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
	unsigned long t = us * HZ / 1000000;
	if (t) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(t);
		return;
	}
#endif
	udelay(us);
	cond_resched();
}

static inline void cfi_spin_lock(spinlock_t *mutex)
{
	spin_lock_bh(mutex);
}

static inline void cfi_spin_unlock(spinlock_t *mutex)
{
	spin_unlock_bh(mutex);
}


#endif /* __MTD_CFI_H__ */