/*
 * Common Flash Interface support:
 *   Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

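/*
 * Check whether the chip mapped at 'base' is answering the CFI Query
 * command: the ASCII string "QRY" must appear at query offsets 0x10,
 * 0x11 and 0x12, scaled by the interleave and device width.
 * Returns 1 if the chip is in query mode, 0 otherwise.
 */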
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1;	/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);

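/*
 * Switch the chip mapped at 'base' into CFI Query mode.  Several
 * reset/query command sequences are tried in turn, because different
 * vendors expect the CFI Query command at different addresses.
 * Returns 1 if the "QRY" marker was found, 0 otherwise.
 */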
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found; probably we are dealing with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* some old SST chips, e.g. 39VF160x/39VF320x */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* SST 39VF640xB */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);

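/*
 * Take the chip mapped at 'base' back out of Query mode and into
 * normal array (read) mode.
 */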
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exiting query mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);

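/*
 * Read the 'size'-byte vendor-specific (primary) extended query table
 * advertised at query offset 'adr'.  Returns a kmalloc()ed copy of the
 * table, which the caller is responsible for freeing, or NULL if no
 * table is present (adr == 0) or the allocation fails.
 */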
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	if (!adr)
		goto out;

	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp) {
		printk(KERN_ERR "Failed to allocate memory\n");
		goto out;
	}

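	/*
	 * With XIP the kernel may be executing directly from this flash,
	 * which cannot serve instruction fetches while it is in query mode,
	 * so keep interrupts off until the chip is back in array mode.
	 */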
#ifdef CONFIG_MTD_XIP
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i=0; i<size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base+((adr+i)*ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

 out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);

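/*
 * Walk the 'fixups' array (terminated by an entry with a NULL fixup
 * callback) and invoke every fixup whose manufacturer and device IDs
 * match the probed chip; CFI_MFR_ANY and CFI_ID_ANY act as wildcards.
 */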
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f=fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
			f->fixup(mtd);
		}
	}
}

EXPORT_SYMBOL(cfi_fixup);

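/*
 * Apply the 'frob' callback to each erase block in the range
 * [ofs, ofs + len) of a device with variable-size erase regions,
 * after checking that both ends of the range are aligned to the
 * erase size in effect at those addresses.  Returns -EINVAL on a
 * misaligned request, otherwise 0 or the first error from 'frob'.
 */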
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
				     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which end before the start of the
	   requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i = first;

	while (len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

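		/* Advance to the next erase region once the current one is exhausted */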
		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

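		/* Crossed a chip boundary: restart the chip-relative address */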
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);

MODULE_LICENSE("GPL");