/*
 * This file contains an ECC algorithm that detects and corrects 1 bit
 * errors in a 256 byte block of data.
 *
 * drivers/mtd/nand/nand_ecc.c
 *
 * Copyright © 2008 Koninklijke Philips Electronics NV.
 * Author: Frans Meulenbroeks
 *
 * Completely replaces the previous ECC implementation which was written by:
 *   Steven J. Hill (sjhill@realitydiluted.com)
 *   Thomas Gleixner (tglx@linutronix.de)
 *
 * Information on how this algorithm works and how it was developed
 * can be found in Documentation/mtd/nand_ecc.txt
 *
 * This file is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this file; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * The STANDALONE macro is useful when running the code outside the kernel,
 * e.g. when running the code in a testbed or a benchmark program.
 * When STANDALONE is defined, the module-related macros are stubbed out
 * and the Linux include files are left out.
 * Instead a private declaration of struct mtd_info is given to satisfy the
 * compiler (the code does not use mtd_info, so the code does not care).
 */
#ifndef STANDALONE
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/byteorder.h>
#else
#include <stdint.h>
struct mtd_info;
#define EXPORT_SYMBOL(x) /* x */

#define MODULE_LICENSE(x) /* x */
#define MODULE_AUTHOR(x) /* x */
#define MODULE_DESCRIPTION(x) /* x */

#define printk printf
#define KERN_ERR ""
#endif
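
/*
 * Illustrative note (not part of the original driver): under the STANDALONE
 * path above, this file can be built by an ordinary hosted toolchain with
 * something along the lines of
 *
 *	cc -DSTANDALONE -c nand_ecc.c
 *
 * after which __nand_calculate_ecc() and __nand_correct_data() can be
 * linked into a testbed or benchmark program; see the usage sketches
 * further down in this file.
 */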

/*
 * invparity is a 256 byte table that contains the odd parity
 * for each byte. So if the number of bits in a byte is even,
 * the array element is 1, and when the number of bits is odd
 * the array element is 0.
 */
static const char invparity[256] = {
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
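
/*
 * Illustrative only (not part of the driver): the table above is simply the
 * complement of each byte's parity, so it could equally be computed at run
 * time along these lines (filling a writable copy of the table):
 *
 *	int i, v, bits;
 *	for (i = 0; i < 256; i++) {
 *		for (v = i, bits = 0; v; v >>= 1)
 *			bits += v & 1;
 *		parity_table[i] = !(bits & 1);	// 1 when the bit count is even
 *	}
 *
 * A constant table is used instead because these lookups sit on the hot
 * path of ECC calculation.
 */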

/*
 * bitsperbyte contains the number of bits set in each byte value;
 * this is only used for testing and repairing parity
 * (a precalculated table slightly improves performance)
 */
static const char bitsperbyte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};
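
/*
 * Illustrative only: this table is a per-byte population count. Assuming a
 * GCC/Clang style toolchain, the sum used in __nand_correct_data() below
 * could instead be written as
 *
 *	nr_bits = __builtin_popcount(b0) + __builtin_popcount(b1) +
 *		  __builtin_popcount(b2);
 *
 * but, as the comment above notes, the precalculated table gives a small
 * performance improvement on the targets this code was tuned for.
 */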

/*
 * addressbits is a lookup table to filter out the bits from the xor-ed
 * ECC data that identify the faulty location.
 * this is only used for repairing parity
 * see the comments in nand_correct_data for more details
 */
static const char addressbits[256] = {
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
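
/*
 * In other words, addressbits[x] compacts the odd-numbered bits of x
 * (bits 1, 3, 5, 7) into a nibble and ignores the even-numbered bits.
 * Two illustrative values taken from the table above:
 *
 *	addressbits[0xaa] == 0x0f;	// bits 1, 3, 5, 7 all set
 *	addressbits[0x55] == 0x00;	// only even-numbered bits set
 *
 * A correctable ECC difference has exactly one bit set in each such pair
 * (that is what the 0x55 checks in __nand_correct_data() verify), which is
 * what lets these nibbles reassemble the faulty byte and bit address.
 */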

/**
 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
 *			  block
 * @buf: input buffer with raw data
 * @eccsize: data bytes per ECC step (256 or 512)
 * @code: output buffer with ECC
 */
void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
			  unsigned char *code)
{
	int i;
	const uint32_t *bp = (uint32_t *)buf;
	/* 256 or 512 bytes/ecc */
	const uint32_t eccsize_mult = eccsize >> 8;
	uint32_t cur; /* current value in buffer */
	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
	uint32_t uninitialized_var(rp17); /* to make compiler happy */
	uint32_t par; /* the cumulative parity for all data */
	uint32_t tmppar; /* the cumulative parity for this iteration;
			    for rp12, rp14 and rp16 at the end of the
			    loop */

	par = 0;
	rp4 = 0;
	rp6 = 0;
	rp8 = 0;
	rp10 = 0;
	rp12 = 0;
	rp14 = 0;
	rp16 = 0;

	/*
	 * The loop is unrolled a number of times; this avoids if statements
	 * deciding which rp value to update, and lets us process the data
	 * by longwords.
	 * Note: passing unaligned data might give a performance penalty.
	 * It is assumed that the buffers are aligned.
	 * tmppar is the cumulative sum of this iteration; it is needed for
	 * calculating rp12, rp14, rp16 and par, and is also used as a
	 * performance improvement for rp6, rp8 and rp10.
	 */
	for (i = 0; i < eccsize_mult << 2; i++) {
		cur = *bp++;
		tmppar = cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= tmppar;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= tmppar;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp10 ^= tmppar;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= cur;

		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;

		par ^= tmppar;
		if ((i & 0x1) == 0)
			rp12 ^= tmppar;
		if ((i & 0x2) == 0)
			rp14 ^= tmppar;
		if (eccsize_mult == 2 && (i & 0x4) == 0)
			rp16 ^= tmppar;
	}

	/*
	 * handle the fact that we use longword operations
	 * we'll bring rp4..rp14..rp16 back to single byte entities by
	 * shifting and xoring: first fold the upper and lower 16 bits,
	 * then the upper and lower 8 bits.
	 */
	rp4 ^= (rp4 >> 16);
	rp4 ^= (rp4 >> 8);
	rp4 &= 0xff;
	rp6 ^= (rp6 >> 16);
	rp6 ^= (rp6 >> 8);
	rp6 &= 0xff;
	rp8 ^= (rp8 >> 16);
	rp8 ^= (rp8 >> 8);
	rp8 &= 0xff;
	rp10 ^= (rp10 >> 16);
	rp10 ^= (rp10 >> 8);
	rp10 &= 0xff;
	rp12 ^= (rp12 >> 16);
	rp12 ^= (rp12 >> 8);
	rp12 &= 0xff;
	rp14 ^= (rp14 >> 16);
	rp14 ^= (rp14 >> 8);
	rp14 &= 0xff;
	if (eccsize_mult == 2) {
		rp16 ^= (rp16 >> 16);
		rp16 ^= (rp16 >> 8);
		rp16 &= 0xff;
	}

	/*
	 * we also need to calculate the row parity for rp0..rp3
	 * This is present in par, because par is now
	 * rp3 rp3 rp2 rp2 in little endian and
	 * rp2 rp2 rp3 rp3 in big endian
	 * as well as
	 * rp1 rp0 rp1 rp0 in little endian and
	 * rp0 rp1 rp0 rp1 in big endian
	 * First calculate rp2 and rp3
	 */
#ifdef __BIG_ENDIAN
	rp2 = (par >> 16);
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
	rp3 = par & 0xffff;
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
#else
	rp3 = (par >> 16);
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
	rp2 = par & 0xffff;
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
#endif

	/* reduce par to 16 bits then calculate rp1 and rp0 */
	par ^= (par >> 16);
#ifdef __BIG_ENDIAN
	rp0 = (par >> 8) & 0xff;
	rp1 = (par & 0xff);
#else
	rp1 = (par >> 8) & 0xff;
	rp0 = (par & 0xff);
#endif

	/* finally reduce par to 8 bits */
	par ^= (par >> 8);
	par &= 0xff;

	/*
	 * and calculate rp5..rp15..rp17
	 * note that par = rp4 ^ rp5 and due to the commutative property
	 * of the ^ operator we can say:
	 * rp5 = (par ^ rp4);
	 * The & 0xff seems superfluous, but benchmarking showed that
	 * leaving it out gives slightly worse results. No idea why; probably
	 * it has to do with the way the Pentium pipeline is organised.
	 */
	rp5 = (par ^ rp4) & 0xff;
	rp7 = (par ^ rp6) & 0xff;
	rp9 = (par ^ rp8) & 0xff;
	rp11 = (par ^ rp10) & 0xff;
	rp13 = (par ^ rp12) & 0xff;
	rp15 = (par ^ rp14) & 0xff;
	if (eccsize_mult == 2)
		rp17 = (par ^ rp16) & 0xff;

	/*
	 * Finally calculate the ECC bits.
	 * Again here it might seem that there are performance optimisations
	 * possible, but benchmarks showed that on the development system
	 * the code below is the fastest.
	 */
#ifdef CONFIG_MTD_NAND_ECC_SMC
	code[0] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[1] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1) |
	    (invparity[rp8]);
#else
	code[1] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[0] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1) |
	    (invparity[rp8]);
#endif
	if (eccsize_mult == 1)
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    3;
	else
		code[2] =
		    (invparity[par & 0xf0] << 7) |
		    (invparity[par & 0x0f] << 6) |
		    (invparity[par & 0xcc] << 5) |
		    (invparity[par & 0x33] << 4) |
		    (invparity[par & 0xaa] << 3) |
		    (invparity[par & 0x55] << 2) |
		    (invparity[rp17] << 1) |
		    (invparity[rp16] << 0);
}
EXPORT_SYMBOL(__nand_calculate_ecc);
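
/*
 * Usage sketch (illustrative only, not part of the driver): computing the
 * ECC bytes for one 256-byte step. The buffer contents here are arbitrary.
 *
 *	unsigned char data[256];
 *	unsigned char ecc[3];
 *
 *	memset(data, 0xff, sizeof(data));	// e.g. an erased page
 *	__nand_calculate_ecc(data, 256, ecc);
 *	// ecc[] now holds the three parity bytes to be stored with the data,
 *	// typically in the NAND OOB area
 */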

/**
 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
 *			block
 * @mtd: MTD block structure
 * @buf: input buffer with raw data
 * @code: output buffer with ECC
 */
int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
		       unsigned char *code)
{
	__nand_calculate_ecc(buf,
			((struct nand_chip *)mtd->priv)->ecc.size, code);

	return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);

/**
 * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @buf: raw data read from the chip
 * @read_ecc: ECC from the chip
 * @calc_ecc: the ECC calculated from raw data
 * @eccsize: data bytes per ECC step (256 or 512)
 *
 * Detect and correct a 1 bit error for eccsize byte block
 */
int __nand_correct_data(unsigned char *buf,
			unsigned char *read_ecc, unsigned char *calc_ecc,
			unsigned int eccsize)
{
	unsigned char b0, b1, b2, bit_addr;
	unsigned int byte_addr;
	/* 256 or 512 bytes/ecc */
	const uint32_t eccsize_mult = eccsize >> 8;

	/*
	 * b0 to b2 indicate which bit is faulty (if any)
	 * we might need the xor result more than once,
	 * so keep them in a local var
	 */
#ifdef CONFIG_MTD_NAND_ECC_SMC
	b0 = read_ecc[0] ^ calc_ecc[0];
	b1 = read_ecc[1] ^ calc_ecc[1];
#else
	b0 = read_ecc[1] ^ calc_ecc[1];
	b1 = read_ecc[0] ^ calc_ecc[0];
#endif
	b2 = read_ecc[2] ^ calc_ecc[2];

	/* check if there are any bitfaults */

	/* repeated if statements are slightly more efficient than switch ... */
	/* ordered in order of likelihood */

	if ((b0 | b1 | b2) == 0)
		return 0;	/* no error */

	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
		/* single bit error */
		/*
		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
		 * byte, cp 5/3/1 indicate the faulty bit.
		 * A lookup table (called addressbits) is used to filter
		 * the bits from the byte they are in.
		 * A marginal optimisation is possible by having three
		 * different lookup tables.
		 * One as we have now (for b0), one for b2
		 * (that would avoid the >> 1), and one for b1 (with all values
		 * << 4). However it was felt that introducing two more tables
		 * hardly justifies the gain.
		 *
		 * The b2 shift is there to get rid of the lowest two bits.
		 * We could also do addressbits[b2] >> 1 but for the
		 * performance it does not make any difference.
		 */
		if (eccsize_mult == 1)
			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
		else
			byte_addr = (addressbits[b2 & 0x3] << 8) +
				    (addressbits[b1] << 4) + addressbits[b0];
		bit_addr = addressbits[b2 >> 2];
		/* flip the bit */
		buf[byte_addr] ^= (1 << bit_addr);
		return 1;

	}
	/* count nr of bits; use table lookup, faster than calculating it */
	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
		return 1;	/* error in ECC data; no action needed */

	printk(KERN_ERR "uncorrectable error : ");
	return -1;
}
EXPORT_SYMBOL(__nand_correct_data);
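
/*
 * Usage sketch (illustrative only, not part of the driver): verifying a
 * 256-byte block against stored ECC and repairing a single flipped bit.
 * read_ecc would normally come from the NAND OOB area; here a fault is
 * injected by hand.
 *
 *	unsigned char data[256], read_ecc[3], calc_ecc[3];
 *	int ret;
 *
 *	__nand_calculate_ecc(data, 256, read_ecc);	// "stored" ECC
 *	data[10] ^= 0x04;				// inject a 1-bit error
 *	__nand_calculate_ecc(data, 256, calc_ecc);
 *	ret = __nand_correct_data(data, read_ecc, calc_ecc, 256);
 *	// ret == 1 and data[10] is restored; ret < 0 means uncorrectable
 */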

/**
 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @mtd: MTD block structure
 * @buf: raw data read from the chip
 * @read_ecc: ECC from the chip
 * @calc_ecc: the ECC calculated from raw data
 *
 * Detect and correct a 1 bit error for 256/512 byte block
 */
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
		      unsigned char *read_ecc, unsigned char *calc_ecc)
{
	return __nand_correct_data(buf, read_ecc, calc_ecc,
				   ((struct nand_chip *)mtd->priv)->ecc.size);
}
EXPORT_SYMBOL(nand_correct_data);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");