// SPDX-License-Identifier: GPL-2.0-only
/*
 * Overview:
 *   This is the generic MTD driver for NAND flash devices. It should be
 *   capable of working with almost all NAND chips currently available.
 *
 * Additional technical information is available on
 * http://www.linux-mtd.infradead.org/doc/nand.html
 *
 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		 2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 * Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 * TODO:
 *	Enable cached programming for 2k page size chips
 *	Check if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	The BBT is not serialized; this has to be fixed.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>

#include "internals.h"
static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
				       struct mtd_pairing_info *info)
{
	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
	int dist = 3;

	if (page == lastpage)
		dist = 2;

	if (!page || (page & 1)) {
		info->group = 0;
		info->pair = (page + 1) / 2;
	} else {
		info->group = 1;
		info->pair = (page + 1 - dist) / 2;
	}

	return 0;
}

static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
					const struct mtd_pairing_info *info)
{
	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
	int page = info->pair * 2;
	int dist = 3;

	if (!info->group && !info->pair)
		return 0;

	if (info->pair == lastpair && info->group)
		dist = 2;

	if (!info->group)
		page--;
	else if (info->pair)
		page += dist - 1;

	if (page >= mtd->erasesize / mtd->writesize)
		return -EINVAL;

	return page;
}

const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
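
/*
 * Example (illustrative sketch, not part of the driver): with the
 * distance-3 scheme, the first pages of a block pair up as {0, 2},
 * {1, 4}, {3, 6}, {5, 8}, ... Assuming a block large enough that no
 * last-page adjustment applies:
 *
 *	struct mtd_pairing_info info;
 *
 *	dist3_pairing_scheme.get_info(mtd, 4, &info);
 *		(info.group == 1, info.pair == 1)
 *	dist3_pairing_scheme.get_wunit(mtd, &info);
 *		(returns 4 again)
 */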

static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}

/**
 * nand_extract_bits - Copy unaligned bits from one buffer to another one
 * @dst: destination buffer
 * @dst_off: bit offset at which the writing starts
 * @src: source buffer
 * @src_off: bit offset at which the reading starts
 * @nbits: number of bits to copy from @src to @dst
 *
 * Copy bits from one memory region to another (the two regions may overlap).
 */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits)
{
	unsigned int tmp, n;

	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		n = min3(8 - dst_off, 8 - src_off, nbits);

		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		*dst |= tmp << dst_off;

		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}

		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}

		nbits -= n;
	}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);
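
/*
 * Example (illustrative sketch, not part of the driver): copying 10 bits
 * starting at bit 3 of src into dst starting at bit 0. Bits 3..11 of src
 * are all ones and bit 12 is zero, so dst ends up as { 0xff, 0x01 }:
 *
 *	u8 src[2] = { 0xff, 0x0f };
 *	u8 dst[2] = { 0, 0 };
 *
 *	nand_extract_bits(dst, 0, src, 3, 10);
 */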

/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *	PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and nanddev_ntargets(), when that's
	 * not the case it's a bug and the caller should be fixed.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);

/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);

/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}

/**
 * nand_bbm_get_next_page - Get the next page for bad block markers
 * @chip: NAND chip object
 * @page: First page to start checking for bad block marker usage
 *
 * Returns an integer that corresponds to the page offset within a block, for
 * a page that is used to store bad block markers. If no more pages are
 * available, -EINVAL is returned.
 */
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int last_page = ((mtd->erasesize - mtd->writesize) >>
			 chip->page_shift) & chip->pagemask;
	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
		| NAND_BBM_LASTPAGE;

	if (page == 0 && !(chip->options & bbm_flags))
		return 0;
	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
		return last_page;

	return -EINVAL;
}

/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check if the block is bad.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}

/**
 * nand_region_is_secured() - Check if the region is secured
 * @chip: NAND chip object
 * @offset: Offset of the region to check
 * @size: Size of the region to check
 *
 * Checks if the region is secured by comparing the offset and size with the
 * list of secure regions obtained from DT. Returns true if the region is
 * secured, else false.
 */
static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
{
	int i;

	/* Skip touching the secure regions if present */
	for (i = 0; i < chip->nr_secure_regions; i++) {
		const struct nand_secure_region *region = &chip->secure_regions[i];

		if (offset + size <= region->offset ||
		    offset >= region->offset + region->size)
			continue;

		pr_debug("%s: Region 0x%llx - 0x%llx is secured!\n",
			 __func__, offset, offset + size);

		return true;
	}

	return false;
}

static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->options & NAND_NO_BBM_QUIRK)
		return 0;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, ofs, mtd->erasesize))
		return -EIO;

	if (mtd_check_expert_analysis_mode())
		return 0;

	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}

/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access.
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Wait until the device is resumed. */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			mutex_lock(&chip->controller->lock);
			return;
		}
		mutex_unlock(&chip->lock);

		wait_event(chip->resume_wq, !chip->suspended);
	}
}

/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @chip: NAND chip object
 *
 * Check if the device is write protected. The function expects that the
 * device is already selected.
 */
static int nand_check_wp(struct nand_chip *chip)
{
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status & NAND_STATUS_WP ? 0 : 1;
}

/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999. dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
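
/*
 * Example (illustrative sketch, not part of the driver): user code reaches
 * nand_do_write_oob() through mtd_write_oob(). A minimal call writing two
 * bytes to the free OOB positions of the first page could look like this
 * ("err" is a hypothetical local):
 *
 *	struct mtd_oob_ops ops = { };
 *	u8 oob[2] = { 0xde, 0xad };
 *
 *	ops.mode = MTD_OPS_AUTO_OOB;
 *	ops.ooblen = sizeof(oob);
 *	ops.oobbuf = oob;
 *
 *	err = mtd_write_oob(mtd, 0, &ops);
 */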

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}

/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}

/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 * (1) erase the affected block, to allow OOB marker to be written cleanly
 * (2) write bad block marker to OOB area of affected block (unless flag
 *     NAND_BBT_NO_OOB_BBM is present)
 * (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and return that error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip);

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(chip, ofs);
}

/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 * @allowbbt: 1, if it is allowed to access the BBT area
 *
 * Check if the block is bad, either by reading the bad block table or by
 * calling the scan function.
 */
static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
	/* Return info from the table */
	if (chip->bbt)
		return nand_isbad_bbt(chip, ofs, allowbbt);

	return nand_isbad_bbm(chip, ofs);
}

/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * If that does not happen within the specified timeout, -ETIMEDOUT is
 * returned.
 *
 * This helper is intended to be used when the controller does not have access
 * to the NAND R/B pin.
 *
 * Be aware that calling this helper from an ->exec_op() implementation means
 * ->exec_op() must be re-entrant.
 *
 * Return 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_interface_config *conf;
	u8 status = 0;
	int ret;

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	conf = nand_get_interface_config(chip);
	ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	/*
	 * +1 below is necessary because if we are now in the last fraction
	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
	 * small jiffy fraction - possibly leading to false timeout
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true,
					false);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Typical lowest execution time for a tR on most NANDs is
		 * 10us, use this as polling delay before doing something
		 * smarter (i.e. deriving a delay from the timeout value,
		 * timeout_ms/ratio).
		 */
		udelay(10);
	} while (time_before(jiffies, timeout_ms));

	/*
	 * We have to exit READ_STATUS mode in order to read real data on the
	 * bus in case the WAITRDY instruction is preceding a DATA_IN
	 * instruction.
	 */
	nand_exit_status_op(chip);

	if (ret)
		return ret;

	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
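
/*
 * Example (illustrative sketch, not part of the driver): a controller driver
 * whose ->exec_op() hook cannot observe the R/B line might implement the
 * WAITRDY instruction by polling the chip. The my_ctrl_* name below is
 * hypothetical.
 *
 *	static int my_ctrl_exec_instr(struct nand_chip *chip,
 *				      const struct nand_op_instr *instr)
 *	{
 *		switch (instr->type) {
 *		case NAND_OP_WAITRDY_INSTR:
 *			return nand_soft_waitrdy(chip,
 *					instr->ctx.waitrdy.timeout_ms);
 *		...
 *		}
 *	}
 */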

/**
 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
 * @chip: NAND chip structure
 * @gpiod: GPIO descriptor of R/B pin
 * @timeout_ms: Timeout in ms
 *
 * Poll the R/B GPIO pin until it becomes ready. If that does not happen
 * within the specified timeout, -ETIMEDOUT is returned.
 *
 * This helper is intended to be used when the controller has access to the
 * NAND R/B pin over GPIO.
 *
 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
 */
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
		      unsigned long timeout_ms)
{

	/*
	 * Wait until R/B pin indicates chip is ready or timeout occurs.
	 * +1 below is necessary because if we are now in the last fraction
	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
	 * small jiffy fraction - possibly leading to false timeout.
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		if (gpiod_get_value_cansleep(gpiod))
			return 0;

		cond_resched();
	} while (time_before(jiffies, timeout_ms));

	return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
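
/*
 * Example (illustrative sketch, not part of the driver): a controller driver
 * with the R/B line wired to a GPIO would typically request the descriptor
 * at probe time and use it for WAITRDY instructions. The "rb" consumer name
 * and the ctrl->rb_gpiod field are assumptions of this sketch, not a binding
 * this file defines.
 *
 *	ctrl->rb_gpiod = devm_gpiod_get(dev, "rb", GPIOD_IN);
 *	if (IS_ERR(ctrl->rb_gpiod))
 *		return PTR_ERR(ctrl->rb_gpiod);
 *
 *	later, in ->exec_op():
 *
 *	ret = nand_gpio_waitrdy(chip, ctrl->rb_gpiod,
 *				instr->ctx.waitrdy.timeout_ms);
 */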

/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when we
 * are in interrupt context, which may happen when we are in panic and trying
 * to write an oops through mtdoops.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;

	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}

static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.get_feature_list));
}

static bool nand_supports_set_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.set_feature_list));
}

/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}

/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * nand_reset_interface() puts both the NAND chip and the NAND
	 * controller in timing mode 0. If the default mode for this chip is
	 * also 0, there is no need to apply the change again. Plus, at probe
	 * time, nand_setup_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	request = chip->best_interface_config->timings.mode;
	if (nand_interface_is_sdr(chip->best_interface_config))
		request |= ONFI_DATA_INTERFACE_SDR;
	else
		request |= ONFI_DATA_INTERFACE_NVDDR;
	tmode_param[0] = request;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (request != tmode_param[0]) {
		pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
			nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
			chip->best_interface_config->timings.mode);
		pr_debug("NAND chip would work in %s timing mode %d\n",
			 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
			 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}

/**
 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
 *                                NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (may be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fall back to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
	}

	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}
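
/*
 * Example (illustrative sketch, not part of the driver): a manufacturer
 * driver may pass vendor-specific timings instead of relying purely on the
 * ONFI modes. The function name and the timing value below are made up for
 * the sketch.
 *
 *	static int my_vendor_choose_interface_config(struct nand_chip *chip,
 *					struct nand_interface_config *iface)
 *	{
 *		struct nand_sdr_timings timings = { .tRC_min = 25000, ... };
 *
 *		return nand_choose_best_sdr_timings(chip, iface, &timings);
 *	}
 */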

/**
 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both
 *                                  the NAND controller and the NAND chip
 *                                  support
 * @chip: the NAND chip
 * @iface: the interface configuration (may be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
				   struct nand_interface_config *iface,
				   struct nand_nvddr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_NVDDR_IFACE;

	if (spec_timings) {
		iface->timings.nvddr = *spec_timings;
		iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fall back to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
	}

	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}

/**
 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
 *                            the NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (may be updated)
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
static int nand_choose_best_timings(struct nand_chip *chip,
				    struct nand_interface_config *iface)
{
	int ret;

	/* Try the fastest timings: NV-DDR */
	ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
	if (!ret)
		return 0;

	/* Fall back to SDR timings otherwise */
	return nand_choose_best_sdr_timings(chip, iface, NULL);
}

/**
 * nand_choose_interface_config - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver. Optionally, let the NAND manufacturer driver propose its
 * own set of timings.
 *
 * After this function nand_chip->interface_config is initialized with the best
 * timing mode available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_choose_interface_config(struct nand_chip *chip)
{
	struct nand_interface_config *iface;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	if (chip->ops.choose_interface_config)
		ret = chip->ops.choose_interface_config(chip, iface);
	else
		ret = nand_choose_best_timings(chip, iface);

	if (ret)
		kfree(iface);

	return ret;
}

/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is not past the end of the page + OOB area. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes; if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2.
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
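
/*
 * Worked example (illustrative): on a 2048+64 byte page with an 8-bit bus,
 * offset_in_page = 2050 (the third OOB byte) is 0x0802, so it is encoded
 * over two column cycles as addrs[0] = 0x02 and addrs[1] = 0x08.
 */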

static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
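
/*
 * Example (illustrative sketch, not part of the driver): a raw read of a
 * full page from a custom ->read_page() implementation could look like:
 *
 *	u8 *databuf = nand_get_data_buf(chip);
 *	int ret;
 *
 *	ret = nand_read_page_op(chip, page, 0, databuf, mtd->writesize);
 *	if (ret)
 *		return ret;
 */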

/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}

/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);

/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);

static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

	if (naddrs < 0)
		return naddrs;

	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);

/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
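
/*
 * Example (illustrative sketch, not part of the driver): controller drivers
 * with hardware ECC often split a page program around their ECC engine using
 * the begin/end halves:
 *
 *	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
 *	if (ret)
 *		return ret;
 *
 *	... let the ECC engine compute and send the OOB bytes ...
 *
 *	return nand_prog_page_end_op(chip);
 */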

/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 status;
	int ret;

	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					     len, true);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);

/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);

/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* READ_ID data bytes are received twice in NV-DDR mode */
		if (len && nand_interface_is_nvddr(conf)) {
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
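
/*
 * Example (illustrative sketch, not part of the driver): reading the two
 * manufacturer/device ID bytes at address 0x00. Afterwards, id[0] holds the
 * manufacturer ID and id[1] the device ID.
 *
 *	u8 id[2];
 *	int ret;
 *
 *	nand_select_target(chip, 0);
 *	ret = nand_readid_op(chip, 0, id, sizeof(id));
 *	nand_deselect_target(chip);
 */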

/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);

/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}
1818 /**
1819 * nand_erase_op - Do an erase operation
1820 * @chip: The NAND chip
1821 * @eraseblock: block to erase
1822 *
1823 * This function sends an ERASE command and waits for the NAND to be ready
1824 * before returning.
1825 * This function does not select/unselect the CS line.
1826 *
1827 * Returns 0 on success, a negative error code otherwise.
1828 */
1829 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1830 {
1831 unsigned int page = eraseblock <<
1832 (chip->phys_erase_shift - chip->page_shift);
1833 int ret;
1834 u8 status;
1835
1836 if (nand_has_exec_op(chip)) {
1837 const struct nand_interface_config *conf =
1838 nand_get_interface_config(chip);
1839 u8 addrs[3] = { page, page >> 8, page >> 16 };
1840 struct nand_op_instr instrs[] = {
1841 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1842 NAND_OP_ADDR(2, addrs, 0),
1843 NAND_OP_CMD(NAND_CMD_ERASE2,
1844 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1845 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
1846 0),
1847 };
1848 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1849
1850 if (chip->options & NAND_ROW_ADDR_3)
1851 instrs[1].ctx.addr.naddrs++;
1852
1853 ret = nand_exec_op(chip, &op);
1854 if (ret)
1855 return ret;
1856
1857 ret = nand_status_op(chip, &status);
1858 if (ret)
1859 return ret;
1860 } else {
1861 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1862 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1863
1864 ret = chip->legacy.waitfunc(chip);
1865 if (ret < 0)
1866 return ret;
1867
1868 status = ret;
1869 }
1870
1871 if (status & NAND_STATUS_FAIL)
1872 return -EIO;
1873
1874 return 0;
1875 }
1876 EXPORT_SYMBOL_GPL(nand_erase_op);
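
/*
 * Illustrative sketch only: a minimal caller of nand_erase_op(). The helper
 * name is hypothetical. nand_erase_op() already polls the status register,
 * so the caller only has to map -EIO onto its own bad-block policy.
 */
static inline int example_erase_block(struct nand_chip *chip,
				      unsigned int block)
{
	int ret;

	ret = nand_erase_op(chip, block);
	if (ret == -EIO)
		pr_debug("erase of block %u failed\n", block);

	return ret;
}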
1877
1878 /**
1879 * nand_set_features_op - Do a SET FEATURES operation
1880 * @chip: The NAND chip
1881 * @feature: feature id
1882 * @data: 4 bytes of data
1883 *
1884 * This function sends a SET FEATURES command and waits for the NAND to be
1885 * ready before returning.
1886 * This function does not select/unselect the CS line.
1887 *
1888 * Returns 0 on success, a negative error code otherwise.
1889 */
1890 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1891 const void *data)
1892 {
1893 const u8 *params = data;
1894 int i, ret;
1895
1896 if (nand_has_exec_op(chip)) {
1897 const struct nand_interface_config *conf =
1898 nand_get_interface_config(chip);
1899 struct nand_op_instr instrs[] = {
1900 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1901 NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
1902 tADL_min)),
1903 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1904 NAND_COMMON_TIMING_NS(conf,
1905 tWB_max)),
1906 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
1907 0),
1908 };
1909 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1910
1911 return nand_exec_op(chip, &op);
1912 }
1913
1914 chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1915 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1916 chip->legacy.write_byte(chip, params[i]);
1917
1918 ret = chip->legacy.waitfunc(chip);
1919 if (ret < 0)
1920 return ret;
1921
1922 if (ret & NAND_STATUS_FAIL)
1923 return -EIO;
1924
1925 return 0;
1926 }
1927
1928 /**
1929 * nand_get_features_op - Do a GET FEATURES operation
1930 * @chip: The NAND chip
1931 * @feature: feature id
1932 * @data: 4 bytes of data
1933 *
1934 * This function sends a GET FEATURES command and waits for the NAND to be
1935 * ready before returning.
1936 * This function does not select/unselect the CS line.
1937 *
1938 * Returns 0 on success, a negative error code otherwise.
1939 */
1940 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1941 void *data)
1942 {
1943 u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
1944 int i;
1945
1946 if (nand_has_exec_op(chip)) {
1947 const struct nand_interface_config *conf =
1948 nand_get_interface_config(chip);
1949 struct nand_op_instr instrs[] = {
1950 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1951 NAND_OP_ADDR(1, &feature,
1952 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1953 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
1954 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1955 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1956 data, 0),
1957 };
1958 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1959 int ret;
1960
1961 /* GET_FEATURE data bytes are received twice in NV-DDR mode */
1962 if (nand_interface_is_nvddr(conf)) {
1963 instrs[3].ctx.data.len *= 2;
1964 instrs[3].ctx.data.buf.in = ddrbuf;
1965 }
1966
1967 ret = nand_exec_op(chip, &op);
1968 if (nand_interface_is_nvddr(conf)) {
1969 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
1970 params[i] = ddrbuf[i * 2];
1971 }
1972
1973 return ret;
1974 }
1975
1976 chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1977 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1978 params[i] = chip->legacy.read_byte(chip);
1979
1980 return 0;
1981 }
1982
1983 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1984 unsigned int delay_ns)
1985 {
1986 if (nand_has_exec_op(chip)) {
1987 struct nand_op_instr instrs[] = {
1988 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1989 PSEC_TO_NSEC(delay_ns)),
1990 };
1991 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1992
1993 return nand_exec_op(chip, &op);
1994 }
1995
1996 /* Apply delay or wait for ready/busy pin */
1997 if (!chip->legacy.dev_ready)
1998 udelay(chip->legacy.chip_delay);
1999 else
2000 nand_wait_ready(chip);
2001
2002 return 0;
2003 }
2004
2005 /**
2006 * nand_reset_op - Do a reset operation
2007 * @chip: The NAND chip
2008 *
2009 * This function sends a RESET command and waits for the NAND to be ready
2010 * before returning.
2011 * This function does not select/unselect the CS line.
2012 *
2013 * Returns 0 on success, a negative error code otherwise.
2014 */
2015 int nand_reset_op(struct nand_chip *chip)
2016 {
2017 if (nand_has_exec_op(chip)) {
2018 const struct nand_interface_config *conf =
2019 nand_get_interface_config(chip);
2020 struct nand_op_instr instrs[] = {
2021 NAND_OP_CMD(NAND_CMD_RESET,
2022 NAND_COMMON_TIMING_NS(conf, tWB_max)),
2023 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
2024 0),
2025 };
2026 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2027
2028 return nand_exec_op(chip, &op);
2029 }
2030
2031 chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
2032
2033 return 0;
2034 }
2035 EXPORT_SYMBOL_GPL(nand_reset_op);
2036
2037 /**
2038 * nand_read_data_op - Read data from the NAND
2039 * @chip: The NAND chip
2040 * @buf: buffer used to store the data
2041 * @len: length of the buffer
2042 * @force_8bit: force 8-bit bus access
2043 * @check_only: do not actually run the command, only check whether the
2044 * controller driver supports it
2045 *
2046 * This function does a raw data read on the bus. Usually used after launching
2047 * another NAND operation like nand_read_page_op().
2048 * This function does not select/unselect the CS line.
2049 *
2050 * Returns 0 on success, a negative error code otherwise.
2051 */
2052 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
2053 bool force_8bit, bool check_only)
2054 {
2055 if (!len || !buf)
2056 return -EINVAL;
2057
2058 if (nand_has_exec_op(chip)) {
2059 const struct nand_interface_config *conf =
2060 nand_get_interface_config(chip);
2061 struct nand_op_instr instrs[] = {
2062 NAND_OP_DATA_IN(len, buf, 0),
2063 };
2064 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2065 u8 *ddrbuf = NULL;
2066 int ret, i;
2067
2068 instrs[0].ctx.data.force_8bit = force_8bit;
2069
2070 /*
2071 * Parameter payloads (ID, status, features, etc.) do not go
2072 * through the same pipeline as regular data, hence the
2073 * force_8bit flag must be set. It also indicates that, when
2074 * NV-DDR timings are in use, the data will be received
2075 * twice.
2076 */
2077 if (force_8bit && nand_interface_is_nvddr(conf)) {
2078 ddrbuf = kzalloc(len * 2, GFP_KERNEL);
2079 if (!ddrbuf)
2080 return -ENOMEM;
2081
2082 instrs[0].ctx.data.len *= 2;
2083 instrs[0].ctx.data.buf.in = ddrbuf;
2084 }
2085
2086 if (check_only) {
2087 ret = nand_check_op(chip, &op);
2088 kfree(ddrbuf);
2089 return ret;
2090 }
2091
2092 ret = nand_exec_op(chip, &op);
2093 if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
2094 u8 *dst = buf;
2095
2096 for (i = 0; i < len; i++)
2097 dst[i] = ddrbuf[i * 2];
2098 }
2099
2100 kfree(ddrbuf);
2101
2102 return ret;
2103 }
2104
2105 if (check_only)
2106 return 0;
2107
2108 if (force_8bit) {
2109 u8 *p = buf;
2110 unsigned int i;
2111
2112 for (i = 0; i < len; i++)
2113 p[i] = chip->legacy.read_byte(chip);
2114 } else {
2115 chip->legacy.read_buf(chip, buf, len);
2116 }
2117
2118 return 0;
2119 }
2120 EXPORT_SYMBOL_GPL(nand_read_data_op);
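
/*
 * Illustrative sketch only: combining nand_read_page_op() with
 * nand_read_data_op() to fetch the OOB bytes that follow the main data,
 * which is essentially what nand_read_page_raw() below does. The helper
 * name is hypothetical.
 */
static inline int example_read_page_and_oob(struct nand_chip *chip, int page,
					    u8 *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Load the page in the NAND cache and read back the main data */
	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Keep reading: the column pointer now sits at the OOB area */
	return nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
				 false, false);
}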
2121
2122 /**
2123 * nand_write_data_op - Write data to the NAND
2124 * @chip: The NAND chip
2125 * @buf: buffer containing the data to send on the bus
2126 * @len: length of the buffer
2127 * @force_8bit: force 8-bit bus access
2128 *
2129 * This function does a raw data write on the bus. Usually used after launching
2130 * another NAND operation like nand_prog_page_begin_op().
2131 * This function does not select/unselect the CS line.
2132 *
2133 * Returns 0 on success, a negative error code otherwise.
2134 */
2135 int nand_write_data_op(struct nand_chip *chip, const void *buf,
2136 unsigned int len, bool force_8bit)
2137 {
2138 if (!len || !buf)
2139 return -EINVAL;
2140
2141 if (nand_has_exec_op(chip)) {
2142 struct nand_op_instr instrs[] = {
2143 NAND_OP_DATA_OUT(len, buf, 0),
2144 };
2145 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2146
2147 instrs[0].ctx.data.force_8bit = force_8bit;
2148
2149 return nand_exec_op(chip, &op);
2150 }
2151
2152 if (force_8bit) {
2153 const u8 *p = buf;
2154 unsigned int i;
2155
2156 for (i = 0; i < len; i++)
2157 chip->legacy.write_byte(chip, p[i]);
2158 } else {
2159 chip->legacy.write_buf(chip, buf, len);
2160 }
2161
2162 return 0;
2163 }
2164 EXPORT_SYMBOL_GPL(nand_write_data_op);
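
/*
 * Illustrative sketch only: a raw page program built from the low-level
 * helpers, the write-side counterpart of the read sketch above. The helper
 * name is hypothetical; nand_prog_page_begin_op() and
 * nand_prog_page_end_op() are the usual bracketing operations.
 */
static inline int example_write_page_raw(struct nand_chip *chip, int page,
					 const u8 *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, buf, mtd->writesize, false);
	if (ret)
		return ret;

	/* Send the final PROG command and wait for the NAND to be ready */
	return nand_prog_page_end_op(chip);
}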
2165
2166 /**
2167 * struct nand_op_parser_ctx - Context used by the parser
2168 * @instrs: array of all the instructions that must be addressed
2169 * @ninstrs: length of the @instrs array
2170 * @subop: Sub-operation to be passed to the NAND controller
2171 *
2172 * This structure is used by the core to split NAND operations into
2173 * sub-operations that can be handled by the NAND controller.
2174 */
2175 struct nand_op_parser_ctx {
2176 const struct nand_op_instr *instrs;
2177 unsigned int ninstrs;
2178 struct nand_subop subop;
2179 };
2180
2181 /**
2182 * nand_op_parser_must_split_instr - Checks if an instruction must be split
2183 * @pat: the parser pattern element that matches @instr
2184 * @instr: pointer to the instruction to check
2185 * @start_offset: this is an in/out parameter. If @instr has already been
2186 * split, then @start_offset is the offset from which to start
2187 * (either an address cycle or an offset in the data buffer).
2188 * Conversely, if the function returns true (i.e. instr must be
2189 * split), this parameter is updated to point to the first
2190 * data/address cycle that has not been taken care of.
2191 *
2192 * Some NAND controllers are limited and cannot send X address cycles in a
2193 * single operation, or cannot read/write more than Y bytes at the same time.
2194 * In this case, split the instruction that does not fit in a single
2195 * controller-operation into two or more chunks.
2196 *
2197 * Returns true if the instruction must be split, false otherwise.
2198 * The @start_offset parameter is also updated to the offset at which the next
2199 * bundle of instructions must start (if an address or a data instruction).
2200 */
2201 static bool
2202 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2203 const struct nand_op_instr *instr,
2204 unsigned int *start_offset)
2205 {
2206 switch (pat->type) {
2207 case NAND_OP_ADDR_INSTR:
2208 if (!pat->ctx.addr.maxcycles)
2209 break;
2210
2211 if (instr->ctx.addr.naddrs - *start_offset >
2212 pat->ctx.addr.maxcycles) {
2213 *start_offset += pat->ctx.addr.maxcycles;
2214 return true;
2215 }
2216 break;
2217
2218 case NAND_OP_DATA_IN_INSTR:
2219 case NAND_OP_DATA_OUT_INSTR:
2220 if (!pat->ctx.data.maxlen)
2221 break;
2222
2223 if (instr->ctx.data.len - *start_offset >
2224 pat->ctx.data.maxlen) {
2225 *start_offset += pat->ctx.data.maxlen;
2226 return true;
2227 }
2228 break;
2229
2230 default:
2231 break;
2232 }
2233
2234 return false;
2235 }
2236
2237 /**
2238 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2239 * remaining in the parser context
2240 * @pat: the pattern to test
2241 * @ctx: the parser context structure to match with the pattern @pat
2242 *
2243 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2244 * Returns true if this is the case, false otherwise. When true is returned,
2245 * @ctx->subop is updated with the set of instructions to be passed to the
2246 * controller driver.
2247 */
2248 static bool
2249 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2250 struct nand_op_parser_ctx *ctx)
2251 {
2252 unsigned int instr_offset = ctx->subop.first_instr_start_off;
2253 const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2254 const struct nand_op_instr *instr = ctx->subop.instrs;
2255 unsigned int i, ninstrs;
2256
2257 for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2258 /*
2259 * The pattern instruction does not match the operation
2260 * instruction. If the instruction is marked optional in the
2261 * pattern definition, we skip the pattern element and continue
2262 * to the next one. If the element is mandatory, there's no
2263 * match and we can return false directly.
2264 */
2265 if (instr->type != pat->elems[i].type) {
2266 if (!pat->elems[i].optional)
2267 return false;
2268
2269 continue;
2270 }
2271
2272 /*
2273 * Now check the pattern element constraints. If the pattern is
2274 * not able to handle the whole instruction in a single step,
2275 * we have to split it.
2276 * The last_instr_end_off value comes back updated to point to
2277 * the position where we have to split the instruction (the
2278 * start of the next subop chunk).
2279 */
2280 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2281 &instr_offset)) {
2282 ninstrs++;
2283 i++;
2284 break;
2285 }
2286
2287 instr++;
2288 ninstrs++;
2289 instr_offset = 0;
2290 }
2291
2292 /*
2293 * This can happen if all instructions of a pattern are optional.
2294 * Still, if there's not at least one instruction handled by this
2295 * pattern, this is not a match, and we should try the next one (if
2296 * any).
2297 */
2298 if (!ninstrs)
2299 return false;
2300
2301 /*
2302 * We had a match on the pattern head, but the pattern may be longer
2303 * than the instructions we're asked to execute. We need to make sure
2304 * there are no mandatory elements in the pattern tail.
2305 */
2306 for (; i < pat->nelems; i++) {
2307 if (!pat->elems[i].optional)
2308 return false;
2309 }
2310
2311 /*
2312 * We have a match: update the subop structure accordingly and return
2313 * true.
2314 */
2315 ctx->subop.ninstrs = ninstrs;
2316 ctx->subop.last_instr_end_off = instr_offset;
2317
2318 return true;
2319 }
2320
2321 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2322 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2323 {
2324 const struct nand_op_instr *instr;
2325 char *prefix = " ";
2326 unsigned int i;
2327
2328 pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
2329
2330 for (i = 0; i < ctx->ninstrs; i++) {
2331 instr = &ctx->instrs[i];
2332
2333 if (instr == &ctx->subop.instrs[0])
2334 prefix = " ->";
2335
2336 nand_op_trace(prefix, instr);
2337
2338 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2339 prefix = " ";
2340 }
2341 }
2342 #else
2343 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2344 {
2345 /* NOP */
2346 }
2347 #endif
2348
2349 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2350 const struct nand_op_parser_ctx *b)
2351 {
2352 if (a->subop.ninstrs < b->subop.ninstrs)
2353 return -1;
2354 else if (a->subop.ninstrs > b->subop.ninstrs)
2355 return 1;
2356
2357 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2358 return -1;
2359 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2360 return 1;
2361
2362 return 0;
2363 }
2364
2365 /**
2366 * nand_op_parser_exec_op - exec_op parser
2367 * @chip: the NAND chip
2368 * @parser: patterns description provided by the controller driver
2369 * @op: the NAND operation to address
2370 * @check_only: when true, the function only checks if @op can be handled but
2371 * does not execute the operation
2372 *
2373 * Helper function designed to ease integration of NAND controller drivers that
2374 * only support a limited set of instruction sequences. The supported sequences
2375 * are described in @parser, and the framework takes care of splitting @op into
2376 * multiple sub-operations (if required) and passes them back to the ->exec()
2377 * callback of the matching pattern if @check_only is set to false.
2378 *
2379 * NAND controller drivers should call this function from their own ->exec_op()
2380 * implementation.
2381 *
2382 * Returns 0 on success, a negative error code otherwise. A failure can be
2383 * caused by an unsupported operation (none of the supported patterns is able
2384 * to handle the requested operation), or an error returned by the
2385 * matching pattern's ->exec() hook.
2386 */
2387 int nand_op_parser_exec_op(struct nand_chip *chip,
2388 const struct nand_op_parser *parser,
2389 const struct nand_operation *op, bool check_only)
2390 {
2391 struct nand_op_parser_ctx ctx = {
2392 .subop.cs = op->cs,
2393 .subop.instrs = op->instrs,
2394 .instrs = op->instrs,
2395 .ninstrs = op->ninstrs,
2396 };
2397 unsigned int i;
2398
2399 while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2400 const struct nand_op_parser_pattern *pattern;
2401 struct nand_op_parser_ctx best_ctx;
2402 int ret, best_pattern = -1;
2403
2404 for (i = 0; i < parser->npatterns; i++) {
2405 struct nand_op_parser_ctx test_ctx = ctx;
2406
2407 pattern = &parser->patterns[i];
2408 if (!nand_op_parser_match_pat(pattern, &test_ctx))
2409 continue;
2410
2411 if (best_pattern >= 0 &&
2412 nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
2413 continue;
2414
2415 best_pattern = i;
2416 best_ctx = test_ctx;
2417 }
2418
2419 if (best_pattern < 0) {
2420 pr_debug("->exec_op() parser: pattern not found!\n");
2421 return -ENOTSUPP;
2422 }
2423
2424 ctx = best_ctx;
2425 nand_op_parser_trace(&ctx);
2426
2427 if (!check_only) {
2428 pattern = &parser->patterns[best_pattern];
2429 ret = pattern->exec(chip, &ctx.subop);
2430 if (ret)
2431 return ret;
2432 }
2433
2434 /*
2435 * Update the context structure by pointing to the start of the
2436 * next subop.
2437 */
2438 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2439 if (ctx.subop.last_instr_end_off)
2440 ctx.subop.instrs -= 1;
2441
2442 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2443 }
2444
2445 return 0;
2446 }
2447 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
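
/*
 * Illustrative sketch only: how a controller driver typically wires
 * nand_op_parser_exec_op() into its ->exec_op() hook. The pattern below
 * (a mandatory command, up to 5 address cycles, an optional second command,
 * an optional wait and up to 512 data-in bytes) and the example_* names are
 * assumptions made for demonstration, not a real controller description.
 * example_exec_subop() is defined after the nand_subop accessors below.
 */
static int example_exec_subop(struct nand_chip *chip,
			      const struct nand_subop *subop);

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(example_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)),
	);

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &example_op_parser, op,
				      check_only);
}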
2448
2449 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2450 {
2451 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2452 instr->type == NAND_OP_DATA_OUT_INSTR);
2453 }
2454
2455 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2456 unsigned int instr_idx)
2457 {
2458 return subop && instr_idx < subop->ninstrs;
2459 }
2460
2461 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2462 unsigned int instr_idx)
2463 {
2464 if (instr_idx)
2465 return 0;
2466
2467 return subop->first_instr_start_off;
2468 }
2469
2470 /**
2471 * nand_subop_get_addr_start_off - Get the start offset in an address array
2472 * @subop: The entire sub-operation
2473 * @instr_idx: Index of the instruction inside the sub-operation
2474 *
2475 * During driver development, one could be tempted to directly use the
2476 * ->addr.addrs field of address instructions. This is wrong as address
2477 * instructions might be split.
2478 *
2479 * Given an address instruction, returns the offset of the first cycle to issue.
2480 */
2481 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2482 unsigned int instr_idx)
2483 {
2484 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2485 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2486 return 0;
2487
2488 return nand_subop_get_start_off(subop, instr_idx);
2489 }
2490 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2491
2492 /**
2493 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2494 * @subop: The entire sub-operation
2495 * @instr_idx: Index of the instruction inside the sub-operation
2496 *
2497 * During driver development, one could be tempted to directly use the
2498 * ->addr->naddrs field of an address instruction. This is wrong as instructions
2499 * might be split.
2500 *
2501 * Given an address instruction, returns the number of address cycles to issue.
2502 */
2503 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2504 unsigned int instr_idx)
2505 {
2506 int start_off, end_off;
2507
2508 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2509 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2510 return 0;
2511
2512 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2513
2514 if (instr_idx == subop->ninstrs - 1 &&
2515 subop->last_instr_end_off)
2516 end_off = subop->last_instr_end_off;
2517 else
2518 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2519
2520 return end_off - start_off;
2521 }
2522 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2523
2524 /**
2525 * nand_subop_get_data_start_off - Get the start offset in a data array
2526 * @subop: The entire sub-operation
2527 * @instr_idx: Index of the instruction inside the sub-operation
2528 *
2529 * During driver development, one could be tempted to directly use the
2530 * ->data->buf.{in,out} field of data instructions. This is wrong as data
2531 * instructions might be split.
2532 *
2533 * Given a data instruction, returns the offset to start from.
2534 */
2535 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2536 unsigned int instr_idx)
2537 {
2538 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2539 !nand_instr_is_data(&subop->instrs[instr_idx])))
2540 return 0;
2541
2542 return nand_subop_get_start_off(subop, instr_idx);
2543 }
2544 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2545
2546 /**
2547 * nand_subop_get_data_len - Get the number of bytes to retrieve
2548 * @subop: The entire sub-operation
2549 * @instr_idx: Index of the instruction inside the sub-operation
2550 *
2551 * During driver development, one could be tempted to directly use the
2552 * ->data->len field of a data instruction. This is wrong as data instructions
2553 * might be split.
2554 *
2555 * Returns the length of the chunk of data to send/receive.
2556 */
2557 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2558 unsigned int instr_idx)
2559 {
2560 int start_off = 0, end_off;
2561
2562 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2563 !nand_instr_is_data(&subop->instrs[instr_idx])))
2564 return 0;
2565
2566 start_off = nand_subop_get_data_start_off(subop, instr_idx);
2567
2568 if (instr_idx == subop->ninstrs - 1 &&
2569 subop->last_instr_end_off)
2570 end_off = subop->last_instr_end_off;
2571 else
2572 end_off = subop->instrs[instr_idx].ctx.data.len;
2573
2574 return end_off - start_off;
2575 }
2576 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
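
/*
 * Illustrative sketch only: the pattern ->exec() hook referenced by the
 * parser sketch above, walking a sub-operation with the accessors defined
 * in this file instead of dereferencing ->ctx fields directly, so that
 * split address/data instructions are handled correctly. A real driver
 * would program its controller where this sketch only logs.
 */
static int example_exec_subop(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	unsigned int i;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		switch (instr->type) {
		case NAND_OP_ADDR_INSTR:
			/* Only issue the cycles this chunk is entitled to */
			pr_debug("addr: %u cycles from offset %u\n",
				 nand_subop_get_num_addr_cyc(subop, i),
				 nand_subop_get_addr_start_off(subop, i));
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			/* Same logic for a possibly split data transfer */
			pr_debug("data: %u bytes from offset %u\n",
				 nand_subop_get_data_len(subop, i),
				 nand_subop_get_data_start_off(subop, i));
			break;
		default:
			break;
		}
	}

	return 0;
}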
2577
2578 /**
2579 * nand_reset - Reset and initialize a NAND device
2580 * @chip: The NAND chip
2581 * @chipnr: Internal die id
2582 *
2583 * Save the timings data structure, then apply SDR timings mode 0 (see
2584 * nand_reset_interface for details), do the reset operation, and restore
2585 * the previous timings.
2586 *
2587 * Returns 0 on success, a negative error code otherwise.
2588 */
2589 int nand_reset(struct nand_chip *chip, int chipnr)
2590 {
2591 int ret;
2592
2593 ret = nand_reset_interface(chip, chipnr);
2594 if (ret)
2595 return ret;
2596
2597 /*
2598 * The CS line has to be released before we can apply the new NAND
2599 * interface settings, hence this weird nand_select_target()
2600 * nand_deselect_target() dance.
2601 */
2602 nand_select_target(chip, chipnr);
2603 ret = nand_reset_op(chip);
2604 nand_deselect_target(chip);
2605 if (ret)
2606 return ret;
2607
2608 ret = nand_setup_interface(chip, chipnr);
2609 if (ret)
2610 return ret;
2611
2612 return 0;
2613 }
2614 EXPORT_SYMBOL_GPL(nand_reset);
2615
2616 /**
2617 * nand_get_features - wrapper to perform a GET_FEATURE
2618 * @chip: NAND chip info structure
2619 * @addr: feature address
2620 * @subfeature_param: the subfeature parameters, a four-byte array
2621 *
2622 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2623 * operation cannot be handled.
2624 */
2625 int nand_get_features(struct nand_chip *chip, int addr,
2626 u8 *subfeature_param)
2627 {
2628 if (!nand_supports_get_features(chip, addr))
2629 return -ENOTSUPP;
2630
2631 if (chip->legacy.get_features)
2632 return chip->legacy.get_features(chip, addr, subfeature_param);
2633
2634 return nand_get_features_op(chip, addr, subfeature_param);
2635 }
2636
2637 /**
2638 * nand_set_features - wrapper to perform a SET_FEATURE
2639 * @chip: NAND chip info structure
2640 * @addr: feature address
2641 * @subfeature_param: the subfeature parameters, a four-byte array
2642 *
2643 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2644 * operation cannot be handled.
2645 */
2646 int nand_set_features(struct nand_chip *chip, int addr,
2647 u8 *subfeature_param)
2648 {
2649 if (!nand_supports_set_features(chip, addr))
2650 return -ENOTSUPP;
2651
2652 if (chip->legacy.set_features)
2653 return chip->legacy.set_features(chip, addr, subfeature_param);
2654
2655 return nand_set_features_op(chip, addr, subfeature_param);
2656 }
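
/*
 * Illustrative sketch only: programming an ONFI timing mode through the
 * SET_FEATURES wrapper above. ONFI_FEATURE_ADDR_TIMING_MODE and
 * ONFI_SUBFEATURE_PARAM_LEN come from the ONFI definitions; the helper
 * name is hypothetical.
 */
static inline int example_set_timing_mode(struct nand_chip *chip, u8 mode)
{
	/* Remaining parameter bytes must be sent as zero */
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };

	return nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
}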
2657
2658 /**
2659 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2660 * @buf: buffer to test
2661 * @len: buffer length
2662 * @bitflips_threshold: maximum number of bitflips
2663 *
2664 * Check if a buffer contains only 0xff, which means the underlying region
2665 * has been erased and is ready to be programmed.
2666 * The bitflips_threshold specifies the maximum number of bitflips before
2667 * considering the region as not erased.
2668 * Note: The logic of this function has been extracted from the memweight
2669 * implementation, except that nand_check_erased_buf() exits before
2670 * testing the whole buffer if the number of bitflips exceeds the
2671 * bitflips_threshold value.
2672 *
2673 * Returns a positive number of bitflips less than or equal to
2674 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2675 * threshold.
2676 */
2677 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2678 {
2679 const unsigned char *bitmap = buf;
2680 int bitflips = 0;
2681 int weight;
2682
2683 for (; len && ((uintptr_t)bitmap) % sizeof(long);
2684 len--, bitmap++) {
2685 weight = hweight8(*bitmap);
2686 bitflips += BITS_PER_BYTE - weight;
2687 if (unlikely(bitflips > bitflips_threshold))
2688 return -EBADMSG;
2689 }
2690
2691 for (; len >= sizeof(long);
2692 len -= sizeof(long), bitmap += sizeof(long)) {
2693 unsigned long d = *((unsigned long *)bitmap);
2694 if (d == ~0UL)
2695 continue;
2696 weight = hweight_long(d);
2697 bitflips += BITS_PER_LONG - weight;
2698 if (unlikely(bitflips > bitflips_threshold))
2699 return -EBADMSG;
2700 }
2701
2702 for (; len > 0; len--, bitmap++) {
2703 weight = hweight8(*bitmap);
2704 bitflips += BITS_PER_BYTE - weight;
2705 if (unlikely(bitflips > bitflips_threshold))
2706 return -EBADMSG;
2707 }
2708
2709 return bitflips;
2710 }
2711
2712 /**
2713 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2714 * 0xff data
2715 * @data: data buffer to test
2716 * @datalen: data length
2717 * @ecc: ECC buffer
2718 * @ecclen: ECC length
2719 * @extraoob: extra OOB buffer
2720 * @extraooblen: extra OOB length
2721 * @bitflips_threshold: maximum number of bitflips
2722 *
2723 * Check if a data buffer and its associated ECC and OOB data contains only
2724 * 0xff pattern, which means the underlying region has been erased and is
2725 * ready to be programmed.
2726 * The bitflips_threshold specifies the maximum number of bitflips before
2727 * considering the region as not erased.
2728 *
2729 * Note:
2730 * 1/ ECC algorithms work on pre-defined block sizes which are usually
2731 * different from the NAND page size. When fixing bitflips, ECC engines will
2732 * report the number of errors per chunk, and the NAND core infrastructure
2733 * expects you to return the maximum number of bitflips for the whole page.
2734 * This is why you should always use this function on a single chunk and
2735 * not on the whole page. After checking each chunk you should update your
2736 * max_bitflips value accordingly.
2737 * 2/ When checking for bitflips in erased pages you should not only check
2738 * the payload data but also the associated ECC data, because a user might
2739 * have programmed almost all bits to 1 except a few. In this case, we
2740 * shouldn't consider the chunk as erased, and checking the ECC bytes
2741 * prevents this case.
2742 * 3/ The extraoob argument is optional, and should be used if some of your OOB
2743 * data are protected by the ECC engine.
2744 * It could also be used if you support subpages and want to attach some
2745 * extra OOB data to an ECC chunk.
2746 *
2747 * Returns a positive number of bitflips less than or equal to
2748 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2749 * threshold. In case of success, the passed buffers are filled with 0xff.
2750 */
2751 int nand_check_erased_ecc_chunk(void *data, int datalen,
2752 void *ecc, int ecclen,
2753 void *extraoob, int extraooblen,
2754 int bitflips_threshold)
2755 {
2756 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
2757
2758 data_bitflips = nand_check_erased_buf(data, datalen,
2759 bitflips_threshold);
2760 if (data_bitflips < 0)
2761 return data_bitflips;
2762
2763 bitflips_threshold -= data_bitflips;
2764
2765 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2766 if (ecc_bitflips < 0)
2767 return ecc_bitflips;
2768
2769 bitflips_threshold -= ecc_bitflips;
2770
2771 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2772 bitflips_threshold);
2773 if (extraoob_bitflips < 0)
2774 return extraoob_bitflips;
2775
2776 if (data_bitflips)
2777 memset(data, 0xff, datalen);
2778
2779 if (ecc_bitflips)
2780 memset(ecc, 0xff, ecclen);
2781
2782 if (extraoob_bitflips)
2783 memset(extraoob, 0xff, extraooblen);
2784
2785 return data_bitflips + ecc_bitflips + extraoob_bitflips;
2786 }
2787 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
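
/*
 * Illustrative sketch only: the canonical per-chunk pattern described in the
 * kernel-doc above, as used by the read_page implementations later in this
 * file (p points to the current data chunk, i to its ECC bytes):
 *
 *	stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
 *	if (stat == -EBADMSG &&
 *	    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK))
 *		stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
 *						   &ecc_code[i], chip->ecc.bytes,
 *						   NULL, 0, chip->ecc.strength);
 *	if (stat >= 0)
 *		max_bitflips = max_t(unsigned int, max_bitflips, stat);
 */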
2788
2789 /**
2790 * nand_read_page_raw_notsupp - dummy read raw page function
2791 * @chip: nand chip info structure
2792 * @buf: buffer to store read data
2793 * @oob_required: caller requires OOB data read to chip->oob_poi
2794 * @page: page number to read
2795 *
2796 * Returns -ENOTSUPP unconditionally.
2797 */
2798 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2799 int oob_required, int page)
2800 {
2801 return -ENOTSUPP;
2802 }
2803
2804 /**
2805 * nand_read_page_raw - [INTERN] read raw page data without ecc
2806 * @chip: nand chip info structure
2807 * @buf: buffer to store read data
2808 * @oob_required: caller requires OOB data read to chip->oob_poi
2809 * @page: page number to read
2810 *
2811 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2812 */
2813 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2814 int page)
2815 {
2816 struct mtd_info *mtd = nand_to_mtd(chip);
2817 int ret;
2818
2819 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2820 if (ret)
2821 return ret;
2822
2823 if (oob_required) {
2824 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2825 false, false);
2826 if (ret)
2827 return ret;
2828 }
2829
2830 return 0;
2831 }
2832 EXPORT_SYMBOL(nand_read_page_raw);
2833
2834 /**
2835 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
2836 * @chip: NAND chip info structure
2837 * @buf: buffer to store read data
2838 * @oob_required: caller requires OOB data read to chip->oob_poi
2839 * @page: page number to read
2840 *
2841 * This is a raw page read, i.e. without any error detection/correction.
2842 * Monolithic means we are requesting all the relevant data (main plus
2843 * optionally OOB) to be loaded in the NAND cache and sent over the
2844 * bus (from the NAND chip to the NAND controller) in a single
2845 * operation. This is an alternative to nand_read_page_raw(), which
2846 * first reads the main data, and if the OOB data is requested too,
2847 * then reads more data on the bus.
2848 */
2849 int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
2850 int oob_required, int page)
2851 {
2852 struct mtd_info *mtd = nand_to_mtd(chip);
2853 unsigned int size = mtd->writesize;
2854 u8 *read_buf = buf;
2855 int ret;
2856
2857 if (oob_required) {
2858 size += mtd->oobsize;
2859
2860 if (buf != chip->data_buf)
2861 read_buf = nand_get_data_buf(chip);
2862 }
2863
2864 ret = nand_read_page_op(chip, page, 0, read_buf, size);
2865 if (ret)
2866 return ret;
2867
2868 if (buf != chip->data_buf)
2869 memcpy(buf, read_buf, mtd->writesize);
2870
2871 return 0;
2872 }
2873 EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2874
2875 /**
2876 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2877 * @chip: nand chip info structure
2878 * @buf: buffer to store read data
2879 * @oob_required: caller requires OOB data read to chip->oob_poi
2880 * @page: page number to read
2881 *
2882 * We need a special oob layout and handling even when OOB isn't used.
2883 */
2884 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
2885 int oob_required, int page)
2886 {
2887 struct mtd_info *mtd = nand_to_mtd(chip);
2888 int eccsize = chip->ecc.size;
2889 int eccbytes = chip->ecc.bytes;
2890 uint8_t *oob = chip->oob_poi;
2891 int steps, size, ret;
2892
2893 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2894 if (ret)
2895 return ret;
2896
2897 for (steps = chip->ecc.steps; steps > 0; steps--) {
2898 ret = nand_read_data_op(chip, buf, eccsize, false, false);
2899 if (ret)
2900 return ret;
2901
2902 buf += eccsize;
2903
2904 if (chip->ecc.prepad) {
2905 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2906 false, false);
2907 if (ret)
2908 return ret;
2909
2910 oob += chip->ecc.prepad;
2911 }
2912
2913 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
2914 if (ret)
2915 return ret;
2916
2917 oob += eccbytes;
2918
2919 if (chip->ecc.postpad) {
2920 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2921 false, false);
2922 if (ret)
2923 return ret;
2924
2925 oob += chip->ecc.postpad;
2926 }
2927 }
2928
2929 size = mtd->oobsize - (oob - chip->oob_poi);
2930 if (size) {
2931 ret = nand_read_data_op(chip, oob, size, false, false);
2932 if (ret)
2933 return ret;
2934 }
2935
2936 return 0;
2937 }
2938
2939 /**
2940 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2941 * @chip: nand chip info structure
2942 * @buf: buffer to store read data
2943 * @oob_required: caller requires OOB data read to chip->oob_poi
2944 * @page: page number to read
2945 */
2946 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2947 int oob_required, int page)
2948 {
2949 struct mtd_info *mtd = nand_to_mtd(chip);
2950 int i, eccsize = chip->ecc.size, ret;
2951 int eccbytes = chip->ecc.bytes;
2952 int eccsteps = chip->ecc.steps;
2953 uint8_t *p = buf;
2954 uint8_t *ecc_calc = chip->ecc.calc_buf;
2955 uint8_t *ecc_code = chip->ecc.code_buf;
2956 unsigned int max_bitflips = 0;
2957
2958 chip->ecc.read_page_raw(chip, buf, 1, page);
2959
2960 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2961 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2962
2963 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2964 chip->ecc.total);
2965 if (ret)
2966 return ret;
2967
2968 eccsteps = chip->ecc.steps;
2969 p = buf;
2970
2971 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2972 int stat;
2973
2974 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2975 if (stat < 0) {
2976 mtd->ecc_stats.failed++;
2977 } else {
2978 mtd->ecc_stats.corrected += stat;
2979 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2980 }
2981 }
2982 return max_bitflips;
2983 }
2984
2985 /**
2986 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2987 * @chip: nand chip info structure
2988 * @data_offs: offset of requested data within the page
2989 * @readlen: data length
2990 * @bufpoi: buffer to store read data
2991 * @page: page number to read
2992 */
2993 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2994 uint32_t readlen, uint8_t *bufpoi, int page)
2995 {
2996 struct mtd_info *mtd = nand_to_mtd(chip);
2997 int start_step, end_step, num_steps, ret;
2998 uint8_t *p;
2999 int data_col_addr, i, gaps = 0;
3000 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
3001 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
3002 int index, section = 0;
3003 unsigned int max_bitflips = 0;
3004 struct mtd_oob_region oobregion = { };
3005
3006 /* Column address within the page aligned to ECC size (256 bytes) */
3007 start_step = data_offs / chip->ecc.size;
3008 end_step = (data_offs + readlen - 1) / chip->ecc.size;
3009 num_steps = end_step - start_step + 1;
3010 index = start_step * chip->ecc.bytes;
3011
3012 /* Data size aligned to the ECC step size (chip->ecc.size) */
3013 datafrag_len = num_steps * chip->ecc.size;
3014 eccfrag_len = num_steps * chip->ecc.bytes;
3015
3016 data_col_addr = start_step * chip->ecc.size;
3017 /* Handle reads that are not page aligned */
3018 p = bufpoi + data_col_addr;
3019 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
3020 if (ret)
3021 return ret;
3022
3023 /* Calculate ECC */
3024 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
3025 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
3026
3027 /*
3028 * Performance is better if we position offsets according to
3029 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
3030 */
3031 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
3032 if (ret)
3033 return ret;
3034
3035 if (oobregion.length < eccfrag_len)
3036 gaps = 1;
3037
3038 if (gaps) {
3039 ret = nand_change_read_column_op(chip, mtd->writesize,
3040 chip->oob_poi, mtd->oobsize,
3041 false);
3042 if (ret)
3043 return ret;
3044 } else {
3045 /*
3046 * Send the command to read the particular ECC bytes, taking
3047 * care of buswidth alignment in read_buf.
3048 */
3049 aligned_pos = oobregion.offset & ~(busw - 1);
3050 aligned_len = eccfrag_len;
3051 if (oobregion.offset & (busw - 1))
3052 aligned_len++;
3053 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
3054 (busw - 1))
3055 aligned_len++;
3056
3057 ret = nand_change_read_column_op(chip,
3058 mtd->writesize + aligned_pos,
3059 &chip->oob_poi[aligned_pos],
3060 aligned_len, false);
3061 if (ret)
3062 return ret;
3063 }
3064
3065 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
3066 chip->oob_poi, index, eccfrag_len);
3067 if (ret)
3068 return ret;
3069
3070 p = bufpoi + data_col_addr;
3071 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
3072 int stat;
3073
3074 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
3075 &chip->ecc.calc_buf[i]);
3076 if (stat == -EBADMSG &&
3077 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3078 /* check for empty pages with bitflips */
3079 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3080 &chip->ecc.code_buf[i],
3081 chip->ecc.bytes,
3082 NULL, 0,
3083 chip->ecc.strength);
3084 }
3085
3086 if (stat < 0) {
3087 mtd->ecc_stats.failed++;
3088 } else {
3089 mtd->ecc_stats.corrected += stat;
3090 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3091 }
3092 }
3093 return max_bitflips;
3094 }
3095
3096 /**
3097 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
3098 * @chip: nand chip info structure
3099 * @buf: buffer to store read data
3100 * @oob_required: caller requires OOB data read to chip->oob_poi
3101 * @page: page number to read
3102 *
3103 * Not for syndrome calculating ECC controllers which need a special oob layout.
3104 */
3105 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
3106 int oob_required, int page)
3107 {
3108 struct mtd_info *mtd = nand_to_mtd(chip);
3109 int i, eccsize = chip->ecc.size, ret;
3110 int eccbytes = chip->ecc.bytes;
3111 int eccsteps = chip->ecc.steps;
3112 uint8_t *p = buf;
3113 uint8_t *ecc_calc = chip->ecc.calc_buf;
3114 uint8_t *ecc_code = chip->ecc.code_buf;
3115 unsigned int max_bitflips = 0;
3116
3117 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3118 if (ret)
3119 return ret;
3120
3121 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3122 chip->ecc.hwctl(chip, NAND_ECC_READ);
3123
3124 ret = nand_read_data_op(chip, p, eccsize, false, false);
3125 if (ret)
3126 return ret;
3127
3128 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3129 }
3130
3131 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
3132 false);
3133 if (ret)
3134 return ret;
3135
3136 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3137 chip->ecc.total);
3138 if (ret)
3139 return ret;
3140
3141 eccsteps = chip->ecc.steps;
3142 p = buf;
3143
3144 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3145 int stat;
3146
3147 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
3148 if (stat == -EBADMSG &&
3149 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3150 /* check for empty pages with bitflips */
3151 stat = nand_check_erased_ecc_chunk(p, eccsize,
3152 &ecc_code[i], eccbytes,
3153 NULL, 0,
3154 chip->ecc.strength);
3155 }
3156
3157 if (stat < 0) {
3158 mtd->ecc_stats.failed++;
3159 } else {
3160 mtd->ecc_stats.corrected += stat;
3161 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3162 }
3163 }
3164 return max_bitflips;
3165 }
3166
3167 /**
3168 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
3169 * data read from OOB area
3170 * @chip: nand chip info structure
3171 * @buf: buffer to store read data
3172 * @oob_required: caller requires OOB data read to chip->oob_poi
3173 * @page: page number to read
3174 *
3175 * Hardware ECC for large page chips, which requires the ECC data to be
3176 * extracted from the OOB before the actual data is read.
3177 */
3178 int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
3179 int oob_required, int page)
3180 {
3181 struct mtd_info *mtd = nand_to_mtd(chip);
3182 int i, eccsize = chip->ecc.size, ret;
3183 int eccbytes = chip->ecc.bytes;
3184 int eccsteps = chip->ecc.steps;
3185 uint8_t *p = buf;
3186 uint8_t *ecc_code = chip->ecc.code_buf;
3187 unsigned int max_bitflips = 0;
3188
3189 /* Read the OOB area first */
3190 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3191 if (ret)
3192 return ret;
3193
3194 /* Move read cursor to start of page */
3195 ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
3196 if (ret)
3197 return ret;
3198
3199 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3200 chip->ecc.total);
3201 if (ret)
3202 return ret;
3203
3204 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3205 int stat;
3206
3207 chip->ecc.hwctl(chip, NAND_ECC_READ);
3208
3209 ret = nand_read_data_op(chip, p, eccsize, false, false);
3210 if (ret)
3211 return ret;
3212
3213 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
3214 if (stat == -EBADMSG &&
3215 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3216 /* check for empty pages with bitflips */
3217 stat = nand_check_erased_ecc_chunk(p, eccsize,
3218 &ecc_code[i],
3219 eccbytes, NULL, 0,
3220 chip->ecc.strength);
3221 }
3222
3223 if (stat < 0) {
3224 mtd->ecc_stats.failed++;
3225 } else {
3226 mtd->ecc_stats.corrected += stat;
3227 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3228 }
3229 }
3230 return max_bitflips;
3231 }
3232 EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
3233
3234 /**
3235 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3236 * @chip: nand chip info structure
3237 * @buf: buffer to store read data
3238 * @oob_required: caller requires OOB data read to chip->oob_poi
3239 * @page: page number to read
3240 *
3241 * The hw generator calculates the error syndrome automatically. Therefore we
3242 * need a special oob layout and handling.
3243 */
3244 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
3245 int oob_required, int page)
3246 {
3247 struct mtd_info *mtd = nand_to_mtd(chip);
3248 int ret, i, eccsize = chip->ecc.size;
3249 int eccbytes = chip->ecc.bytes;
3250 int eccsteps = chip->ecc.steps;
3251 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
3252 uint8_t *p = buf;
3253 uint8_t *oob = chip->oob_poi;
3254 unsigned int max_bitflips = 0;
3255
3256 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3257 if (ret)
3258 return ret;
3259
3260 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3261 int stat;
3262
3263 chip->ecc.hwctl(chip, NAND_ECC_READ);
3264
3265 ret = nand_read_data_op(chip, p, eccsize, false, false);
3266 if (ret)
3267 return ret;
3268
3269 if (chip->ecc.prepad) {
3270 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3271 false, false);
3272 if (ret)
3273 return ret;
3274
3275 oob += chip->ecc.prepad;
3276 }
3277
3278 chip->ecc.hwctl(chip, NAND_ECC_READSYN);
3279
3280 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
3281 if (ret)
3282 return ret;
3283
3284 stat = chip->ecc.correct(chip, p, oob, NULL);
3285
3286 oob += eccbytes;
3287
3288 if (chip->ecc.postpad) {
3289 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3290 false, false);
3291 if (ret)
3292 return ret;
3293
3294 oob += chip->ecc.postpad;
3295 }
3296
3297 if (stat == -EBADMSG &&
3298 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3299 /* check for empty pages with bitflips */
3300 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3301 oob - eccpadbytes,
3302 eccpadbytes,
3303 NULL, 0,
3304 chip->ecc.strength);
3305 }
3306
3307 if (stat < 0) {
3308 mtd->ecc_stats.failed++;
3309 } else {
3310 mtd->ecc_stats.corrected += stat;
3311 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3312 }
3313 }
3314
3315 /* Calculate remaining oob bytes */
3316 i = mtd->oobsize - (oob - chip->oob_poi);
3317 if (i) {
3318 ret = nand_read_data_op(chip, oob, i, false, false);
3319 if (ret)
3320 return ret;
3321 }
3322
3323 return max_bitflips;
3324 }
3325
3326 /**
3327 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3328 * @chip: NAND chip object
3329 * @oob: oob destination address
3330 * @ops: oob ops structure
3331 * @len: size of oob to transfer
3332 */
3333 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3334 struct mtd_oob_ops *ops, size_t len)
3335 {
3336 struct mtd_info *mtd = nand_to_mtd(chip);
3337 int ret;
3338
3339 switch (ops->mode) {
3340
3341 case MTD_OPS_PLACE_OOB:
3342 case MTD_OPS_RAW:
3343 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3344 return oob + len;
3345
3346 case MTD_OPS_AUTO_OOB:
3347 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3348 ops->ooboffs, len);
3349 BUG_ON(ret);
3350 return oob + len;
3351
3352 default:
3353 BUG();
3354 }
3355 return NULL;
3356 }
3357
3358 /**
3359 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3360 * @chip: NAND chip object
3361 * @retry_mode: the retry mode to use
3362 *
3363 * Some vendors supply a special command to shift the Vt threshold, to be used
3364 * when there are too many bitflips in a page (i.e., ECC error). After setting
3365 * a new threshold, the host should retry reading the page.
3366 */
3367 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3368 {
3369 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3370
3371 if (retry_mode >= chip->read_retries)
3372 return -EINVAL;
3373
3374 if (!chip->ops.setup_read_retry)
3375 return -EOPNOTSUPP;
3376
3377 return chip->ops.setup_read_retry(chip, retry_mode);
3378 }
3379
3380 static void nand_wait_readrdy(struct nand_chip *chip)
3381 {
3382 const struct nand_interface_config *conf;
3383
3384 if (!(chip->options & NAND_NEED_READRDY))
3385 return;
3386
3387 conf = nand_get_interface_config(chip);
3388 WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
3389 }
3390
3391 /**
3392 * nand_do_read_ops - [INTERN] Read data with ECC
3393 * @chip: NAND chip object
3394 * @from: offset to read from
3395 * @ops: oob ops structure
3396 *
3397 * Internal function. Called with chip held.
3398 */
3399 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3400 struct mtd_oob_ops *ops)
3401 {
3402 int chipnr, page, realpage, col, bytes, aligned, oob_required;
3403 struct mtd_info *mtd = nand_to_mtd(chip);
3404 int ret = 0;
3405 uint32_t readlen = ops->len;
3406 uint32_t oobreadlen = ops->ooblen;
3407 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3408
3409 uint8_t *bufpoi, *oob, *buf;
3410 int use_bounce_buf;
3411 unsigned int max_bitflips = 0;
3412 int retry_mode = 0;
3413 bool ecc_fail = false;
3414
3415 /* Check if the region is secured */
3416 if (nand_region_is_secured(chip, from, readlen))
3417 return -EIO;
3418
3419 chipnr = (int)(from >> chip->chip_shift);
3420 nand_select_target(chip, chipnr);
3421
3422 realpage = (int)(from >> chip->page_shift);
3423 page = realpage & chip->pagemask;
3424
3425 col = (int)(from & (mtd->writesize - 1));
3426
3427 buf = ops->datbuf;
3428 oob = ops->oobbuf;
3429 oob_required = oob ? 1 : 0;
3430
3431 while (1) {
3432 struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
3433
3434 bytes = min(mtd->writesize - col, readlen);
3435 aligned = (bytes == mtd->writesize);
3436
3437 if (!aligned)
3438 use_bounce_buf = 1;
3439 else if (chip->options & NAND_USES_DMA)
3440 use_bounce_buf = !virt_addr_valid(buf) ||
3441 !IS_ALIGNED((unsigned long)buf,
3442 chip->buf_align);
3443 else
3444 use_bounce_buf = 0;
3445
3446 /* Is the current page in the buffer? */
3447 if (realpage != chip->pagecache.page || oob) {
3448 bufpoi = use_bounce_buf ? chip->data_buf : buf;
3449
3450 if (use_bounce_buf && aligned)
3451 pr_debug("%s: using read bounce buffer for buf@%p\n",
3452 __func__, buf);
3453
3454 read_retry:
3455 /*
3456 * Now read the page into the buffer. Absent an error,
3457 * the read methods return max bitflips per ecc step.
3458 */
3459 if (unlikely(ops->mode == MTD_OPS_RAW))
3460 ret = chip->ecc.read_page_raw(chip, bufpoi,
3461 oob_required,
3462 page);
3463 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3464 !oob)
3465 ret = chip->ecc.read_subpage(chip, col, bytes,
3466 bufpoi, page);
3467 else
3468 ret = chip->ecc.read_page(chip, bufpoi,
3469 oob_required, page);
3470 if (ret < 0) {
3471 if (use_bounce_buf)
3472 /* Invalidate page cache */
3473 chip->pagecache.page = -1;
3474 break;
3475 }
3476
3477 /*
3478 * Copy back the data in the initial buffer when reading
3479 * partial pages or when a bounce buffer is required.
3480 */
3481 if (use_bounce_buf) {
3482 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3483 !(mtd->ecc_stats.failed - ecc_stats.failed) &&
3484 (ops->mode != MTD_OPS_RAW)) {
3485 chip->pagecache.page = realpage;
3486 chip->pagecache.bitflips = ret;
3487 } else {
3488 /* Invalidate page cache */
3489 chip->pagecache.page = -1;
3490 }
3491 memcpy(buf, bufpoi + col, bytes);
3492 }
3493
3494 if (unlikely(oob)) {
3495 int toread = min(oobreadlen, max_oobsize);
3496
3497 if (toread) {
3498 oob = nand_transfer_oob(chip, oob, ops,
3499 toread);
3500 oobreadlen -= toread;
3501 }
3502 }
3503
3504 nand_wait_readrdy(chip);
3505
3506 if (mtd->ecc_stats.failed - ecc_stats.failed) {
3507 if (retry_mode + 1 < chip->read_retries) {
3508 retry_mode++;
3509 ret = nand_setup_read_retry(chip,
3510 retry_mode);
3511 if (ret < 0)
3512 break;
3513
3514 /* Reset ecc_stats; retry */
3515 mtd->ecc_stats = ecc_stats;
3516 goto read_retry;
3517 } else {
3518 /* No more retry modes; real failure */
3519 ecc_fail = true;
3520 }
3521 }
3522
3523 buf += bytes;
3524 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3525 } else {
3526 memcpy(buf, chip->data_buf + col, bytes);
3527 buf += bytes;
3528 max_bitflips = max_t(unsigned int, max_bitflips,
3529 chip->pagecache.bitflips);
3530 }
3531
3532 readlen -= bytes;
3533
3534 /* Reset to retry mode 0 */
3535 if (retry_mode) {
3536 ret = nand_setup_read_retry(chip, 0);
3537 if (ret < 0)
3538 break;
3539 retry_mode = 0;
3540 }
3541
3542 if (!readlen)
3543 break;
3544
3545 /* For subsequent reads align to page boundary */
3546 col = 0;
3547 /* Increment page address */
3548 realpage++;
3549
3550 page = realpage & chip->pagemask;
3551 /* Check if we cross a chip boundary */
3552 if (!page) {
3553 chipnr++;
3554 nand_deselect_target(chip);
3555 nand_select_target(chip, chipnr);
3556 }
3557 }
3558 nand_deselect_target(chip);
3559
3560 ops->retlen = ops->len - (size_t) readlen;
3561 if (oob)
3562 ops->oobretlen = ops->ooblen - oobreadlen;
3563
3564 if (ret < 0)
3565 return ret;
3566
3567 if (ecc_fail)
3568 return -EBADMSG;
3569
3570 return max_bitflips;
3571 }
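/*
 * Note on the read-retry flow above: when a page read reports uncorrectable
 * ECC errors, the loop steps through the vendor-specific retry modes
 * (1 .. chip->read_retries - 1) via nand_setup_read_retry(), restoring the
 * saved ecc_stats before each new attempt so a later success does not leave
 * stale failure counts behind. Only once every mode has been tried is
 * ecc_fail latched and -EBADMSG returned; mode 0 is always restored before
 * moving on to the next page.
 */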
3572
3573 /**
3574 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3575 * @chip: nand chip info structure
3576 * @page: page number to read
3577 */
3578 int nand_read_oob_std(struct nand_chip *chip, int page)
3579 {
3580 struct mtd_info *mtd = nand_to_mtd(chip);
3581
3582 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3583 }
3584 EXPORT_SYMBOL(nand_read_oob_std);
3585
3586 /**
3587 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3588 * with syndromes
3589 * @chip: nand chip info structure
3590 * @page: page number to read
3591 */
3592 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3593 {
3594 struct mtd_info *mtd = nand_to_mtd(chip);
3595 int length = mtd->oobsize;
3596 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3597 int eccsize = chip->ecc.size;
3598 uint8_t *bufpoi = chip->oob_poi;
3599 int i, toread, sndrnd = 0, pos, ret;
3600
3601 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3602 if (ret)
3603 return ret;
3604
3605 for (i = 0; i < chip->ecc.steps; i++) {
3606 if (sndrnd) {
3607 int ret;
3608
3609 pos = eccsize + i * (eccsize + chunk);
3610 if (mtd->writesize > 512)
3611 ret = nand_change_read_column_op(chip, pos,
3612 NULL, 0,
3613 false);
3614 else
3615 ret = nand_read_page_op(chip, page, pos, NULL,
3616 0);
3617
3618 if (ret)
3619 return ret;
3620 } else
3621 sndrnd = 1;
3622 toread = min_t(int, length, chunk);
3623
3624 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3625 if (ret)
3626 return ret;
3627
3628 bufpoi += toread;
3629 length -= toread;
3630 }
3631 if (length > 0) {
3632 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3633 if (ret)
3634 return ret;
3635 }
3636
3637 return 0;
3638 }
3639
3640 /**
3641 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3642 * @chip: nand chip info structure
3643 * @page: page number to write
3644 */
3645 int nand_write_oob_std(struct nand_chip *chip, int page)
3646 {
3647 struct mtd_info *mtd = nand_to_mtd(chip);
3648
3649 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3650 mtd->oobsize);
3651 }
3652 EXPORT_SYMBOL(nand_write_oob_std);
3653
3654 /**
3655 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3656 * with syndrome - only for large page flash
3657 * @chip: nand chip info structure
3658 * @page: page number to write
3659 */
3660 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3661 {
3662 struct mtd_info *mtd = nand_to_mtd(chip);
3663 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3664 int eccsize = chip->ecc.size, length = mtd->oobsize;
3665 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3666 const uint8_t *bufpoi = chip->oob_poi;
3667
3668 /*
3669 * data-ecc-data-ecc ... ecc-oob
3670 * or
3671 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3672 */
3673 if (!chip->ecc.prepad && !chip->ecc.postpad) {
3674 pos = steps * (eccsize + chunk);
3675 steps = 0;
3676 } else
3677 pos = eccsize;
3678
3679 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3680 if (ret)
3681 return ret;
3682
3683 for (i = 0; i < steps; i++) {
3684 if (sndcmd) {
3685 if (mtd->writesize <= 512) {
3686 uint32_t fill = 0xFFFFFFFF;
3687
3688 len = eccsize;
3689 while (len > 0) {
3690 int num = min_t(int, len, 4);
3691
3692 ret = nand_write_data_op(chip, &fill,
3693 num, false);
3694 if (ret)
3695 return ret;
3696
3697 len -= num;
3698 }
3699 } else {
3700 pos = eccsize + i * (eccsize + chunk);
3701 ret = nand_change_write_column_op(chip, pos,
3702 NULL, 0,
3703 false);
3704 if (ret)
3705 return ret;
3706 }
3707 } else
3708 sndcmd = 1;
3709 len = min_t(int, length, chunk);
3710
3711 ret = nand_write_data_op(chip, bufpoi, len, false);
3712 if (ret)
3713 return ret;
3714
3715 bufpoi += len;
3716 length -= len;
3717 }
3718 if (length > 0) {
3719 ret = nand_write_data_op(chip, bufpoi, length, false);
3720 if (ret)
3721 return ret;
3722 }
3723
3724 return nand_prog_page_end_op(chip);
3725 }
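/*
 * Worked example for the interleaved layout above, assuming a hypothetical
 * 2KiB-page chip with 64 OOB bytes, ecc.size = 512, ecc.bytes = 10,
 * ecc.prepad = 6 and ecc.postpad = 0 (so chunk = 16 and steps = 4):
 *
 *   data[512] pad[6]+ecc[10] data[512] pad[6]+ecc[10] ... (4 times)
 *
 * The program operation starts at column pos = eccsize = 512, and each
 * following chunk is reached at eccsize + i * (eccsize + chunk), i.e.
 * columns 512, 1040, 1568 and 2096. The last chunk ends exactly at
 * 2096 + 16 = 2112 = writesize + oobsize, so the trailing write is skipped
 * (length reaches 0 after 4 * 16 bytes).
 */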
3726
3727 /**
3728 * nand_do_read_oob - [INTERN] NAND read out-of-band
3729 * @chip: NAND chip object
3730 * @from: offset to read from
3731 * @ops: oob operations description structure
3732 *
3733 * NAND read out-of-band data from the spare area.
3734 */
3735 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3736 struct mtd_oob_ops *ops)
3737 {
3738 struct mtd_info *mtd = nand_to_mtd(chip);
3739 unsigned int max_bitflips = 0;
3740 int page, realpage, chipnr;
3741 struct mtd_ecc_stats stats;
3742 int readlen = ops->ooblen;
3743 int len;
3744 uint8_t *buf = ops->oobbuf;
3745 int ret = 0;
3746
3747 pr_debug("%s: from = 0x%08Lx, len = %i\n",
3748 __func__, (unsigned long long)from, readlen);
3749
3750 /* Check if the region is secured */
3751 if (nand_region_is_secured(chip, from, readlen))
3752 return -EIO;
3753
3754 stats = mtd->ecc_stats;
3755
3756 len = mtd_oobavail(mtd, ops);
3757
3758 chipnr = (int)(from >> chip->chip_shift);
3759 nand_select_target(chip, chipnr);
3760
3761 /* Shift to get page */
3762 realpage = (int)(from >> chip->page_shift);
3763 page = realpage & chip->pagemask;
3764
3765 while (1) {
3766 if (ops->mode == MTD_OPS_RAW)
3767 ret = chip->ecc.read_oob_raw(chip, page);
3768 else
3769 ret = chip->ecc.read_oob(chip, page);
3770
3771 if (ret < 0)
3772 break;
3773
3774 len = min(len, readlen);
3775 buf = nand_transfer_oob(chip, buf, ops, len);
3776
3777 nand_wait_readrdy(chip);
3778
3779 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3780
3781 readlen -= len;
3782 if (!readlen)
3783 break;
3784
3785 /* Increment page address */
3786 realpage++;
3787
3788 page = realpage & chip->pagemask;
3789 /* Check if we cross a chip boundary */
3790 if (!page) {
3791 chipnr++;
3792 nand_deselect_target(chip);
3793 nand_select_target(chip, chipnr);
3794 }
3795 }
3796 nand_deselect_target(chip);
3797
3798 ops->oobretlen = ops->ooblen - readlen;
3799
3800 if (ret < 0)
3801 return ret;
3802
3803 if (mtd->ecc_stats.failed - stats.failed)
3804 return -EBADMSG;
3805
3806 return max_bitflips;
3807 }
3808
3809 /**
3810 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3811 * @mtd: MTD device structure
3812 * @from: offset to read from
3813 * @ops: oob operation description structure
3814 *
3815 * NAND read data and/or out-of-band data.
3816 */
3817 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3818 struct mtd_oob_ops *ops)
3819 {
3820 struct nand_chip *chip = mtd_to_nand(mtd);
3821 int ret;
3822
3823 ops->retlen = 0;
3824
3825 if (ops->mode != MTD_OPS_PLACE_OOB &&
3826 ops->mode != MTD_OPS_AUTO_OOB &&
3827 ops->mode != MTD_OPS_RAW)
3828 return -ENOTSUPP;
3829
3830 nand_get_device(chip);
3831
3832 if (!ops->datbuf)
3833 ret = nand_do_read_oob(chip, from, ops);
3834 else
3835 ret = nand_do_read_ops(chip, from, ops);
3836
3837 nand_release_device(chip);
3838 return ret;
3839 }
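/*
 * A minimal usage sketch (hypothetical caller, not part of this driver):
 * reading one page of data together with its free OOB bytes through the
 * MTD interface that ends up in nand_read_oob() above. MTD_OPS_AUTO_OOB
 * asks the core to skip ECC/bad-block bytes according to the OOB layout.
 */
static int example_read_page_and_oob(struct mtd_info *mtd, loff_t from,
				     u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops = { };

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.datbuf = data;
	ops.len = mtd->writesize;
	ops.oobbuf = oob;
	ops.ooblen = mtd_oobavail(mtd, &ops);

	/* Returns -EBADMSG on uncorrectable ECC errors */
	return mtd_read_oob(mtd, from, &ops);
}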
3840
3841 /**
3842 * nand_write_page_raw_notsupp - dummy raw page write function
3843 * @chip: nand chip info structure
3844 * @buf: data buffer
3845 * @oob_required: must write chip->oob_poi to OOB
3846 * @page: page number to write
3847 *
3848 * Returns -ENOTSUPP unconditionally.
3849 */
3850 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3851 int oob_required, int page)
3852 {
3853 return -ENOTSUPP;
3854 }
3855
3856 /**
3857 * nand_write_page_raw - [INTERN] raw page write function
3858 * @chip: nand chip info structure
3859 * @buf: data buffer
3860 * @oob_required: must write chip->oob_poi to OOB
3861 * @page: page number to write
3862 *
3863 * Not for syndrome calculating ECC controllers, which use a special oob layout.
3864 */
3865 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3866 int oob_required, int page)
3867 {
3868 struct mtd_info *mtd = nand_to_mtd(chip);
3869 int ret;
3870
3871 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3872 if (ret)
3873 return ret;
3874
3875 if (oob_required) {
3876 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3877 false);
3878 if (ret)
3879 return ret;
3880 }
3881
3882 return nand_prog_page_end_op(chip);
3883 }
3884 EXPORT_SYMBOL(nand_write_page_raw);
3885
3886 /**
3887 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
3888 * @chip: NAND chip info structure
3889 * @buf: data buffer to write
3890 * @oob_required: must write chip->oob_poi to OOB
3891 * @page: page number to write
3892 *
3893 * This is a raw page write, i.e. without any error detection/correction.
3894 * Monolithic means we are requesting all the relevant data (main plus,
3895 * possibly, OOB) to be sent over the bus and effectively programmed
3896 * into the NAND chip arrays in a single operation. This is an
3897 * alternative to nand_write_page_raw(), which first sends the main
3898 * data, then possibly sends the OOB data by latching more data
3899 * cycles on the NAND bus, and finally sends the program command to
3900 * synchronize the NAND chip cache.
3901 */
3902 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
3903 int oob_required, int page)
3904 {
3905 struct mtd_info *mtd = nand_to_mtd(chip);
3906 unsigned int size = mtd->writesize;
3907 u8 *write_buf = (u8 *)buf;
3908
3909 if (oob_required) {
3910 size += mtd->oobsize;
3911
3912 if (buf != chip->data_buf) {
3913 write_buf = nand_get_data_buf(chip);
3914 memcpy(write_buf, buf, mtd->writesize);
3915 }
3916 }
3917
3918 return nand_prog_page_op(chip, page, 0, write_buf, size);
3919 }
3920 EXPORT_SYMBOL(nand_monolithic_write_page_raw);
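/*
 * A hedged sketch of how a controller driver might opt in to the monolithic
 * variants (hypothetical attach_chip() hook, not part of this file): drivers
 * whose ->exec_op() cannot split the data-out phase typically override the
 * defaults here.
 */
static int example_attach_chip(struct nand_chip *chip)
{
	/* Send/fetch main + OOB data in a single data cycle */
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;

	return 0;
}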
3921
3922 /**
3923 * nand_write_page_raw_syndrome - [INTERN] raw page write function
3924 * @chip: nand chip info structure
3925 * @buf: data buffer
3926 * @oob_required: must write chip->oob_poi to OOB
3927 * @page: page number to write
3928 *
3929 * We need a special oob layout and handling even when ECC isn't checked.
3930 */
3931 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3932 const uint8_t *buf, int oob_required,
3933 int page)
3934 {
3935 struct mtd_info *mtd = nand_to_mtd(chip);
3936 int eccsize = chip->ecc.size;
3937 int eccbytes = chip->ecc.bytes;
3938 uint8_t *oob = chip->oob_poi;
3939 int steps, size, ret;
3940
3941 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3942 if (ret)
3943 return ret;
3944
3945 for (steps = chip->ecc.steps; steps > 0; steps--) {
3946 ret = nand_write_data_op(chip, buf, eccsize, false);
3947 if (ret)
3948 return ret;
3949
3950 buf += eccsize;
3951
3952 if (chip->ecc.prepad) {
3953 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3954 false);
3955 if (ret)
3956 return ret;
3957
3958 oob += chip->ecc.prepad;
3959 }
3960
3961 ret = nand_write_data_op(chip, oob, eccbytes, false);
3962 if (ret)
3963 return ret;
3964
3965 oob += eccbytes;
3966
3967 if (chip->ecc.postpad) {
3968 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3969 false);
3970 if (ret)
3971 return ret;
3972
3973 oob += chip->ecc.postpad;
3974 }
3975 }
3976
3977 size = mtd->oobsize - (oob - chip->oob_poi);
3978 if (size) {
3979 ret = nand_write_data_op(chip, oob, size, false);
3980 if (ret)
3981 return ret;
3982 }
3983
3984 return nand_prog_page_end_op(chip);
3985 }
3986 /**
3987 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3988 * @chip: nand chip info structure
3989 * @buf: data buffer
3990 * @oob_required: must write chip->oob_poi to OOB
3991 * @page: page number to write
3992 */
3993 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3994 int oob_required, int page)
3995 {
3996 struct mtd_info *mtd = nand_to_mtd(chip);
3997 int i, eccsize = chip->ecc.size, ret;
3998 int eccbytes = chip->ecc.bytes;
3999 int eccsteps = chip->ecc.steps;
4000 uint8_t *ecc_calc = chip->ecc.calc_buf;
4001 const uint8_t *p = buf;
4002
4003 /* Software ECC calculation */
4004 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
4005 chip->ecc.calculate(chip, p, &ecc_calc[i]);
4006
4007 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4008 chip->ecc.total);
4009 if (ret)
4010 return ret;
4011
4012 return chip->ecc.write_page_raw(chip, buf, 1, page);
4013 }
4014
4015 /**
4016 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
4017 * @chip: nand chip info structure
4018 * @buf: data buffer
4019 * @oob_required: must write chip->oob_poi to OOB
4020 * @page: page number to write
4021 */
4022 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
4023 int oob_required, int page)
4024 {
4025 struct mtd_info *mtd = nand_to_mtd(chip);
4026 int i, eccsize = chip->ecc.size, ret;
4027 int eccbytes = chip->ecc.bytes;
4028 int eccsteps = chip->ecc.steps;
4029 uint8_t *ecc_calc = chip->ecc.calc_buf;
4030 const uint8_t *p = buf;
4031
4032 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4033 if (ret)
4034 return ret;
4035
4036 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4037 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4038
4039 ret = nand_write_data_op(chip, p, eccsize, false);
4040 if (ret)
4041 return ret;
4042
4043 chip->ecc.calculate(chip, p, &ecc_calc[i]);
4044 }
4045
4046 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4047 chip->ecc.total);
4048 if (ret)
4049 return ret;
4050
4051 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4052 if (ret)
4053 return ret;
4054
4055 return nand_prog_page_end_op(chip);
4056 }
4057
4058
4059 /**
4060 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
4061 * @chip: nand chip info structure
4062 * @offset: column address of subpage within the page
4063 * @data_len: data length
4064 * @buf: data buffer
4065 * @oob_required: must write chip->oob_poi to OOB
4066 * @page: page number to write
4067 */
4068 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
4069 uint32_t data_len, const uint8_t *buf,
4070 int oob_required, int page)
4071 {
4072 struct mtd_info *mtd = nand_to_mtd(chip);
4073 uint8_t *oob_buf = chip->oob_poi;
4074 uint8_t *ecc_calc = chip->ecc.calc_buf;
4075 int ecc_size = chip->ecc.size;
4076 int ecc_bytes = chip->ecc.bytes;
4077 int ecc_steps = chip->ecc.steps;
4078 uint32_t start_step = offset / ecc_size;
4079 uint32_t end_step = (offset + data_len - 1) / ecc_size;
4080 int oob_bytes = mtd->oobsize / ecc_steps;
4081 int step, ret;
4082
4083 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4084 if (ret)
4085 return ret;
4086
4087 for (step = 0; step < ecc_steps; step++) {
4088 /* configure controller for WRITE access */
4089 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4090
4091 /* write data (untouched subpages already masked by 0xFF) */
4092 ret = nand_write_data_op(chip, buf, ecc_size, false);
4093 if (ret)
4094 return ret;
4095
4096 /* mask ECC of un-touched subpages by padding 0xFF */
4097 if ((step < start_step) || (step > end_step))
4098 memset(ecc_calc, 0xff, ecc_bytes);
4099 else
4100 chip->ecc.calculate(chip, buf, ecc_calc);
4101
4102 /* mask OOB of un-touched subpages by padding 0xFF */
4103 /* if oob_required, preserve OOB metadata of written subpage */
4104 if (!oob_required || (step < start_step) || (step > end_step))
4105 memset(oob_buf, 0xff, oob_bytes);
4106
4107 buf += ecc_size;
4108 ecc_calc += ecc_bytes;
4109 oob_buf += oob_bytes;
4110 }
4111
4112 /* copy calculated ECC for whole page to chip->buffer->oob */
4113 /* this include masked-value(0xFF) for unwritten subpages */
4114 ecc_calc = chip->ecc.calc_buf;
4115 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4116 chip->ecc.total);
4117 if (ret)
4118 return ret;
4119
4120 /* write OOB buffer to NAND device */
4121 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4122 if (ret)
4123 return ret;
4124
4125 return nand_prog_page_end_op(chip);
4126 }
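/*
 * Worked example for the subpage masking above, assuming a 2KiB page with
 * ecc.size = 512 (4 steps) and a write of data_len = 1024 at offset = 512:
 * start_step = 512 / 512 = 1 and end_step = (512 + 1024 - 1) / 512 = 2, so
 * steps 0 and 3 get 0xFF ECC and 0xFF OOB (leaving those subpages untouched
 * on program), while steps 1 and 2 carry the real calculated ECC bytes.
 */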
4127
4128
4129 /**
4130 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
4131 * @chip: nand chip info structure
4132 * @buf: data buffer
4133 * @oob_required: must write chip->oob_poi to OOB
4134 * @page: page number to write
4135 *
4136 * The hw generator calculates the error syndrome automatically. Therefore we
4137 * need a special oob layout and handling.
4138 */
4139 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
4140 int oob_required, int page)
4141 {
4142 struct mtd_info *mtd = nand_to_mtd(chip);
4143 int i, eccsize = chip->ecc.size;
4144 int eccbytes = chip->ecc.bytes;
4145 int eccsteps = chip->ecc.steps;
4146 const uint8_t *p = buf;
4147 uint8_t *oob = chip->oob_poi;
4148 int ret;
4149
4150 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4151 if (ret)
4152 return ret;
4153
4154 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4155 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4156
4157 ret = nand_write_data_op(chip, p, eccsize, false);
4158 if (ret)
4159 return ret;
4160
4161 if (chip->ecc.prepad) {
4162 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
4163 false);
4164 if (ret)
4165 return ret;
4166
4167 oob += chip->ecc.prepad;
4168 }
4169
4170 chip->ecc.calculate(chip, p, oob);
4171
4172 ret = nand_write_data_op(chip, oob, eccbytes, false);
4173 if (ret)
4174 return ret;
4175
4176 oob += eccbytes;
4177
4178 if (chip->ecc.postpad) {
4179 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
4180 false);
4181 if (ret)
4182 return ret;
4183
4184 oob += chip->ecc.postpad;
4185 }
4186 }
4187
4188 /* Calculate remaining oob bytes */
4189 i = mtd->oobsize - (oob - chip->oob_poi);
4190 if (i) {
4191 ret = nand_write_data_op(chip, oob, i, false);
4192 if (ret)
4193 return ret;
4194 }
4195
4196 return nand_prog_page_end_op(chip);
4197 }
4198
4199 /**
4200 * nand_write_page - write one page
4201 * @chip: NAND chip descriptor
4202 * @offset: address offset within the page
4203 * @data_len: length of actual data to be written
4204 * @buf: the data to write
4205 * @oob_required: must write chip->oob_poi to OOB
4206 * @page: page number to write
4207 * @raw: use _raw version of write_page
4208 */
4209 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
4210 int data_len, const uint8_t *buf, int oob_required,
4211 int page, int raw)
4212 {
4213 struct mtd_info *mtd = nand_to_mtd(chip);
4214 int status, subpage;
4215
4216 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4217 chip->ecc.write_subpage)
4218 subpage = offset || (data_len < mtd->writesize);
4219 else
4220 subpage = 0;
4221
4222 if (unlikely(raw))
4223 status = chip->ecc.write_page_raw(chip, buf, oob_required,
4224 page);
4225 else if (subpage)
4226 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
4227 oob_required, page);
4228 else
4229 status = chip->ecc.write_page(chip, buf, oob_required, page);
4230
4231 if (status < 0)
4232 return status;
4233
4234 return 0;
4235 }
4236
4237 #define NOTALIGNED(x) (((x) & (chip->subpagesize - 1)) != 0)
4238
4239 /**
4240 * nand_do_write_ops - [INTERN] NAND write with ECC
4241 * @chip: NAND chip object
4242 * @to: offset to write to
4243 * @ops: oob operations description structure
4244 *
4245 * NAND write with ECC.
4246 */
4247 static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
4248 struct mtd_oob_ops *ops)
4249 {
4250 struct mtd_info *mtd = nand_to_mtd(chip);
4251 int chipnr, realpage, page, column;
4252 uint32_t writelen = ops->len;
4253
4254 uint32_t oobwritelen = ops->ooblen;
4255 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
4256
4257 uint8_t *oob = ops->oobbuf;
4258 uint8_t *buf = ops->datbuf;
4259 int ret;
4260 int oob_required = oob ? 1 : 0;
4261
4262 ops->retlen = 0;
4263 if (!writelen)
4264 return 0;
4265
4266 /* Reject writes that are not page aligned */
4267 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
4268 pr_notice("%s: attempt to write non page aligned data\n",
4269 __func__);
4270 return -EINVAL;
4271 }
4272
4273 /* Check if the region is secured */
4274 if (nand_region_is_secured(chip, to, writelen))
4275 return -EIO;
4276
4277 column = to & (mtd->writesize - 1);
4278
4279 chipnr = (int)(to >> chip->chip_shift);
4280 nand_select_target(chip, chipnr);
4281
4282 /* Check if it is write protected */
4283 if (nand_check_wp(chip)) {
4284 ret = -EIO;
4285 goto err_out;
4286 }
4287
4288 realpage = (int)(to >> chip->page_shift);
4289 page = realpage & chip->pagemask;
4290
4291 /* Invalidate the page cache when we write to the cached page */
4292 if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
4293 ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
4294 chip->pagecache.page = -1;
4295
4296 /* Don't allow multipage oob writes with offset */
4297 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4298 ret = -EINVAL;
4299 goto err_out;
4300 }
4301
4302 while (1) {
4303 int bytes = mtd->writesize;
4304 uint8_t *wbuf = buf;
4305 int use_bounce_buf;
4306 int part_pagewr = (column || writelen < mtd->writesize);
4307
4308 if (part_pagewr)
4309 use_bounce_buf = 1;
4310 else if (chip->options & NAND_USES_DMA)
4311 use_bounce_buf = !virt_addr_valid(buf) ||
4312 !IS_ALIGNED((unsigned long)buf,
4313 chip->buf_align);
4314 else
4315 use_bounce_buf = 0;
4316
4317 /*
4318 * Copy the data from the initial buffer when doing partial page
4319 * writes or when a bounce buffer is required.
4320 */
4321 if (use_bounce_buf) {
4322 pr_debug("%s: using write bounce buffer for buf@%p\n",
4323 __func__, buf);
4324 if (part_pagewr)
4325 bytes = min_t(int, bytes - column, writelen);
4326 wbuf = nand_get_data_buf(chip);
4327 memset(wbuf, 0xff, mtd->writesize);
4328 memcpy(&wbuf[column], buf, bytes);
4329 }
4330
4331 if (unlikely(oob)) {
4332 size_t len = min(oobwritelen, oobmaxlen);
4333 oob = nand_fill_oob(chip, oob, len, ops);
4334 oobwritelen -= len;
4335 } else {
4336 /* We still need to erase leftover OOB data */
4337 memset(chip->oob_poi, 0xff, mtd->oobsize);
4338 }
4339
4340 ret = nand_write_page(chip, column, bytes, wbuf,
4341 oob_required, page,
4342 (ops->mode == MTD_OPS_RAW));
4343 if (ret)
4344 break;
4345
4346 writelen -= bytes;
4347 if (!writelen)
4348 break;
4349
4350 column = 0;
4351 buf += bytes;
4352 realpage++;
4353
4354 page = realpage & chip->pagemask;
4355 /* Check if we cross a chip boundary */
4356 if (!page) {
4357 chipnr++;
4358 nand_deselect_target(chip);
4359 nand_select_target(chip, chipnr);
4360 }
4361 }
4362
4363 ops->retlen = ops->len - writelen;
4364 if (unlikely(oob))
4365 ops->oobretlen = ops->ooblen;
4366
4367 err_out:
4368 nand_deselect_target(chip);
4369 return ret;
4370 }
4371
4372 /**
4373 * panic_nand_write - [MTD Interface] NAND write with ECC
4374 * @mtd: MTD device structure
4375 * @to: offset to write to
4376 * @len: number of bytes to write
4377 * @retlen: pointer to variable to store the number of written bytes
4378 * @buf: the data to write
4379 *
4380 * NAND write with ECC. Used when performing writes in interrupt context, this
4381 * may for example be called by mtdoops when writing an oops while in panic.
4382 */
4383 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4384 size_t *retlen, const uint8_t *buf)
4385 {
4386 struct nand_chip *chip = mtd_to_nand(mtd);
4387 int chipnr = (int)(to >> chip->chip_shift);
4388 struct mtd_oob_ops ops;
4389 int ret;
4390
4391 nand_select_target(chip, chipnr);
4392
4393 /* Wait for the device to get ready */
4394 panic_nand_wait(chip, 400);
4395
4396 memset(&ops, 0, sizeof(ops));
4397 ops.len = len;
4398 ops.datbuf = (uint8_t *)buf;
4399 ops.mode = MTD_OPS_PLACE_OOB;
4400
4401 ret = nand_do_write_ops(chip, to, &ops);
4402
4403 *retlen = ops.retlen;
4404 return ret;
4405 }
4406
4407 /**
4408 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4409 * @mtd: MTD device structure
4410 * @to: offset to write to
4411 * @ops: oob operation description structure
4412 */
4413 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4414 struct mtd_oob_ops *ops)
4415 {
4416 struct nand_chip *chip = mtd_to_nand(mtd);
4417 int ret = 0;
4418
4419 ops->retlen = 0;
4420
4421 nand_get_device(chip);
4422
4423 switch (ops->mode) {
4424 case MTD_OPS_PLACE_OOB:
4425 case MTD_OPS_AUTO_OOB:
4426 case MTD_OPS_RAW:
4427 break;
4428
4429 default:
4430 goto out;
4431 }
4432
4433 if (!ops->datbuf)
4434 ret = nand_do_write_oob(chip, to, ops);
4435 else
4436 ret = nand_do_write_ops(chip, to, ops);
4437
4438 out:
4439 nand_release_device(chip);
4440 return ret;
4441 }
4442
4443 /**
4444 * nand_erase - [MTD Interface] erase block(s)
4445 * @mtd: MTD device structure
4446 * @instr: erase instruction
4447 *
4448 * Erase one or more blocks.
4449 */
4450 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4451 {
4452 return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4453 }
4454
4455 /**
4456 * nand_erase_nand - [INTERN] erase block(s)
4457 * @chip: NAND chip object
4458 * @instr: erase instruction
4459 * @allowbbt: allow erasing the bbt area
4460 *
4461 * Erase one or more blocks.
4462 */
4463 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4464 int allowbbt)
4465 {
4466 int page, pages_per_block, ret, chipnr;
4467 loff_t len;
4468
4469 pr_debug("%s: start = 0x%012llx, len = %llu\n",
4470 __func__, (unsigned long long)instr->addr,
4471 (unsigned long long)instr->len);
4472
4473 if (check_offs_len(chip, instr->addr, instr->len))
4474 return -EINVAL;
4475
4476 /* Check if the region is secured */
4477 if (nand_region_is_secured(chip, instr->addr, instr->len))
4478 return -EIO;
4479
4480 /* Grab the lock and see if the device is available */
4481 nand_get_device(chip);
4482
4483 /* Shift to get first page */
4484 page = (int)(instr->addr >> chip->page_shift);
4485 chipnr = (int)(instr->addr >> chip->chip_shift);
4486
4487 /* Calculate pages in each block */
4488 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4489
4490 /* Select the NAND device */
4491 nand_select_target(chip, chipnr);
4492
4493 /* Check if it is write protected */
4494 if (nand_check_wp(chip)) {
4495 pr_debug("%s: device is write protected!\n",
4496 __func__);
4497 ret = -EIO;
4498 goto erase_exit;
4499 }
4500
4501 /* Loop through the pages */
4502 len = instr->len;
4503
4504 while (len) {
4505 loff_t ofs = (loff_t)page << chip->page_shift;
4506
4507 /* Check if we have a bad block; we do not erase bad blocks! */
4508 if (nand_block_checkbad(chip, ((loff_t) page) <<
4509 chip->page_shift, allowbbt)) {
4510 pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
4511 __func__, (unsigned long long)ofs);
4512 ret = -EIO;
4513 goto erase_exit;
4514 }
4515
4516 /*
4517 * Invalidate the page cache if we erase the block that
4518 * contains the currently cached page.
4519 */
4520 if (page <= chip->pagecache.page && chip->pagecache.page <
4521 (page + pages_per_block))
4522 chip->pagecache.page = -1;
4523
4524 ret = nand_erase_op(chip, (page & chip->pagemask) >>
4525 (chip->phys_erase_shift - chip->page_shift));
4526 if (ret) {
4527 pr_debug("%s: failed erase, page 0x%08x\n",
4528 __func__, page);
4529 instr->fail_addr = ofs;
4530 goto erase_exit;
4531 }
4532
4533 /* Increment page address and decrement length */
4534 len -= (1ULL << chip->phys_erase_shift);
4535 page += pages_per_block;
4536
4537 /* Check if we cross a chip boundary */
4538 if (len && !(page & chip->pagemask)) {
4539 chipnr++;
4540 nand_deselect_target(chip);
4541 nand_select_target(chip, chipnr);
4542 }
4543 }
4544
4545 ret = 0;
4546 erase_exit:
4547
4548 /* Deselect and wake up anyone waiting on the device */
4549 nand_deselect_target(chip);
4550 nand_release_device(chip);
4551
4552 /* Return more or less happy */
4553 return ret;
4554 }
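/*
 * A minimal erase sketch through the MTD interface that lands in
 * nand_erase()/nand_erase_nand() above (hypothetical caller, not part of
 * this driver). Both addr and len must be erase-block aligned, as enforced
 * by check_offs_len().
 */
static int example_erase_one_block(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info instr = { };
	int ret;

	instr.addr = ofs;
	instr.len = mtd->erasesize;

	ret = mtd_erase(mtd, &instr);
	if (ret)
		/* On failure, instr.fail_addr points at the failing offset */
		pr_warn("erase failed at 0x%llx\n",
			(unsigned long long)instr.fail_addr);

	return ret;
}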
4555
4556 /**
4557 * nand_sync - [MTD Interface] sync
4558 * @mtd: MTD device structure
4559 *
4560 * Sync is actually a wait for chip ready function.
4561 */
4562 static void nand_sync(struct mtd_info *mtd)
4563 {
4564 struct nand_chip *chip = mtd_to_nand(mtd);
4565
4566 pr_debug("%s: called\n", __func__);
4567
4568 /* Grab the lock and see if the device is available */
4569 nand_get_device(chip);
4570 /* Release it and go back */
4571 nand_release_device(chip);
4572 }
4573
4574 /**
4575 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4576 * @mtd: MTD device structure
4577 * @offs: offset relative to mtd start
4578 */
4579 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4580 {
4581 struct nand_chip *chip = mtd_to_nand(mtd);
4582 int chipnr = (int)(offs >> chip->chip_shift);
4583 int ret;
4584
4585 /* Select the NAND device */
4586 nand_get_device(chip);
4587
4588 nand_select_target(chip, chipnr);
4589
4590 ret = nand_block_checkbad(chip, offs, 0);
4591
4592 nand_deselect_target(chip);
4593 nand_release_device(chip);
4594
4595 return ret;
4596 }
4597
4598 /**
4599 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4600 * @mtd: MTD device structure
4601 * @ofs: offset relative to mtd start
4602 */
4603 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4604 {
4605 int ret;
4606
4607 ret = nand_block_isbad(mtd, ofs);
4608 if (ret) {
4609 /* If it was bad already, return success and do nothing */
4610 if (ret > 0)
4611 return 0;
4612 return ret;
4613 }
4614
4615 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4616 }
4617
4618 /**
4619 * nand_suspend - [MTD Interface] Suspend the NAND flash
4620 * @mtd: MTD device structure
4621 *
4622 * Returns 0 for success or negative error code otherwise.
4623 */
4624 static int nand_suspend(struct mtd_info *mtd)
4625 {
4626 struct nand_chip *chip = mtd_to_nand(mtd);
4627 int ret = 0;
4628
4629 mutex_lock(&chip->lock);
4630 if (chip->ops.suspend)
4631 ret = chip->ops.suspend(chip);
4632 if (!ret)
4633 chip->suspended = 1;
4634 mutex_unlock(&chip->lock);
4635
4636 return ret;
4637 }
4638
4639 /**
4640 * nand_resume - [MTD Interface] Resume the NAND flash
4641 * @mtd: MTD device structure
4642 */
4643 static void nand_resume(struct mtd_info *mtd)
4644 {
4645 struct nand_chip *chip = mtd_to_nand(mtd);
4646
4647 mutex_lock(&chip->lock);
4648 if (chip->suspended) {
4649 if (chip->ops.resume)
4650 chip->ops.resume(chip);
4651 chip->suspended = 0;
4652 } else {
4653 pr_err("%s called for a chip which is not in suspended state\n",
4654 __func__);
4655 }
4656 mutex_unlock(&chip->lock);
4657
4658 wake_up_all(&chip->resume_wq);
4659 }
4660
4661 /**
4662 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4663 * prevent further operations
4664 * @mtd: MTD device structure
4665 */
4666 static void nand_shutdown(struct mtd_info *mtd)
4667 {
4668 nand_suspend(mtd);
4669 }
4670
4671 /**
4672 * nand_lock - [MTD Interface] Lock the NAND flash
4673 * @mtd: MTD device structure
4674 * @ofs: offset byte address
4675 * @len: number of bytes to lock (must be a multiple of block/page size)
4676 */
4677 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4678 {
4679 struct nand_chip *chip = mtd_to_nand(mtd);
4680
4681 if (!chip->ops.lock_area)
4682 return -ENOTSUPP;
4683
4684 return chip->ops.lock_area(chip, ofs, len);
4685 }
4686
4687 /**
4688 * nand_unlock - [MTD Interface] Unlock the NAND flash
4689 * @mtd: MTD device structure
4690 * @ofs: offset byte address
4691 * @len: number of bytes to unlock (must be a multiple of block/page size)
4692 */
4693 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4694 {
4695 struct nand_chip *chip = mtd_to_nand(mtd);
4696
4697 if (!chip->ops.unlock_area)
4698 return -ENOTSUPP;
4699
4700 return chip->ops.unlock_area(chip, ofs, len);
4701 }
4702
4703 /* Set default functions */
4704 static void nand_set_defaults(struct nand_chip *chip)
4705 {
4706 /* If no controller is provided, use the dummy, legacy one. */
4707 if (!chip->controller) {
4708 chip->controller = &chip->legacy.dummy_controller;
4709 nand_controller_init(chip->controller);
4710 }
4711
4712 nand_legacy_set_defaults(chip);
4713
4714 if (!chip->buf_align)
4715 chip->buf_align = 1;
4716 }
4717
4718 /* Sanitize ONFI strings so we can safely print them */
4719 void sanitize_string(uint8_t *s, size_t len)
4720 {
4721 ssize_t i;
4722
4723 /* Null terminate */
4724 s[len - 1] = 0;
4725
4726 /* Remove non printable chars */
4727 for (i = 0; i < len - 1; i++) {
4728 if (s[i] < ' ' || s[i] > 127)
4729 s[i] = '?';
4730 }
4731
4732 /* Remove trailing spaces */
4733 strim(s);
4734 }
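/*
 * Illustration (hypothetical input): an ONFI model string such as
 * "MT29F4G08ABADA \x01\xff " becomes "MT29F4G08ABADA ?? " once the
 * non-printable bytes are replaced, and "MT29F4G08ABADA ??" after strim()
 * drops the trailing spaces. Since the return value of strim() is ignored,
 * leading whitespace, if any, remains in place.
 */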
4735
4736 /*
4737 * nand_id_has_period - Check if an ID string has a given wraparound period
4738 * @id_data: the ID string
4739 * @arrlen: the length of the @id_data array
4740 * @period: the period of repetition
4741 *
4742 * Check if an ID string is repeated within a given sequence of bytes at
4743 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4744 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4745 * if the repetition has a period of @period; otherwise, returns zero.
4746 */
4747 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4748 {
4749 int i, j;
4750 for (i = 0; i < period; i++)
4751 for (j = i + period; j < arrlen; j += period)
4752 if (id_data[i] != id_data[j])
4753 return 0;
4754 return 1;
4755 }
4756
4757 /*
4758 * nand_id_len - Get the length of an ID string returned by CMD_READID
4759 * @id_data: the ID string
4760 * @arrlen: the length of the @id_data array
4761 *
4762 * Returns the length of the ID string, according to known wraparound/trailing
4763 * zero patterns. If no pattern exists, returns the length of the array.
4764 */
4765 static int nand_id_len(u8 *id_data, int arrlen)
4766 {
4767 int last_nonzero, period;
4768
4769 /* Find last non-zero byte */
4770 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4771 if (id_data[last_nonzero])
4772 break;
4773
4774 /* All zeros */
4775 if (last_nonzero < 0)
4776 return 0;
4777
4778 /* Calculate wraparound period */
4779 for (period = 1; period < arrlen; period++)
4780 if (nand_id_has_period(id_data, arrlen, period))
4781 break;
4782
4783 /* There's a repeated pattern */
4784 if (period < arrlen)
4785 return period;
4786
4787 /* There are trailing zeros */
4788 if (last_nonzero < arrlen - 1)
4789 return last_nonzero + 1;
4790
4791 /* No pattern detected */
4792 return arrlen;
4793 }
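/*
 * Worked examples: the ID string {0x20, 0x01, 0x7F, 0x20, 0x01, 0x7F, 0x20,
 * 0x01} wraps with a period of 3, so nand_id_len() returns 3. The string
 * {0x98, 0xD1, 0x90, 0x15, 0x76, 0x00, 0x00, 0x00} has no wraparound but
 * three trailing zeros, so it returns last_nonzero + 1 = 5.
 */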
4794
4795 /* Extract the number of bits per cell from the 3rd byte of the extended ID */
4796 static int nand_get_bits_per_cell(u8 cellinfo)
4797 {
4798 int bits;
4799
4800 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4801 bits >>= NAND_CI_CELLTYPE_SHIFT;
4802 return bits + 1;
4803 }
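/*
 * Example, assuming the NAND_CI_CELLTYPE_MSK/SHIFT definitions from
 * rawnand.h (mask 0x0C, shift 2): a 3rd ID byte of 0x00 decodes to 1 bit
 * per cell (SLC), while 0x14 decodes to ((0x14 & 0x0C) >> 2) + 1 = 2 bits
 * per cell (MLC).
 */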
4804
4805 /*
4806 * Many newer NAND chips share similar device ID codes, which represent the
4807 * size of the chip. The rest of the parameters must be decoded according to
4808 * generic or manufacturer-specific "extended ID" decoding patterns.
4809 */
4810 void nand_decode_ext_id(struct nand_chip *chip)
4811 {
4812 struct nand_memory_organization *memorg;
4813 struct mtd_info *mtd = nand_to_mtd(chip);
4814 int extid;
4815 u8 *id_data = chip->id.data;
4816
4817 memorg = nanddev_get_memorg(&chip->base);
4818
4819 /* The 3rd id byte holds MLC / multichip data */
4820 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4821 /* The 4th id byte is the important one */
4822 extid = id_data[3];
4823
4824 /* Calc pagesize */
4825 memorg->pagesize = 1024 << (extid & 0x03);
4826 mtd->writesize = memorg->pagesize;
4827 extid >>= 2;
4828 /* Calc oobsize */
4829 memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4830 mtd->oobsize = memorg->oobsize;
4831 extid >>= 2;
4832 /* Calc blocksize. Blocksize is multiples of 64KiB */
4833 memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
4834 memorg->pagesize;
4835 mtd->erasesize = (64 * 1024) << (extid & 0x03);
4836 extid >>= 2;
4837 /* Get buswidth information */
4838 if (extid & 0x1)
4839 chip->options |= NAND_BUSWIDTH_16;
4840 }
4841 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
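/*
 * Worked example of the extended-ID decode above, for a hypothetical chip
 * answering READID with EC DA 10 95 (a classic Samsung-style 2Gbit part):
 * extid = 0x95 = 0b10010101, shifted right by two between fields.
 *   pagesize:  1024 << (0x95 & 0x03)              = 1024 << 1 = 2048 bytes
 *   oobsize:   (8 << (0x25 & 0x01)) * (2048 >> 9) = 16 * 4    = 64 bytes
 *   erasesize: (64 * 1024) << (0x09 & 0x03)       = 128 KiB
 *   buswidth:  (0x02 & 0x1) == 0                  -> 8-bit bus
 */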
4842
4843 /*
4844 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4845 * decodes a matching ID table entry and assigns the MTD size parameters for
4846 * the chip.
4847 */
4848 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4849 {
4850 struct mtd_info *mtd = nand_to_mtd(chip);
4851 struct nand_memory_organization *memorg;
4852
4853 memorg = nanddev_get_memorg(&chip->base);
4854
4855 memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4856 mtd->erasesize = type->erasesize;
4857 memorg->pagesize = type->pagesize;
4858 mtd->writesize = memorg->pagesize;
4859 memorg->oobsize = memorg->pagesize / 32;
4860 mtd->oobsize = memorg->oobsize;
4861
4862 /* All legacy ID NAND are small-page, SLC */
4863 memorg->bits_per_cell = 1;
4864 }
4865
4866 /*
4867 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4868 * heuristic patterns using various detected parameters (e.g., manufacturer,
4869 * page size, cell-type information).
4870 */
4871 static void nand_decode_bbm_options(struct nand_chip *chip)
4872 {
4873 struct mtd_info *mtd = nand_to_mtd(chip);
4874
4875 /* Set the bad block position */
4876 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4877 chip->badblockpos = NAND_BBM_POS_LARGE;
4878 else
4879 chip->badblockpos = NAND_BBM_POS_SMALL;
4880 }
4881
4882 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4883 {
4884 return type->id_len;
4885 }
4886
4887 static bool find_full_id_nand(struct nand_chip *chip,
4888 struct nand_flash_dev *type)
4889 {
4890 struct nand_device *base = &chip->base;
4891 struct nand_ecc_props requirements;
4892 struct mtd_info *mtd = nand_to_mtd(chip);
4893 struct nand_memory_organization *memorg;
4894 u8 *id_data = chip->id.data;
4895
4896 memorg = nanddev_get_memorg(&chip->base);
4897
4898 if (!strncmp(type->id, id_data, type->id_len)) {
4899 memorg->pagesize = type->pagesize;
4900 mtd->writesize = memorg->pagesize;
4901 memorg->pages_per_eraseblock = type->erasesize /
4902 type->pagesize;
4903 mtd->erasesize = type->erasesize;
4904 memorg->oobsize = type->oobsize;
4905 mtd->oobsize = memorg->oobsize;
4906
4907 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4908 memorg->eraseblocks_per_lun =
4909 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4910 memorg->pagesize *
4911 memorg->pages_per_eraseblock);
4912 chip->options |= type->options;
4913 requirements.strength = NAND_ECC_STRENGTH(type);
4914 requirements.step_size = NAND_ECC_STEP(type);
4915 nanddev_set_ecc_requirements(base, &requirements);
4916
4917 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4918 if (!chip->parameters.model)
4919 return false;
4920
4921 return true;
4922 }
4923 return false;
4924 }
4925
4926 /*
4927 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4928 * compliant and does not have a full-id or legacy-id entry in the nand_ids
4929 * table.
4930 */
4931 static void nand_manufacturer_detect(struct nand_chip *chip)
4932 {
4933 /*
4934 * Try manufacturer detection if available and use
4935 * nand_decode_ext_id() otherwise.
4936 */
4937 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4938 chip->manufacturer.desc->ops->detect) {
4939 struct nand_memory_organization *memorg;
4940
4941 memorg = nanddev_get_memorg(&chip->base);
4942
4943 /* The 3rd id byte holds MLC / multichip data */
4944 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4945 chip->manufacturer.desc->ops->detect(chip);
4946 } else {
4947 nand_decode_ext_id(chip);
4948 }
4949 }
4950
4951 /*
4952 * Manufacturer initialization. This function is called for all NANDs including
4953 * ONFI and JEDEC compliant ones.
4954 * Manufacturer drivers should put all their specific initialization code in
4955 * their ->init() hook.
4956 */
4957 static int nand_manufacturer_init(struct nand_chip *chip)
4958 {
4959 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4960 !chip->manufacturer.desc->ops->init)
4961 return 0;
4962
4963 return chip->manufacturer.desc->ops->init(chip);
4964 }
4965
4966 /*
4967 * Manufacturer cleanup. This function is called for all NANDs including
4968 * ONFI and JEDEC compliant ones.
4969 * Manufacturer drivers should put all their specific cleanup code in their
4970 * ->cleanup() hook.
4971 */
4972 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4973 {
4974 /* Release manufacturer private data */
4975 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4976 chip->manufacturer.desc->ops->cleanup)
4977 chip->manufacturer.desc->ops->cleanup(chip);
4978 }
4979
4980 static const char *
4981 nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
4982 {
4983 return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4984 }
4985
4986 /*
4987 * Get the flash and manufacturer id and lookup if the type is supported.
4988 */
4989 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4990 {
4991 const struct nand_manufacturer_desc *manufacturer_desc;
4992 struct mtd_info *mtd = nand_to_mtd(chip);
4993 struct nand_memory_organization *memorg;
4994 int busw, ret;
4995 u8 *id_data = chip->id.data;
4996 u8 maf_id, dev_id;
4997 u64 targetsize;
4998
4999 /*
5000 * Let's start by initializing memorg fields that might be left
5001 * unassigned by the ID-based detection logic.
5002 */
5003 memorg = nanddev_get_memorg(&chip->base);
5004 memorg->planes_per_lun = 1;
5005 memorg->luns_per_target = 1;
5006
5007 /*
5008 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
5009 * after power-up.
5010 */
5011 ret = nand_reset(chip, 0);
5012 if (ret)
5013 return ret;
5014
5015 /* Select the device */
5016 nand_select_target(chip, 0);
5017
5018 /* Send the command for reading device ID */
5019 ret = nand_readid_op(chip, 0, id_data, 2);
5020 if (ret)
5021 return ret;
5022
5023 /* Read manufacturer and device IDs */
5024 maf_id = id_data[0];
5025 dev_id = id_data[1];
5026
5027 /*
5028 * Try again to make sure, as on some systems bus-hold or other
5029 * interface concerns can cause random data to appear that looks
5030 * like a credible NAND flash. If the two results do not match,
5031 * ignore the device completely.
5032 */
5033
5034 /* Read entire ID string */
5035 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
5036 if (ret)
5037 return ret;
5038
5039 if (id_data[0] != maf_id || id_data[1] != dev_id) {
5040 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
5041 maf_id, dev_id, id_data[0], id_data[1]);
5042 return -ENODEV;
5043 }
5044
5045 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
5046
5047 /* Try to identify manufacturer */
5048 manufacturer_desc = nand_get_manufacturer_desc(maf_id);
5049 chip->manufacturer.desc = manufacturer_desc;
5050
5051 if (!type)
5052 type = nand_flash_ids;
5053
5054 /*
5055 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
5056 * override it.
5057 * This is required to make sure initial NAND bus width set by the
5058 * NAND controller driver is coherent with the real NAND bus width
5059 * (extracted by auto-detection code).
5060 */
5061 busw = chip->options & NAND_BUSWIDTH_16;
5062
5063 /*
5064 * The flag is only set (never cleared), so reset it to its default value
5065 * before starting auto-detection.
5066 */
5067 chip->options &= ~NAND_BUSWIDTH_16;
5068
5069 for (; type->name != NULL; type++) {
5070 if (is_full_id_nand(type)) {
5071 if (find_full_id_nand(chip, type))
5072 goto ident_done;
5073 } else if (dev_id == type->dev_id) {
5074 break;
5075 }
5076 }
5077
5078 if (!type->name || !type->pagesize) {
5079 /* Check if the chip is ONFI compliant */
5080 ret = nand_onfi_detect(chip);
5081 if (ret < 0)
5082 return ret;
5083 else if (ret)
5084 goto ident_done;
5085
5086 /* Check if the chip is JEDEC compliant */
5087 ret = nand_jedec_detect(chip);
5088 if (ret < 0)
5089 return ret;
5090 else if (ret)
5091 goto ident_done;
5092 }
5093
5094 if (!type->name)
5095 return -ENODEV;
5096
5097 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
5098 if (!chip->parameters.model)
5099 return -ENOMEM;
5100
5101 if (!type->pagesize)
5102 nand_manufacturer_detect(chip);
5103 else
5104 nand_decode_id(chip, type);
5105
5106 /* Get chip options */
5107 chip->options |= type->options;
5108
5109 memorg->eraseblocks_per_lun =
5110 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
5111 memorg->pagesize *
5112 memorg->pages_per_eraseblock);
5113
5114 ident_done:
5115 if (!mtd->name)
5116 mtd->name = chip->parameters.model;
5117
5118 if (chip->options & NAND_BUSWIDTH_AUTO) {
5119 WARN_ON(busw & NAND_BUSWIDTH_16);
5120 nand_set_defaults(chip);
5121 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
5122 /*
5123 * Check if the bus width is correct. Hardware drivers should
5124 * have set up the chip correctly!
5125 */
5126 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5127 maf_id, dev_id);
5128 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
5129 mtd->name);
5130 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
5131 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
5132 ret = -EINVAL;
5133
5134 goto free_detect_allocation;
5135 }
5136
5137 nand_decode_bbm_options(chip);
5138
5139 /* Calculate the address shift from the page size */
5140 chip->page_shift = ffs(mtd->writesize) - 1;
5141 /* Convert chipsize to number of pages per chip -1 */
5142 targetsize = nanddev_target_size(&chip->base);
5143 chip->pagemask = (targetsize >> chip->page_shift) - 1;
5144
5145 chip->bbt_erase_shift = chip->phys_erase_shift =
5146 ffs(mtd->erasesize) - 1;
5147 if (targetsize & 0xffffffff)
5148 chip->chip_shift = ffs((unsigned)targetsize) - 1;
5149 else {
5150 chip->chip_shift = ffs((unsigned)(targetsize >> 32));
5151 chip->chip_shift += 32 - 1;
5152 }
5153
5154 if (chip->chip_shift - chip->page_shift > 16)
5155 chip->options |= NAND_ROW_ADDR_3;
5156
5157 chip->badblockbits = 8;
5158
5159 nand_legacy_adjust_cmdfunc(chip);
5160
5161 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5162 maf_id, dev_id);
5163 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
5164 chip->parameters.model);
5165 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
5166 (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
5167 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
5168 return 0;
5169
5170 free_detect_allocation:
5171 kfree(chip->parameters.model);
5172
5173 return ret;
5174 }
5175
5176 static enum nand_ecc_engine_type
5177 of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
5178 {
5179 enum nand_ecc_legacy_mode {
5180 NAND_ECC_INVALID,
5181 NAND_ECC_NONE,
5182 NAND_ECC_SOFT,
5183 NAND_ECC_SOFT_BCH,
5184 NAND_ECC_HW,
5185 NAND_ECC_HW_SYNDROME,
5186 NAND_ECC_ON_DIE,
5187 };
5188 const char * const nand_ecc_legacy_modes[] = {
5189 [NAND_ECC_NONE] = "none",
5190 [NAND_ECC_SOFT] = "soft",
5191 [NAND_ECC_SOFT_BCH] = "soft_bch",
5192 [NAND_ECC_HW] = "hw",
5193 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
5194 [NAND_ECC_ON_DIE] = "on-die",
5195 };
5196 enum nand_ecc_legacy_mode eng_type;
5197 const char *pm;
5198 int err;
5199
5200 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5201 if (err)
5202 return NAND_ECC_ENGINE_TYPE_INVALID;
5203
5204 for (eng_type = NAND_ECC_NONE;
5205 eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
5206 if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
5207 switch (eng_type) {
5208 case NAND_ECC_NONE:
5209 return NAND_ECC_ENGINE_TYPE_NONE;
5210 case NAND_ECC_SOFT:
5211 case NAND_ECC_SOFT_BCH:
5212 return NAND_ECC_ENGINE_TYPE_SOFT;
5213 case NAND_ECC_HW:
5214 case NAND_ECC_HW_SYNDROME:
5215 return NAND_ECC_ENGINE_TYPE_ON_HOST;
5216 case NAND_ECC_ON_DIE:
5217 return NAND_ECC_ENGINE_TYPE_ON_DIE;
5218 default:
5219 break;
5220 }
5221 }
5222 }
5223
5224 return NAND_ECC_ENGINE_TYPE_INVALID;
5225 }
5226
5227 static enum nand_ecc_placement
5228 of_get_rawnand_ecc_placement_legacy(struct device_node *np)
5229 {
5230 const char *pm;
5231 int err;
5232
5233 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5234 if (!err) {
5235 if (!strcasecmp(pm, "hw_syndrome"))
5236 return NAND_ECC_PLACEMENT_INTERLEAVED;
5237 }
5238
5239 return NAND_ECC_PLACEMENT_UNKNOWN;
5240 }
5241
5242 static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
5243 {
5244 const char *pm;
5245 int err;
5246
5247 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5248 if (!err) {
5249 if (!strcasecmp(pm, "soft"))
5250 return NAND_ECC_ALGO_HAMMING;
5251 else if (!strcasecmp(pm, "soft_bch"))
5252 return NAND_ECC_ALGO_BCH;
5253 }
5254
5255 return NAND_ECC_ALGO_UNKNOWN;
5256 }
5257
5258 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
5259 {
5260 struct device_node *dn = nand_get_flash_node(chip);
5261 struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
5262
5263 if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5264 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
5265
5266 if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
5267 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
5268
5269 if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
5270 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
5271 }
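/*
 * Example device-tree fragment exercising the legacy parsing above (the
 * node and value are illustrative):
 *
 *   nand@0 {
 *           nand-ecc-mode = "hw_syndrome";
 *   };
 *
 * "hw_syndrome" maps to NAND_ECC_ENGINE_TYPE_ON_HOST with
 * NAND_ECC_PLACEMENT_INTERLEAVED, while "soft" and "soft_bch" both select
 * NAND_ECC_ENGINE_TYPE_SOFT with the Hamming or BCH algorithm respectively.
 */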
5272
5273 static int of_get_nand_bus_width(struct nand_chip *chip)
5274 {
5275 struct device_node *dn = nand_get_flash_node(chip);
5276 u32 val;
5277 int ret;
5278
5279 ret = of_property_read_u32(dn, "nand-bus-width", &val);
5280 if (ret == -EINVAL)
5281 /* Buswidth defaults to 8 if the property does not exist. */
5282 return 0;
5283 else if (ret)
5284 return ret;
5285
5286 if (val == 16)
5287 chip->options |= NAND_BUSWIDTH_16;
5288 else if (val != 8)
5289 return -EINVAL;
5290 return 0;
5291 }
5292
5293 static int of_get_nand_secure_regions(struct nand_chip *chip)
5294 {
5295 struct device_node *dn = nand_get_flash_node(chip);
5296 struct property *prop;
5297 int nr_elem, i, j;
5298
5299 /* Only proceed if the "secure-regions" property is present in DT */
5300 prop = of_find_property(dn, "secure-regions", NULL);
5301 if (!prop)
5302 return 0;
5303
5304 nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
5305 if (nr_elem <= 0)
5306 return nr_elem;
5307
5308 chip->nr_secure_regions = nr_elem / 2;
5309 chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
5310 GFP_KERNEL);
5311 if (!chip->secure_regions)
5312 return -ENOMEM;
5313
5314 for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
5315 of_property_read_u64_index(dn, "secure-regions", j,
5316 &chip->secure_regions[i].offset);
5317 of_property_read_u64_index(dn, "secure-regions", j + 1,
5318 &chip->secure_regions[i].size);
5319 }
5320
5321 return 0;
5322 }
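/*
 * Example "secure-regions" property (illustrative values): each region is an
 * <offset size> pair of 64-bit cells, so the fragment below declares one
 * 1MiB secure region starting at offset 0:
 *
 *   secure-regions = /bits/ 64 <0x0 0x100000>;
 */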
5323
5324 /**
5325 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
5326 * @dev: Device that will be parsed. Also used for managed allocations.
5327 * @cs_array: Array of GPIO desc pointers allocated on success
5328 * @ncs_array: Number of entries in @cs_array updated on success.
5329 * Return: 0 on success, an error otherwise.
5330 */
5331 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
5332 unsigned int *ncs_array)
5333 {
5334 struct device_node *np = dev->of_node;
5335 struct gpio_desc **descs;
5336 int ndescs, i;
5337
5338 ndescs = of_gpio_named_count(np, "cs-gpios");
5339 if (ndescs < 0) {
5340 dev_dbg(dev, "No valid cs-gpios property\n");
5341 return 0;
5342 }
5343
5344 descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
5345 if (!descs)
5346 return -ENOMEM;
5347
5348 for (i = 0; i < ndescs; i++) {
5349 descs[i] = gpiod_get_index_optional(dev, "cs", i,
5350 GPIOD_OUT_HIGH);
5351 if (IS_ERR(descs[i]))
5352 return PTR_ERR(descs[i]);
5353 }
5354
5355 *ncs_array = ndescs;
5356 *cs_array = descs;
5357
5358 return 0;
5359 }
5360 EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
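/*
 * A hedged probe-time sketch (hypothetical controller code, not part of this
 * file): parse the cs-gpios property and assert the first chip-select line.
 */
static int example_probe_gpio_cs(struct device *dev)
{
	struct gpio_desc **cs_array;
	unsigned int ncs_array = 0;
	int ret;

	ret = rawnand_dt_parse_gpio_cs(dev, &cs_array, &ncs_array);
	if (ret)
		return ret;

	/* GPIOs were requested GPIOD_OUT_HIGH; drive CS#0 to its active level */
	if (ncs_array && cs_array[0])
		gpiod_set_value_cansleep(cs_array[0], 1);

	return 0;
}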
5361
5362 static int rawnand_dt_init(struct nand_chip *chip)
5363 {
5364 struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
5365 struct device_node *dn = nand_get_flash_node(chip);
5366 int ret;
5367
5368 if (!dn)
5369 return 0;
5370
5371 ret = of_get_nand_bus_width(chip);
5372 if (ret)
5373 return ret;
5374
5375 if (of_property_read_bool(dn, "nand-is-boot-medium"))
5376 chip->options |= NAND_IS_BOOT_MEDIUM;
5377
5378 if (of_property_read_bool(dn, "nand-on-flash-bbt"))
5379 chip->bbt_options |= NAND_BBT_USE_FLASH;
5380
5381 of_get_nand_ecc_user_config(nand);
5382 of_get_nand_ecc_legacy_user_config(chip);
5383
5384 /*
5385 * If neither the user nor the NAND controller have requested a specific
5386 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
5387 */
5388 nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
5389
5390 /*
5391 * Use the user requested engine type, unless there is none, in this
5392 * case default to the NAND controller choice, otherwise fallback to
5393 * the raw NAND default one.
5394 */
5395 if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
5396 chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
5397 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5398 chip->ecc.engine_type = nand->ecc.defaults.engine_type;
5399
5400 chip->ecc.placement = nand->ecc.user_conf.placement;
5401 chip->ecc.algo = nand->ecc.user_conf.algo;
5402 chip->ecc.strength = nand->ecc.user_conf.strength;
5403 chip->ecc.size = nand->ecc.user_conf.step_size;
5404
5405 return 0;
5406 }

/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This
 * separation prevented dynamic allocations during this phase, which was
 * inconvenient and has been banned for the benefit of the
 * ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}

static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}

int rawnand_sw_hamming_init(struct nand_chip *chip)
{
	struct nand_ecc_sw_hamming_conf *engine_conf;
	struct nand_device *base = &chip->base;
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
	base->ecc.user_conf.strength = chip->ecc.strength;
	base->ecc.user_conf.step_size = chip->ecc.size;

	ret = nand_ecc_sw_hamming_init_ctx(base);
	if (ret)
		return ret;

	engine_conf = base->ecc.ctx.priv;

	if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
		engine_conf->sm_order = true;

	chip->ecc.size = base->ecc.ctx.conf.step_size;
	chip->ecc.strength = base->ecc.ctx.conf.strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);

int rawnand_sw_hamming_calculate(struct nand_chip *chip,
				 const unsigned char *buf,
				 unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_calculate(base, buf, code);
}
EXPORT_SYMBOL(rawnand_sw_hamming_calculate);

int rawnand_sw_hamming_correct(struct nand_chip *chip,
			       unsigned char *buf,
			       unsigned char *read_ecc,
			       unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_hamming_correct);

void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_hamming_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
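
/*
 * Usage sketch (illustrative, not from a real driver): a controller that
 * relies on software Hamming typically sets the step size and strength it
 * wants, then lets this helper compute the remaining fields, e.g. from an
 * ->attach_chip() hook:
 *
 *	chip->ecc.size = 256;
 *	chip->ecc.strength = 1;
 *	ret = rawnand_sw_hamming_init(chip);
 *	if (ret)
 *		return ret;
 *
 * The matching rawnand_sw_hamming_cleanup() call belongs in the teardown
 * path once the chip is no longer used.
 */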

int rawnand_sw_bch_init(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;
	const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
	base->ecc.user_conf.step_size = chip->ecc.size;
	base->ecc.user_conf.strength = chip->ecc.strength;

	ret = nand_ecc_sw_bch_init_ctx(base);
	if (ret)
		return ret;

	chip->ecc.size = ecc_conf->step_size;
	chip->ecc.strength = ecc_conf->strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_bch_init);

static int rawnand_sw_bch_calculate(struct nand_chip *chip,
				    const unsigned char *buf,
				    unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_bch_calculate(base, buf, code);
}

int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
			   unsigned char *read_ecc, unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_bch_correct);

void rawnand_sw_bch_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_bch_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
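
/*
 * Usage sketch (illustrative): software BCH is configured the same way,
 * with a step size and strength chosen by the driver or by device tree,
 * e.g.:
 *
 *	chip->ecc.size = 512;
 *	chip->ecc.strength = 4;
 *	ret = rawnand_sw_bch_init(chip);
 *	if (ret)
 *		return ret;
 *
 * With these values the BCH code works over GF(2^13) (since
 * fls(8 * 512) = 13), so each 512-byte step needs
 * DIV_ROUND_UP(4 * 13, 8) = 7 ECC bytes.
 */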

static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}

static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = rawnand_sw_hamming_calculate;
		ecc->correct = rawnand_sw_hamming_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		ret = rawnand_sw_hamming_init(chip);
		if (ret) {
			WARN(1, "Hamming ECC initialization failed!\n");
			return ret;
		}

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = rawnand_sw_bch_calculate;
		ecc->correct = rawnand_sw_bch_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * We can only maximize the ECC config when the default layout
		 * is used, otherwise we don't know how many bytes can really
		 * be used.
		 */
		if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
		    mtd->ooblayout != nand_get_large_page_ooblayout())
			nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;

		ret = rawnand_sw_bch_init(chip);
		if (ret) {
			WARN(1, "BCH ECC initialization failed!\n");
			return ret;
		}

		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}

/**
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When ECC step size and strength are already set, check if they are supported
 * by the controller and the calculated ECC bytes fit within the chip's OOB.
 * On success, the calculated ECC bytes are set.
 */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)
				continue;

			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				return ecc_bytes;

			if (ecc_bytes * nsteps > oobavail) {
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB\n",
				       preset_step, preset_strength);
				return -ENOSPC;
			}

			chip->ecc.bytes = ecc_bytes;

			return 0;
		}
	}

	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller\n",
	       preset_step, preset_strength);

	return -ENOTSUPP;
}
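
/*
 * Worked example (values are illustrative): assume a BCH-style
 * calc_ecc_bytes() of DIV_ROUND_UP(strength * fls(8 * step), 8), a preset
 * (step, strength) of (512, 8) and a 2048-byte page. Each step then costs
 * DIV_ROUND_UP(8 * 13, 8) = 13 ECC bytes, and with 2048 / 512 = 4 steps
 * the total of 52 bytes must fit within @oobavail for the check to pass.
 */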

/**
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step, best_strength, best_ecc_bytes;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulting reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
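
/*
 * Worked example (illustrative): a chip requiring 4 bits per 512 bytes on
 * a 2048-byte page needs req_corr = 4 * 4 = 16 correctable bits per page.
 * With the BCH-style cost function from the previous example, a controller
 * offering strengths 4 and 8 at a 512-byte step would pay 4 * 7 = 28 ECC
 * bytes for strength 4 and 4 * 13 = 52 bytes for strength 8; both satisfy
 * the requirement, so the cheaper (512, 4) configuration is selected.
 */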

/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength, best_ecc_bytes;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC configuration according to the following logic.
 *
 * 1. If both ECC step size and ECC strength are already set (usually by DT)
 *    then check if they are supported by this controller.
 * 2. If the user provided the nand-ecc-maximize property, then select the
 *    maximum ECC strength.
 * 3. Otherwise, try to match the ECC step size and ECC strength closest
 *    to the chip's requirement. If the available OOB size can't fit the chip
 *    requirement then fall back to the maximum ECC step size and ECC
 *    strength.
 *
 * On success, the chosen ECC settings are set.
 */
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);

	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
		return -EINVAL;

	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, caps, oobavail);

	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
		return nand_maximize_ecc(chip, caps, oobavail);

	if (!nand_match_ecc_req(chip, caps, oobavail))
		return 0;

	return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
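
/*
 * Minimal usage sketch (assumed driver code, not from this file): a
 * controller driver describes what its ECC engine supports and lets the
 * core pick a configuration. The "foo" names are hypothetical.
 *
 *	static int foo_calc_ecc_bytes(int step_size, int strength)
 *	{
 *		return DIV_ROUND_UP(strength * fls(8 * step_size), 8);
 *	}
 *	NAND_ECC_CAPS_SINGLE(foo_ecc_caps, foo_calc_ecc_bytes, 512, 4, 8);
 *
 *	static int foo_attach_chip(struct nand_chip *chip)
 *	{
 *		struct mtd_info *mtd = nand_to_mtd(chip);
 *
 *		return nand_ecc_choose_conf(chip, &foo_ecc_caps,
 *					    mtd->oobsize - 2);
 *	}
 *
 * Reserving 2 OOB bytes for the bad block marker, as done here, is a
 * common but driver-specific choice.
 */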

static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	unsigned int eb = nanddev_pos_to_row(nand, pos);
	int ret;

	eb >>= nand->rowconv.eraseblock_addr_shift;

	nand_select_target(chip, pos->target);
	ret = nand_erase_op(chip, eb);
	nand_deselect_target(chip);

	return ret;
}

static int rawnand_markbad(struct nand_device *nand,
			   const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);

	return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
}

static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	int ret;

	nand_select_target(chip, pos->target);
	ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
	nand_deselect_target(chip);

	return ret;
}

static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};

/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the
	 * NAND chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH) &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_HAMMING)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kinds of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check the ECC mode. If 3-byte/512-byte hardware ECC is selected
	 * but the page size is only 256 bytes, fall back to software ECC.
	 */

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate ECC info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	if (!ecc->steps)
		ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (!ecc->total) {
		ecc->total = ecc->steps * ecc->bytes;
		chip->base.ecc.ctx.total = ecc->total;
	}

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
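	/*
	 * For example (illustrative): a 2048-byte page with 4 ECC steps gets
	 * subpage_sft = 2, i.e. four 512-byte subpages.
	 */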
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior to the scan_bbt()
	 * call. scan_bbt() might invoke mtd_read(), thus bitflip_threshold
	 * must be properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
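	/*
	 * E.g. with an ECC strength of 8, the default threshold becomes
	 * DIV_ROUND_UP(8 * 3, 4) = 6 bitflips per ECC step.
	 */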

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	/*
	 * Look for secure regions in the NAND chip. These regions are supposed
	 * to be protected by a secure element like Trustzone, so read/write
	 * accesses to them are blocked at runtime by this driver.
	 */
	ret = of_get_nand_secure_regions(chip);
	if (ret)
		goto err_free_interface_config;

	/* Check if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_secure_regions;

	return 0;

err_free_secure_regions:
	kfree(chip->secure_regions);

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}

static int nand_attach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->attach_chip)
		return chip->controller->ops->attach_chip(chip);

	return 0;
}

static void nand_detach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->detach_chip)
		chip->controller->ops->detach_chip(chip);
}

/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for.
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
		       struct nand_flash_dev *ids)
{
	int ret;

	if (!maxchips)
		return -EINVAL;

	ret = nand_scan_ident(chip, maxchips, ids);
	if (ret)
		return ret;

	ret = nand_attach(chip);
	if (ret)
		goto cleanup_ident;

	ret = nand_scan_tail(chip);
	if (ret)
		goto detach_chip;

	return 0;

detach_chip:
	nand_detach(chip);
cleanup_ident:
	nand_scan_ident_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);
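
/*
 * Minimal probe-path sketch (illustrative; uses the nand_scan() wrapper,
 * which calls nand_scan_with_ids() with a NULL IDs table). Controller
 * setup and its error handling are omitted.
 *
 *	ret = nand_scan(chip, 1);
 *	if (ret)
 *		return ret;
 *
 *	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
 *	if (ret)
 *		nand_cleanup(chip);
 */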

/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
			rawnand_sw_hamming_cleanup(chip);
		else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			rawnand_sw_bch_cleanup(chip);
	}

	nanddev_cleanup(&chip->base);

	/* Free secure regions data */
	kfree(chip->secure_regions);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
	    & NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the data interface */
	kfree(chip->best_interface_config);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);
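
/*
 * Removal-path sketch (illustrative): unregister the MTD device first,
 * then release the chip resources.
 *
 *	ret = mtd_device_unregister(nand_to_mtd(chip));
 *	WARN_ON(ret);
 *	nand_cleanup(chip);
 */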

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");