1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Core registration and callback routines for MTD
4 * drivers and users.
5 *
6 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7 * Copyright © 2006 Red Hat UK Limited
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
17 #include <linux/fs.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
31
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
34
35 #include "mtdcore.h"
36
37 struct backing_dev_info *mtd_bdi;
38
39 #ifdef CONFIG_PM_SLEEP
40
static int mtd_cls_suspend(struct device *dev)
42 {
43 struct mtd_info *mtd = dev_get_drvdata(dev);
44
45 return mtd ? mtd_suspend(mtd) : 0;
46 }
47
static int mtd_cls_resume(struct device *dev)
49 {
50 struct mtd_info *mtd = dev_get_drvdata(dev);
51
52 if (mtd)
53 mtd_resume(mtd);
54 return 0;
55 }
56
57 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59 #else
60 #define MTD_CLS_PM_OPS NULL
61 #endif
62
63 static struct class mtd_class = {
64 .name = "mtd",
65 .owner = THIS_MODULE,
66 .pm = MTD_CLS_PM_OPS,
67 };
68
69 static DEFINE_IDR(mtd_idr);
70
71 /* These are exported solely for the purpose of mtd_blkdevs.c. You
72 should not use them for _anything_ else */
73 DEFINE_MUTEX(mtd_table_mutex);
74 EXPORT_SYMBOL_GPL(mtd_table_mutex);
75
struct mtd_info *__mtd_next_device(int i)
77 {
78 return idr_get_next(&mtd_idr, &i);
79 }
80 EXPORT_SYMBOL_GPL(__mtd_next_device);
81
82 static LIST_HEAD(mtd_notifiers);
83
84
85 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86
87 /* REVISIT once MTD uses the driver model better, whoever allocates
88 * the mtd_info will probably want to use the release() hook...
89 */
static void mtd_release(struct device *dev)
91 {
92 struct mtd_info *mtd = dev_get_drvdata(dev);
93 dev_t index = MTD_DEVT(mtd->index);
94
95 /* remove /dev/mtdXro node */
96 device_destroy(&mtd_class, index + 1);
97 }
98
99 #define MTD_DEVICE_ATTR_RO(name) \
100 static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
101
102 #define MTD_DEVICE_ATTR_RW(name) \
103 static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
104
static ssize_t mtd_type_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
107 {
108 struct mtd_info *mtd = dev_get_drvdata(dev);
109 char *type;
110
111 switch (mtd->type) {
112 case MTD_ABSENT:
113 type = "absent";
114 break;
115 case MTD_RAM:
116 type = "ram";
117 break;
118 case MTD_ROM:
119 type = "rom";
120 break;
121 case MTD_NORFLASH:
122 type = "nor";
123 break;
124 case MTD_NANDFLASH:
125 type = "nand";
126 break;
127 case MTD_DATAFLASH:
128 type = "dataflash";
129 break;
130 case MTD_UBIVOLUME:
131 type = "ubi";
132 break;
133 case MTD_MLCNANDFLASH:
134 type = "mlc-nand";
135 break;
136 default:
137 type = "unknown";
138 }
139
140 return sysfs_emit(buf, "%s\n", type);
141 }
142 MTD_DEVICE_ATTR_RO(type);
143
static ssize_t mtd_flags_show(struct device *dev,
145 struct device_attribute *attr, char *buf)
146 {
147 struct mtd_info *mtd = dev_get_drvdata(dev);
148
149 return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
150 }
151 MTD_DEVICE_ATTR_RO(flags);
152
static ssize_t mtd_size_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
155 {
156 struct mtd_info *mtd = dev_get_drvdata(dev);
157
158 return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
159 }
160 MTD_DEVICE_ATTR_RO(size);
161
static ssize_t mtd_erasesize_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
164 {
165 struct mtd_info *mtd = dev_get_drvdata(dev);
166
167 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
168 }
169 MTD_DEVICE_ATTR_RO(erasesize);
170
static ssize_t mtd_writesize_show(struct device *dev,
172 struct device_attribute *attr, char *buf)
173 {
174 struct mtd_info *mtd = dev_get_drvdata(dev);
175
176 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
177 }
178 MTD_DEVICE_ATTR_RO(writesize);
179
static ssize_t mtd_subpagesize_show(struct device *dev,
181 struct device_attribute *attr, char *buf)
182 {
183 struct mtd_info *mtd = dev_get_drvdata(dev);
184 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
185
186 return sysfs_emit(buf, "%u\n", subpagesize);
187 }
188 MTD_DEVICE_ATTR_RO(subpagesize);
189
static ssize_t mtd_oobsize_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192 {
193 struct mtd_info *mtd = dev_get_drvdata(dev);
194
195 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
196 }
197 MTD_DEVICE_ATTR_RO(oobsize);
198
static ssize_t mtd_oobavail_show(struct device *dev,
200 struct device_attribute *attr, char *buf)
201 {
202 struct mtd_info *mtd = dev_get_drvdata(dev);
203
204 return sysfs_emit(buf, "%u\n", mtd->oobavail);
205 }
206 MTD_DEVICE_ATTR_RO(oobavail);
207
static ssize_t mtd_numeraseregions_show(struct device *dev,
209 struct device_attribute *attr, char *buf)
210 {
211 struct mtd_info *mtd = dev_get_drvdata(dev);
212
213 return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
214 }
215 MTD_DEVICE_ATTR_RO(numeraseregions);
216
static ssize_t mtd_name_show(struct device *dev,
218 struct device_attribute *attr, char *buf)
219 {
220 struct mtd_info *mtd = dev_get_drvdata(dev);
221
222 return sysfs_emit(buf, "%s\n", mtd->name);
223 }
224 MTD_DEVICE_ATTR_RO(name);
225
static ssize_t mtd_ecc_strength_show(struct device *dev,
227 struct device_attribute *attr, char *buf)
228 {
229 struct mtd_info *mtd = dev_get_drvdata(dev);
230
231 return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
232 }
233 MTD_DEVICE_ATTR_RO(ecc_strength);
234
static ssize_t mtd_bitflip_threshold_show(struct device *dev,
236 struct device_attribute *attr,
237 char *buf)
238 {
239 struct mtd_info *mtd = dev_get_drvdata(dev);
240
241 return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
242 }
243
static ssize_t mtd_bitflip_threshold_store(struct device *dev,
245 struct device_attribute *attr,
246 const char *buf, size_t count)
247 {
248 struct mtd_info *mtd = dev_get_drvdata(dev);
249 unsigned int bitflip_threshold;
250 int retval;
251
252 retval = kstrtouint(buf, 0, &bitflip_threshold);
253 if (retval)
254 return retval;
255
256 mtd->bitflip_threshold = bitflip_threshold;
257 return count;
258 }
259 MTD_DEVICE_ATTR_RW(bitflip_threshold);
260
static ssize_t mtd_ecc_step_size_show(struct device *dev,
262 struct device_attribute *attr, char *buf)
263 {
264 struct mtd_info *mtd = dev_get_drvdata(dev);
265
266 return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
267
268 }
269 MTD_DEVICE_ATTR_RO(ecc_step_size);
270
static ssize_t mtd_corrected_bits_show(struct device *dev,
272 struct device_attribute *attr, char *buf)
273 {
274 struct mtd_info *mtd = dev_get_drvdata(dev);
275 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
276
277 return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
278 }
279 MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */
280
static ssize_t mtd_ecc_failures_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283 {
284 struct mtd_info *mtd = dev_get_drvdata(dev);
285 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
286
287 return sysfs_emit(buf, "%u\n", ecc_stats->failed);
288 }
289 MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */
290
static ssize_t mtd_bad_blocks_show(struct device *dev,
292 struct device_attribute *attr, char *buf)
293 {
294 struct mtd_info *mtd = dev_get_drvdata(dev);
295 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296
297 return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
298 }
299 MTD_DEVICE_ATTR_RO(bad_blocks);
300
static ssize_t mtd_bbt_blocks_show(struct device *dev,
302 struct device_attribute *attr, char *buf)
303 {
304 struct mtd_info *mtd = dev_get_drvdata(dev);
305 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306
307 return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
308 }
309 MTD_DEVICE_ATTR_RO(bbt_blocks);
310
311 static struct attribute *mtd_attrs[] = {
312 &dev_attr_type.attr,
313 &dev_attr_flags.attr,
314 &dev_attr_size.attr,
315 &dev_attr_erasesize.attr,
316 &dev_attr_writesize.attr,
317 &dev_attr_subpagesize.attr,
318 &dev_attr_oobsize.attr,
319 &dev_attr_oobavail.attr,
320 &dev_attr_numeraseregions.attr,
321 &dev_attr_name.attr,
322 &dev_attr_ecc_strength.attr,
323 &dev_attr_ecc_step_size.attr,
324 &dev_attr_corrected_bits.attr,
325 &dev_attr_ecc_failures.attr,
326 &dev_attr_bad_blocks.attr,
327 &dev_attr_bbt_blocks.attr,
328 &dev_attr_bitflip_threshold.attr,
329 NULL,
330 };
331 ATTRIBUTE_GROUPS(mtd);
332
333 static const struct device_type mtd_devtype = {
334 .name = "mtd",
335 .groups = mtd_groups,
336 .release = mtd_release,
337 };
338
339 static bool mtd_expert_analysis_mode;
340
341 #ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
343 {
344 const char *mtd_expert_analysis_warning =
345 "Bad block checks have been entirely disabled.\n"
346 "This is only reserved for post-mortem forensics and debug purposes.\n"
347 "Never enable this mode if you do not know what you are doing!\n";
348
349 return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
350 }
351 EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
352 #endif
353
354 static struct dentry *dfs_dir_mtd;
355
static void mtd_debugfs_populate(struct mtd_info *mtd)
357 {
358 struct device *dev = &mtd->dev;
359
360 if (IS_ERR_OR_NULL(dfs_dir_mtd))
361 return;
362
363 mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
364 }
365
366 #ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
368 {
369 switch (mtd->type) {
370 case MTD_RAM:
371 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
372 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
373 case MTD_ROM:
374 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
375 NOMMU_MAP_READ;
376 default:
377 return NOMMU_MAP_COPY;
378 }
379 }
380 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
381 #endif
382
static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
384 void *cmd)
385 {
386 struct mtd_info *mtd;
387
388 mtd = container_of(n, struct mtd_info, reboot_notifier);
389 mtd->_reboot(mtd);
390
391 return NOTIFY_DONE;
392 }
393
394 /**
395 * mtd_wunit_to_pairing_info - get pairing information of a wunit
396 * @mtd: pointer to new MTD device info structure
397 * @wunit: write unit we are interested in
398 * @info: returned pairing information
399 *
400 * Retrieve pairing information associated to the wunit.
401 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
402 * paired together, and where programming a page may influence the page it is
403 * paired with.
404 * The notion of page is replaced by the term wunit (write-unit) to stay
405 * consistent with the ->writesize field.
406 *
407 * The @wunit argument can be extracted from an absolute offset using
408 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
409 * to @wunit.
410 *
411 * From the pairing info the MTD user can find all the wunits paired with
412 * @wunit using the following loop:
413 *
414 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
* info.group = i;
416 * mtd_pairing_info_to_wunit(mtd, &info);
417 * ...
418 * }
419 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
421 struct mtd_pairing_info *info)
422 {
423 struct mtd_info *master = mtd_get_master(mtd);
424 int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
425
426 if (wunit < 0 || wunit >= npairs)
427 return -EINVAL;
428
429 if (master->pairing && master->pairing->get_info)
430 return master->pairing->get_info(master, wunit, info);
431
432 info->group = 0;
433 info->pair = wunit;
434
435 return 0;
436 }
437 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
438
439 /**
440 * mtd_pairing_info_to_wunit - get wunit from pairing information
441 * @mtd: pointer to new MTD device info structure
442 * @info: pairing information struct
443 *
444 * Returns a positive number representing the wunit associated to the info
445 * struct, or a negative error code.
446 *
447 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
448 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
449 * doc).
450 *
451 * It can also be used to only program the first page of each pair (i.e.
452 * page attached to group 0), which allows one to use an MLC NAND in
453 * software-emulated SLC mode:
454 *
455 * info.group = 0;
456 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
457 * for (info.pair = 0; info.pair < npairs; info.pair++) {
458 * wunit = mtd_pairing_info_to_wunit(mtd, &info);
459 * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
* mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
461 * }
462 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
464 const struct mtd_pairing_info *info)
465 {
466 struct mtd_info *master = mtd_get_master(mtd);
467 int ngroups = mtd_pairing_groups(master);
468 int npairs = mtd_wunit_per_eb(master) / ngroups;
469
470 if (!info || info->pair < 0 || info->pair >= npairs ||
471 info->group < 0 || info->group >= ngroups)
472 return -EINVAL;
473
474 if (master->pairing && master->pairing->get_wunit)
475 return mtd->pairing->get_wunit(master, info);
476
477 return info->pair;
478 }
479 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
480
481 /**
482 * mtd_pairing_groups - get the number of pairing groups
483 * @mtd: pointer to new MTD device info structure
484 *
485 * Returns the number of pairing groups.
486 *
487 * This number is usually equal to the number of bits exposed by a single
488 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
489 * to iterate over all pages of a given pair.
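*
* For example, the number of wunit pairs in an erase block can be derived
* from it (a minimal sketch, mirroring what the core code below does):
*
*   npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);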
490 */
int mtd_pairing_groups(struct mtd_info *mtd)
492 {
493 struct mtd_info *master = mtd_get_master(mtd);
494
495 if (!master->pairing || !master->pairing->ngroups)
496 return 1;
497
498 return master->pairing->ngroups;
499 }
500 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
501
static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
503 void *val, size_t bytes)
504 {
505 struct mtd_info *mtd = priv;
506 size_t retlen;
507 int err;
508
509 err = mtd_read(mtd, offset, bytes, &retlen, val);
510 if (err && err != -EUCLEAN)
511 return err;
512
513 return retlen == bytes ? 0 : -EIO;
514 }
515
static int mtd_nvmem_add(struct mtd_info *mtd)
517 {
518 struct device_node *node = mtd_get_of_node(mtd);
519 struct nvmem_config config = {};
520
521 config.id = -1;
522 config.dev = &mtd->dev;
523 config.name = dev_name(&mtd->dev);
524 config.owner = THIS_MODULE;
525 config.reg_read = mtd_nvmem_reg_read;
526 config.size = mtd->size;
527 config.word_size = 1;
528 config.stride = 1;
529 config.read_only = true;
530 config.root_only = true;
531 config.ignore_wp = true;
532 config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
533 config.priv = mtd;
534
535 mtd->nvmem = nvmem_register(&config);
536 if (IS_ERR(mtd->nvmem)) {
537 /* Just ignore if there is no NVMEM support in the kernel */
538 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
539 mtd->nvmem = NULL;
540 } else {
541 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
542 return PTR_ERR(mtd->nvmem);
543 }
544 }
545
546 return 0;
547 }
548
549 /**
550 * add_mtd_device - register an MTD device
551 * @mtd: pointer to new MTD device info structure
552 *
553 * Add a device to the list of MTD devices present in the system, and
554 * notify each currently active MTD 'user' of its arrival. Returns
555 * zero on success or non-zero on failure.
556 */
557
int add_mtd_device(struct mtd_info *mtd)
559 {
560 struct device_node *np = mtd_get_of_node(mtd);
561 struct mtd_info *master = mtd_get_master(mtd);
562 struct mtd_notifier *not;
563 int i, error, ofidx;
564
565 /*
566 * May occur, for instance, on buggy drivers which call
567 * mtd_device_parse_register() multiple times on the same master MTD,
568 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
569 */
570 if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
571 return -EEXIST;
572
573 BUG_ON(mtd->writesize == 0);
574
575 /*
576 * MTD drivers should implement ->_{write,read}() or
577 * ->_{write,read}_oob(), but not both.
578 */
579 if (WARN_ON((mtd->_write && mtd->_write_oob) ||
580 (mtd->_read && mtd->_read_oob)))
581 return -EINVAL;
582
583 if (WARN_ON((!mtd->erasesize || !master->_erase) &&
584 !(mtd->flags & MTD_NO_ERASE)))
585 return -EINVAL;
586
587 /*
588 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
589 * master is an MLC NAND and has a proper pairing scheme defined.
590 * We also reject masters that implement ->_writev() for now, because
591 * NAND controller drivers don't implement this hook, and adding the
592 * SLC -> MLC address/length conversion to this path is useless if we
593 * don't have a user.
594 */
595 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
596 (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
597 !master->pairing || master->_writev))
598 return -EINVAL;
599
600 mutex_lock(&mtd_table_mutex);
601
602 ofidx = -1;
603 if (np)
604 ofidx = of_alias_get_id(np, "mtd");
605 if (ofidx >= 0)
606 i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
607 else
608 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
609 if (i < 0) {
610 error = i;
611 goto fail_locked;
612 }
613
614 mtd->index = i;
615 mtd->usecount = 0;
616
617 /* default value if not set by driver */
618 if (mtd->bitflip_threshold == 0)
619 mtd->bitflip_threshold = mtd->ecc_strength;
620
621 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
622 int ngroups = mtd_pairing_groups(master);
623
624 mtd->erasesize /= ngroups;
625 mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
626 mtd->erasesize;
627 }
628
629 if (is_power_of_2(mtd->erasesize))
630 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
631 else
632 mtd->erasesize_shift = 0;
633
634 if (is_power_of_2(mtd->writesize))
635 mtd->writesize_shift = ffs(mtd->writesize) - 1;
636 else
637 mtd->writesize_shift = 0;
638
639 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
640 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
641
642 /* Some chips always power up locked. Unlock them now */
643 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
644 error = mtd_unlock(mtd, 0, mtd->size);
645 if (error && error != -EOPNOTSUPP)
646 printk(KERN_WARNING
647 "%s: unlock failed, writes may not work\n",
648 mtd->name);
649 /* Ignore unlock failures? */
650 error = 0;
651 }
652
653 /* Caller should have set dev.parent to match the
654 * physical device, if appropriate.
655 */
656 mtd->dev.type = &mtd_devtype;
657 mtd->dev.class = &mtd_class;
658 mtd->dev.devt = MTD_DEVT(i);
659 dev_set_name(&mtd->dev, "mtd%d", i);
660 dev_set_drvdata(&mtd->dev, mtd);
661 of_node_get(mtd_get_of_node(mtd));
662 error = device_register(&mtd->dev);
663 if (error)
664 goto fail_added;
665
666 /* Add the nvmem provider */
667 error = mtd_nvmem_add(mtd);
668 if (error)
669 goto fail_nvmem_add;
670
671 mtd_debugfs_populate(mtd);
672
673 device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
674 "mtd%dro", i);
675
676 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
677 /* No need to get a refcount on the module containing
678 the notifier, since we hold the mtd_table_mutex */
679 list_for_each_entry(not, &mtd_notifiers, list)
680 not->add(mtd);
681
682 mutex_unlock(&mtd_table_mutex);
683 /* We _know_ we aren't being removed, because
684 our caller is still holding us here. So none
685 of this try_ nonsense, and no bitching about it
686 either. :) */
687 __module_get(THIS_MODULE);
688 return 0;
689
690 fail_nvmem_add:
691 device_unregister(&mtd->dev);
692 fail_added:
693 of_node_put(mtd_get_of_node(mtd));
694 idr_remove(&mtd_idr, i);
695 fail_locked:
696 mutex_unlock(&mtd_table_mutex);
697 return error;
698 }
699
700 /**
701 * del_mtd_device - unregister an MTD device
702 * @mtd: pointer to MTD device info structure
703 *
704 * Remove a device from the list of MTD devices present in the system,
705 * and notify each currently active MTD 'user' of its departure.
* Returns zero on success or a negative error code on failure, which
* currently will happen if the requested device is not present in the
* list (-ENODEV) or is still in use (-EBUSY).
708 */
709
int del_mtd_device(struct mtd_info *mtd)
711 {
712 int ret;
713 struct mtd_notifier *not;
714
715 mutex_lock(&mtd_table_mutex);
716
717 if (idr_find(&mtd_idr, mtd->index) != mtd) {
718 ret = -ENODEV;
719 goto out_error;
720 }
721
722 /* No need to get a refcount on the module containing
723 the notifier, since we hold the mtd_table_mutex */
724 list_for_each_entry(not, &mtd_notifiers, list)
725 not->remove(mtd);
726
727 if (mtd->usecount) {
728 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
729 mtd->index, mtd->name, mtd->usecount);
730 ret = -EBUSY;
731 } else {
732 debugfs_remove_recursive(mtd->dbg.dfs_dir);
733
734 /* Try to remove the NVMEM provider */
735 nvmem_unregister(mtd->nvmem);
736
737 device_unregister(&mtd->dev);
738
739 /* Clear dev so mtd can be safely re-registered later if desired */
740 memset(&mtd->dev, 0, sizeof(mtd->dev));
741
742 idr_remove(&mtd_idr, mtd->index);
743 of_node_put(mtd_get_of_node(mtd));
744
745 module_put(THIS_MODULE);
746 ret = 0;
747 }
748
749 out_error:
750 mutex_unlock(&mtd_table_mutex);
751 return ret;
752 }
753
754 /*
755 * Set a few defaults based on the parent devices, if not provided by the
756 * driver
757 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
759 {
760 if (mtd->dev.parent) {
761 if (!mtd->owner && mtd->dev.parent->driver)
762 mtd->owner = mtd->dev.parent->driver->owner;
763 if (!mtd->name)
764 mtd->name = dev_name(mtd->dev.parent);
765 } else {
766 pr_debug("mtd device won't show a device symlink in sysfs\n");
767 }
768
769 INIT_LIST_HEAD(&mtd->partitions);
770 mutex_init(&mtd->master.partitions_lock);
771 mutex_init(&mtd->master.chrdev_lock);
772 }
773
static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
775 {
776 struct otp_info *info;
777 ssize_t size = 0;
778 unsigned int i;
779 size_t retlen;
780 int ret;
781
782 info = kmalloc(PAGE_SIZE, GFP_KERNEL);
783 if (!info)
784 return -ENOMEM;
785
786 if (is_user)
787 ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
788 else
789 ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
790 if (ret)
791 goto err;
792
793 for (i = 0; i < retlen / sizeof(*info); i++)
794 size += info[i].length;
795
796 kfree(info);
797 return size;
798
799 err:
800 kfree(info);
801
802 /* ENODATA means there is no OTP region. */
803 return ret == -ENODATA ? 0 : ret;
804 }
805
static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
807 const char *compatible,
808 int size,
809 nvmem_reg_read_t reg_read)
810 {
811 struct nvmem_device *nvmem = NULL;
812 struct nvmem_config config = {};
813 struct device_node *np;
814
815 /* DT binding is optional */
816 np = of_get_compatible_child(mtd->dev.of_node, compatible);
817
818 /* OTP nvmem will be registered on the physical device */
819 config.dev = mtd->dev.parent;
820 config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
821 config.id = NVMEM_DEVID_NONE;
822 config.owner = THIS_MODULE;
823 config.type = NVMEM_TYPE_OTP;
824 config.root_only = true;
825 config.ignore_wp = true;
826 config.reg_read = reg_read;
827 config.size = size;
828 config.of_node = np;
829 config.priv = mtd;
830
831 nvmem = nvmem_register(&config);
832 /* Just ignore if there is no NVMEM support in the kernel */
833 if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
834 nvmem = NULL;
835
836 of_node_put(np);
837 kfree(config.name);
838
839 return nvmem;
840 }
841
static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
843 void *val, size_t bytes)
844 {
845 struct mtd_info *mtd = priv;
846 size_t retlen;
847 int ret;
848
849 ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
850 if (ret)
851 return ret;
852
853 return retlen == bytes ? 0 : -EIO;
854 }
855
static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
857 void *val, size_t bytes)
858 {
859 struct mtd_info *mtd = priv;
860 size_t retlen;
861 int ret;
862
863 ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
864 if (ret)
865 return ret;
866
867 return retlen == bytes ? 0 : -EIO;
868 }
869
static int mtd_otp_nvmem_add(struct mtd_info *mtd)
871 {
872 struct nvmem_device *nvmem;
873 ssize_t size;
874 int err;
875
876 if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
877 size = mtd_otp_size(mtd, true);
878 if (size < 0)
879 return size;
880
881 if (size > 0) {
882 nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
883 mtd_nvmem_user_otp_reg_read);
884 if (IS_ERR(nvmem)) {
885 dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
886 return PTR_ERR(nvmem);
887 }
888 mtd->otp_user_nvmem = nvmem;
889 }
890 }
891
892 if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
893 size = mtd_otp_size(mtd, false);
894 if (size < 0) {
895 err = size;
896 goto err;
897 }
898
899 if (size > 0) {
900 nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
901 mtd_nvmem_fact_otp_reg_read);
902 if (IS_ERR(nvmem)) {
903 dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
904 err = PTR_ERR(nvmem);
905 goto err;
906 }
907 mtd->otp_factory_nvmem = nvmem;
908 }
909 }
910
911 return 0;
912
913 err:
914 nvmem_unregister(mtd->otp_user_nvmem);
915 return err;
916 }
917
918 /**
919 * mtd_device_parse_register - parse partitions and register an MTD device.
920 *
921 * @mtd: the MTD device to register
922 * @types: the list of MTD partition probes to try, see
923 * 'parse_mtd_partitions()' for more information
924 * @parser_data: MTD partition parser-specific data
925 * @parts: fallback partition information to register, if parsing fails;
926 * only valid if %nr_parts > %0
927 * @nr_parts: the number of partitions in parts, if zero then the full
928 * MTD device is registered if no partition info is found
929 *
930 * This function aggregates MTD partitions parsing (done by
931 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
932 * basically follows the most common pattern found in many MTD drivers:
933 *
934 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
935 * registered first.
* Then it tries to probe partitions on MTD device @mtd using parsers
* specified in @types (if @types is %NULL, then the default list of parsers
* is used, see 'parse_mtd_partitions()' for more information). If none are
* found, this function tries to fall back to the information specified in
940 * @parts/@nr_parts.
941 * * If no partitions were found this function just registers the MTD device
942 * @mtd and exits.
943 *
944 * Returns zero in case of success and a negative error code in case of failure.
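*
* A typical probe path in a flash driver looks roughly like this (a minimal
* sketch; the "foo" names are illustrative, not taken from this file):
*
*   static const char * const foo_part_types[] = { "cmdlinepart", NULL };
*
*   ret = mtd_device_parse_register(mtd, foo_part_types, NULL, NULL, 0);
*   if (ret)
*       goto err_cleanup;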
945 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
947 struct mtd_part_parser_data *parser_data,
948 const struct mtd_partition *parts,
949 int nr_parts)
950 {
951 int ret;
952
953 mtd_set_dev_defaults(mtd);
954
955 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
956 ret = add_mtd_device(mtd);
957 if (ret)
958 return ret;
959 }
960
961 /* Prefer parsed partitions over driver-provided fallback */
962 ret = parse_mtd_partitions(mtd, types, parser_data);
963 if (ret == -EPROBE_DEFER)
964 goto out;
965
966 if (ret > 0)
967 ret = 0;
968 else if (nr_parts)
969 ret = add_mtd_partitions(mtd, parts, nr_parts);
970 else if (!device_is_registered(&mtd->dev))
971 ret = add_mtd_device(mtd);
972 else
973 ret = 0;
974
975 if (ret)
976 goto out;
977
978 /*
979 * FIXME: some drivers unfortunately call this function more than once.
980 * So we have to check if we've already assigned the reboot notifier.
981 *
982 * Generally, we can make multiple calls work for most cases, but it
983 * does cause problems with parse_mtd_partitions() above (e.g.,
984 * cmdlineparts will register partitions more than once).
985 */
986 WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
987 "MTD already registered\n");
988 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
989 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
990 register_reboot_notifier(&mtd->reboot_notifier);
991 }
992
993 ret = mtd_otp_nvmem_add(mtd);
994
995 out:
996 if (ret && device_is_registered(&mtd->dev))
997 del_mtd_device(mtd);
998
999 return ret;
1000 }
1001 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
1002
1003 /**
1004 * mtd_device_unregister - unregister an existing MTD device.
1005 *
1006 * @master: the MTD device to unregister. This will unregister both the master
1007 * and any partitions if registered.
1008 */
int mtd_device_unregister(struct mtd_info *master)
1010 {
1011 int err;
1012
1013 if (master->_reboot) {
1014 unregister_reboot_notifier(&master->reboot_notifier);
1015 memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
1016 }
1017
1018 nvmem_unregister(master->otp_user_nvmem);
1019 nvmem_unregister(master->otp_factory_nvmem);
1020
1021 err = del_mtd_partitions(master);
1022 if (err)
1023 return err;
1024
1025 if (!device_is_registered(&master->dev))
1026 return 0;
1027
1028 return del_mtd_device(master);
1029 }
1030 EXPORT_SYMBOL_GPL(mtd_device_unregister);
1031
1032 /**
1033 * register_mtd_user - register a 'user' of MTD devices.
1034 * @new: pointer to notifier info structure
1035 *
* Registers a pair of callback functions to be called upon addition
1037 * or removal of MTD devices. Causes the 'add' callback to be immediately
1038 * invoked for each MTD device currently present in the system.
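*
* A 'user' is a struct mtd_notifier with both hooks populated, for example
* (a minimal sketch; foo_add()/foo_remove() are illustrative only):
*
*   static struct mtd_notifier foo_notifier = {
*       .add    = foo_add,
*       .remove = foo_remove,
*   };
*
*   register_mtd_user(&foo_notifier);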
1039 */
void register_mtd_user (struct mtd_notifier *new)
1041 {
1042 struct mtd_info *mtd;
1043
1044 mutex_lock(&mtd_table_mutex);
1045
1046 list_add(&new->list, &mtd_notifiers);
1047
1048 __module_get(THIS_MODULE);
1049
1050 mtd_for_each_device(mtd)
1051 new->add(mtd);
1052
1053 mutex_unlock(&mtd_table_mutex);
1054 }
1055 EXPORT_SYMBOL_GPL(register_mtd_user);
1056
1057 /**
1058 * unregister_mtd_user - unregister a 'user' of MTD devices.
1059 * @old: pointer to notifier info structure
1060 *
1061 * Removes a callback function pair from the list of 'users' to be
1062 * notified upon addition or removal of MTD devices. Causes the
1063 * 'remove' callback to be immediately invoked for each MTD device
1064 * currently present in the system.
1065 */
int unregister_mtd_user (struct mtd_notifier *old)
1067 {
1068 struct mtd_info *mtd;
1069
1070 mutex_lock(&mtd_table_mutex);
1071
1072 module_put(THIS_MODULE);
1073
1074 mtd_for_each_device(mtd)
1075 old->remove(mtd);
1076
1077 list_del(&old->list);
1078 mutex_unlock(&mtd_table_mutex);
1079 return 0;
1080 }
1081 EXPORT_SYMBOL_GPL(unregister_mtd_user);
1082
1083 /**
1084 * get_mtd_device - obtain a validated handle for an MTD device
1085 * @mtd: last known address of the required MTD device
1086 * @num: internal device number of the required MTD device
1087 *
1088 * Given a number and NULL address, return the num'th entry in the device
1089 * table, if any. Given an address and num == -1, search the device table
1090 * for a device with that address and return if it's still present. Given
* both, return the num'th device only if its address matches. Return
1092 * error code if not.
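*
* For example, grabbing and releasing MTD device 0 (a minimal sketch):
*
*   mtd = get_mtd_device(NULL, 0);
*   if (IS_ERR(mtd))
*       return PTR_ERR(mtd);
*   ...
*   put_mtd_device(mtd);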
1093 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1095 {
1096 struct mtd_info *ret = NULL, *other;
1097 int err = -ENODEV;
1098
1099 mutex_lock(&mtd_table_mutex);
1100
1101 if (num == -1) {
1102 mtd_for_each_device(other) {
1103 if (other == mtd) {
1104 ret = mtd;
1105 break;
1106 }
1107 }
1108 } else if (num >= 0) {
1109 ret = idr_find(&mtd_idr, num);
1110 if (mtd && mtd != ret)
1111 ret = NULL;
1112 }
1113
1114 if (!ret) {
1115 ret = ERR_PTR(err);
1116 goto out;
1117 }
1118
1119 err = __get_mtd_device(ret);
1120 if (err)
1121 ret = ERR_PTR(err);
1122 out:
1123 mutex_unlock(&mtd_table_mutex);
1124 return ret;
1125 }
1126 EXPORT_SYMBOL_GPL(get_mtd_device);
1127
1128
int __get_mtd_device(struct mtd_info *mtd)
1130 {
1131 struct mtd_info *master = mtd_get_master(mtd);
1132 int err;
1133
1134 if (!try_module_get(master->owner))
1135 return -ENODEV;
1136
1137 if (master->_get_device) {
1138 err = master->_get_device(mtd);
1139
1140 if (err) {
1141 module_put(master->owner);
1142 return err;
1143 }
1144 }
1145
1146 master->usecount++;
1147
1148 while (mtd->parent) {
1149 mtd->usecount++;
1150 mtd = mtd->parent;
1151 }
1152
1153 return 0;
1154 }
1155 EXPORT_SYMBOL_GPL(__get_mtd_device);
1156
1157 /**
1158 * get_mtd_device_nm - obtain a validated handle for an MTD device by
1159 * device name
1160 * @name: MTD device name to open
1161 *
1162 * This function returns MTD device description structure in case of
1163 * success and an error code in case of failure.
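*
* For example (a minimal sketch; the "nand0" name is illustrative only):
*
*   mtd = get_mtd_device_nm("nand0");
*   if (IS_ERR(mtd))
*       return PTR_ERR(mtd);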
1164 */
struct mtd_info *get_mtd_device_nm(const char *name)
1166 {
1167 int err = -ENODEV;
1168 struct mtd_info *mtd = NULL, *other;
1169
1170 mutex_lock(&mtd_table_mutex);
1171
1172 mtd_for_each_device(other) {
1173 if (!strcmp(name, other->name)) {
1174 mtd = other;
1175 break;
1176 }
1177 }
1178
1179 if (!mtd)
1180 goto out_unlock;
1181
1182 err = __get_mtd_device(mtd);
1183 if (err)
1184 goto out_unlock;
1185
1186 mutex_unlock(&mtd_table_mutex);
1187 return mtd;
1188
1189 out_unlock:
1190 mutex_unlock(&mtd_table_mutex);
1191 return ERR_PTR(err);
1192 }
1193 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
1194
void put_mtd_device(struct mtd_info *mtd)
1196 {
1197 mutex_lock(&mtd_table_mutex);
1198 __put_mtd_device(mtd);
1199 mutex_unlock(&mtd_table_mutex);
1200
1201 }
1202 EXPORT_SYMBOL_GPL(put_mtd_device);
1203
void __put_mtd_device(struct mtd_info *mtd)
1205 {
1206 struct mtd_info *master = mtd_get_master(mtd);
1207
1208 while (mtd->parent) {
1209 --mtd->usecount;
1210 BUG_ON(mtd->usecount < 0);
1211 mtd = mtd->parent;
1212 }
1213
1214 master->usecount--;
1215
1216 if (master->_put_device)
1217 master->_put_device(master);
1218
1219 module_put(master->owner);
1220 }
1221 EXPORT_SYMBOL_GPL(__put_mtd_device);
1222
1223 /*
* Erase is a synchronous operation. Device drivers are expected to return a
* negative error code if the operation failed and update instr->fail_addr
* to point to the portion that was not properly erased.
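*
* A minimal, illustrative erase of the first block from caller context
* (sketch only; a real caller should align addr/len to mtd->erasesize and
* check the return value):
*
*   struct erase_info ei = {
*       .addr = 0,
*       .len  = mtd->erasesize,
*   };
*
*   ret = mtd_erase(mtd, &ei);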
1227 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1229 {
1230 struct mtd_info *master = mtd_get_master(mtd);
1231 u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1232 struct erase_info adjinstr;
1233 int ret;
1234
1235 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1236 adjinstr = *instr;
1237
1238 if (!mtd->erasesize || !master->_erase)
1239 return -ENOTSUPP;
1240
1241 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1242 return -EINVAL;
1243 if (!(mtd->flags & MTD_WRITEABLE))
1244 return -EROFS;
1245
1246 if (!instr->len)
1247 return 0;
1248
1249 ledtrig_mtd_activity();
1250
1251 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1252 adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1253 master->erasesize;
1254 adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1255 master->erasesize) -
1256 adjinstr.addr;
1257 }
1258
1259 adjinstr.addr += mst_ofs;
1260
1261 ret = master->_erase(master, &adjinstr);
1262
1263 if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1264 instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1265 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1266 instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1267 master);
1268 instr->fail_addr *= mtd->erasesize;
1269 }
1270 }
1271
1272 return ret;
1273 }
1274 EXPORT_SYMBOL_GPL(mtd_erase);
1275
1276 /*
* This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
1278 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1280 void **virt, resource_size_t *phys)
1281 {
1282 struct mtd_info *master = mtd_get_master(mtd);
1283
1284 *retlen = 0;
1285 *virt = NULL;
1286 if (phys)
1287 *phys = 0;
1288 if (!master->_point)
1289 return -EOPNOTSUPP;
1290 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1291 return -EINVAL;
1292 if (!len)
1293 return 0;
1294
1295 from = mtd_get_master_ofs(mtd, from);
1296 return master->_point(master, from, len, retlen, virt, phys);
1297 }
1298 EXPORT_SYMBOL_GPL(mtd_point);
1299
1300 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1302 {
1303 struct mtd_info *master = mtd_get_master(mtd);
1304
1305 if (!master->_unpoint)
1306 return -EOPNOTSUPP;
1307 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1308 return -EINVAL;
1309 if (!len)
1310 return 0;
1311 return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1312 }
1313 EXPORT_SYMBOL_GPL(mtd_unpoint);
1314
1315 /*
1316 * Allow NOMMU mmap() to directly map the device (if not NULL)
1317 * - return the address to which the offset maps
1318 * - return -ENOSYS to indicate refusal to do the mapping
1319 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1321 unsigned long offset, unsigned long flags)
1322 {
1323 size_t retlen;
1324 void *virt;
1325 int ret;
1326
1327 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1328 if (ret)
1329 return ret;
1330 if (retlen != len) {
1331 mtd_unpoint(mtd, offset, retlen);
1332 return -ENOSYS;
1333 }
1334 return (unsigned long)virt;
1335 }
1336 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1337
static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1339 const struct mtd_ecc_stats *old_stats)
1340 {
1341 struct mtd_ecc_stats diff;
1342
1343 if (master == mtd)
1344 return;
1345
1346 diff = master->ecc_stats;
1347 diff.failed -= old_stats->failed;
1348 diff.corrected -= old_stats->corrected;
1349
1350 while (mtd->parent) {
1351 mtd->ecc_stats.failed += diff.failed;
1352 mtd->ecc_stats.corrected += diff.corrected;
1353 mtd = mtd->parent;
1354 }
1355 }
1356
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1358 u_char *buf)
1359 {
1360 struct mtd_oob_ops ops = {
1361 .len = len,
1362 .datbuf = buf,
1363 };
1364 int ret;
1365
1366 ret = mtd_read_oob(mtd, from, &ops);
1367 *retlen = ops.retlen;
1368
1369 return ret;
1370 }
1371 EXPORT_SYMBOL_GPL(mtd_read);
1372
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1374 const u_char *buf)
1375 {
1376 struct mtd_oob_ops ops = {
1377 .len = len,
1378 .datbuf = (u8 *)buf,
1379 };
1380 int ret;
1381
1382 ret = mtd_write_oob(mtd, to, &ops);
1383 *retlen = ops.retlen;
1384
1385 return ret;
1386 }
1387 EXPORT_SYMBOL_GPL(mtd_write);
1388
1389 /*
1390 * In blackbox flight recorder like scenarios we want to make successful writes
* in interrupt context. panic_write() is only intended to be called when it is
1392 * known the kernel is about to panic and we need the write to succeed. Since
1393 * the kernel is not going to be running for much longer, this function can
1394 * break locks and delay to ensure the write succeeds (but not sleep).
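*
* A typical user is a flight-recorder style logger in its panic path,
* roughly (sketch only; the names are illustrative):
*
*   mtd_panic_write(mtd, offset, record_size, &retlen, buf);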
1395 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1397 const u_char *buf)
1398 {
1399 struct mtd_info *master = mtd_get_master(mtd);
1400
1401 *retlen = 0;
1402 if (!master->_panic_write)
1403 return -EOPNOTSUPP;
1404 if (to < 0 || to >= mtd->size || len > mtd->size - to)
1405 return -EINVAL;
1406 if (!(mtd->flags & MTD_WRITEABLE))
1407 return -EROFS;
1408 if (!len)
1409 return 0;
1410 if (!master->oops_panic_write)
1411 master->oops_panic_write = true;
1412
1413 return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1414 retlen, buf);
1415 }
1416 EXPORT_SYMBOL_GPL(mtd_panic_write);
1417
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1419 struct mtd_oob_ops *ops)
1420 {
1421 /*
1422 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1423 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1424 * this case.
1425 */
1426 if (!ops->datbuf)
1427 ops->len = 0;
1428
1429 if (!ops->oobbuf)
1430 ops->ooblen = 0;
1431
1432 if (offs < 0 || offs + ops->len > mtd->size)
1433 return -EINVAL;
1434
1435 if (ops->ooblen) {
1436 size_t maxooblen;
1437
1438 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1439 return -EINVAL;
1440
1441 maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1442 mtd_div_by_ws(offs, mtd)) *
1443 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1444 if (ops->ooblen > maxooblen)
1445 return -EINVAL;
1446 }
1447
1448 return 0;
1449 }
1450
static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1452 struct mtd_oob_ops *ops)
1453 {
1454 struct mtd_info *master = mtd_get_master(mtd);
1455 int ret;
1456
1457 from = mtd_get_master_ofs(mtd, from);
1458 if (master->_read_oob)
1459 ret = master->_read_oob(master, from, ops);
1460 else
1461 ret = master->_read(master, from, ops->len, &ops->retlen,
1462 ops->datbuf);
1463
1464 return ret;
1465 }
1466
static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1468 struct mtd_oob_ops *ops)
1469 {
1470 struct mtd_info *master = mtd_get_master(mtd);
1471 int ret;
1472
1473 to = mtd_get_master_ofs(mtd, to);
1474 if (master->_write_oob)
1475 ret = master->_write_oob(master, to, ops);
1476 else
1477 ret = master->_write(master, to, ops->len, &ops->retlen,
1478 ops->datbuf);
1479
1480 return ret;
1481 }
1482
static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1484 struct mtd_oob_ops *ops)
1485 {
1486 struct mtd_info *master = mtd_get_master(mtd);
1487 int ngroups = mtd_pairing_groups(master);
1488 int npairs = mtd_wunit_per_eb(master) / ngroups;
1489 struct mtd_oob_ops adjops = *ops;
1490 unsigned int wunit, oobavail;
1491 struct mtd_pairing_info info;
1492 int max_bitflips = 0;
1493 u32 ebofs, pageofs;
1494 loff_t base, pos;
1495
1496 ebofs = mtd_mod_by_eb(start, mtd);
1497 base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1498 info.group = 0;
1499 info.pair = mtd_div_by_ws(ebofs, mtd);
1500 pageofs = mtd_mod_by_ws(ebofs, mtd);
1501 oobavail = mtd_oobavail(mtd, ops);
1502
1503 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1504 int ret;
1505
1506 if (info.pair >= npairs) {
1507 info.pair = 0;
1508 base += master->erasesize;
1509 }
1510
1511 wunit = mtd_pairing_info_to_wunit(master, &info);
1512 pos = mtd_wunit_to_offset(mtd, base, wunit);
1513
1514 adjops.len = ops->len - ops->retlen;
1515 if (adjops.len > mtd->writesize - pageofs)
1516 adjops.len = mtd->writesize - pageofs;
1517
1518 adjops.ooblen = ops->ooblen - ops->oobretlen;
1519 if (adjops.ooblen > oobavail - adjops.ooboffs)
1520 adjops.ooblen = oobavail - adjops.ooboffs;
1521
1522 if (read) {
1523 ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1524 if (ret > 0)
1525 max_bitflips = max(max_bitflips, ret);
1526 } else {
1527 ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1528 }
1529
1530 if (ret < 0)
1531 return ret;
1532
1533 max_bitflips = max(max_bitflips, ret);
1534 ops->retlen += adjops.retlen;
1535 ops->oobretlen += adjops.oobretlen;
1536 adjops.datbuf += adjops.retlen;
1537 adjops.oobbuf += adjops.oobretlen;
1538 adjops.ooboffs = 0;
1539 pageofs = 0;
1540 info.pair++;
1541 }
1542
1543 return max_bitflips;
1544 }
1545
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1547 {
1548 struct mtd_info *master = mtd_get_master(mtd);
1549 struct mtd_ecc_stats old_stats = master->ecc_stats;
1550 int ret_code;
1551
1552 ops->retlen = ops->oobretlen = 0;
1553
1554 ret_code = mtd_check_oob_ops(mtd, from, ops);
1555 if (ret_code)
1556 return ret_code;
1557
1558 ledtrig_mtd_activity();
1559
1560 /* Check the validity of a potential fallback on mtd->_read */
1561 if (!master->_read_oob && (!master->_read || ops->oobbuf))
1562 return -EOPNOTSUPP;
1563
1564 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1565 ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1566 else
1567 ret_code = mtd_read_oob_std(mtd, from, ops);
1568
1569 mtd_update_ecc_stats(mtd, master, &old_stats);
1570
1571 /*
1572 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1573 * similar to mtd->_read(), returning a non-negative integer
1574 * representing max bitflips. In other cases, mtd->_read_oob() may
1575 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1576 */
1577 if (unlikely(ret_code < 0))
1578 return ret_code;
1579 if (mtd->ecc_strength == 0)
1580 return 0; /* device lacks ecc */
1581 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1582 }
1583 EXPORT_SYMBOL_GPL(mtd_read_oob);
1584
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1586 struct mtd_oob_ops *ops)
1587 {
1588 struct mtd_info *master = mtd_get_master(mtd);
1589 int ret;
1590
1591 ops->retlen = ops->oobretlen = 0;
1592
1593 if (!(mtd->flags & MTD_WRITEABLE))
1594 return -EROFS;
1595
1596 ret = mtd_check_oob_ops(mtd, to, ops);
1597 if (ret)
1598 return ret;
1599
1600 ledtrig_mtd_activity();
1601
1602 /* Check the validity of a potential fallback on mtd->_write */
1603 if (!master->_write_oob && (!master->_write || ops->oobbuf))
1604 return -EOPNOTSUPP;
1605
1606 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1607 return mtd_io_emulated_slc(mtd, to, false, ops);
1608
1609 return mtd_write_oob_std(mtd, to, ops);
1610 }
1611 EXPORT_SYMBOL_GPL(mtd_write_oob);
1612
1613 /**
1614 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1615 * @mtd: MTD device structure
1616 * @section: ECC section. Depending on the layout you may have all the ECC
1617 * bytes stored in a single contiguous section, or one section
* per ECC chunk (and sometimes several sections for a single
* ECC chunk)
1620 * @oobecc: OOB region struct filled with the appropriate ECC position
1621 * information
1622 *
1623 * This function returns ECC section information in the OOB area. If you want
1624 * to get all the ECC bytes information, then you should call
1625 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
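*
* For instance, walking every ECC region can be done with (a minimal sketch):
*
*   section = 0;
*   while (!mtd_ooblayout_ecc(mtd, section++, &oobecc))
*       pr_info("ECC bytes at %u, len %u\n", oobecc.offset, oobecc.length);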
1626 *
1627 * Returns zero on success, a negative error code otherwise.
1628 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1630 struct mtd_oob_region *oobecc)
1631 {
1632 struct mtd_info *master = mtd_get_master(mtd);
1633
1634 memset(oobecc, 0, sizeof(*oobecc));
1635
1636 if (!master || section < 0)
1637 return -EINVAL;
1638
1639 if (!master->ooblayout || !master->ooblayout->ecc)
1640 return -ENOTSUPP;
1641
1642 return master->ooblayout->ecc(master, section, oobecc);
1643 }
1644 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1645
1646 /**
1647 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1648 * section
1649 * @mtd: MTD device structure
1650 * @section: Free section you are interested in. Depending on the layout
1651 * you may have all the free bytes stored in a single contiguous
1652 * section, or one section per ECC chunk plus an extra section
1653 * for the remaining bytes (or other funky layout).
1654 * @oobfree: OOB region struct filled with the appropriate free position
1655 * information
1656 *
1657 * This function returns free bytes position in the OOB area. If you want
1658 * to get all the free bytes information, then you should call
1659 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1660 *
1661 * Returns zero on success, a negative error code otherwise.
1662 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1664 struct mtd_oob_region *oobfree)
1665 {
1666 struct mtd_info *master = mtd_get_master(mtd);
1667
1668 memset(oobfree, 0, sizeof(*oobfree));
1669
1670 if (!master || section < 0)
1671 return -EINVAL;
1672
1673 if (!master->ooblayout || !master->ooblayout->free)
1674 return -ENOTSUPP;
1675
1676 return master->ooblayout->free(master, section, oobfree);
1677 }
1678 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1679
1680 /**
1681 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1682 * @mtd: mtd info structure
1683 * @byte: the byte we are searching for
1684 * @sectionp: pointer where the section id will be stored
1685 * @oobregion: used to retrieve the ECC position
1686 * @iter: iterator function. Should be either mtd_ooblayout_free or
1687 * mtd_ooblayout_ecc depending on the region type you're searching for
1688 *
1689 * This function returns the section id and oobregion information of a
1690 * specific byte. For example, say you want to know where the 4th ECC byte is
1691 * stored, you'll use:
1692 *
* mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1694 *
1695 * Returns zero on success, a negative error code otherwise.
1696 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1698 int *sectionp, struct mtd_oob_region *oobregion,
1699 int (*iter)(struct mtd_info *,
1700 int section,
1701 struct mtd_oob_region *oobregion))
1702 {
1703 int pos = 0, ret, section = 0;
1704
1705 memset(oobregion, 0, sizeof(*oobregion));
1706
1707 while (1) {
1708 ret = iter(mtd, section, oobregion);
1709 if (ret)
1710 return ret;
1711
1712 if (pos + oobregion->length > byte)
1713 break;
1714
1715 pos += oobregion->length;
1716 section++;
1717 }
1718
1719 /*
* Adjust region info to make it start at the beginning of the
1721 * 'start' ECC byte.
1722 */
1723 oobregion->offset += byte - pos;
1724 oobregion->length -= byte - pos;
1725 *sectionp = section;
1726
1727 return 0;
1728 }
1729
1730 /**
1731 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1732 * ECC byte
1733 * @mtd: mtd info structure
1734 * @eccbyte: the byte we are searching for
1735 * @section: pointer where the section id will be stored
1736 * @oobregion: OOB region information
1737 *
1738 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1739 * byte.
1740 *
1741 * Returns zero on success, a negative error code otherwise.
1742 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1744 int *section,
1745 struct mtd_oob_region *oobregion)
1746 {
1747 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1748 mtd_ooblayout_ecc);
1749 }
1750 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1751
1752 /**
1753 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1754 * @mtd: mtd info structure
1755 * @buf: destination buffer to store OOB bytes
1756 * @oobbuf: OOB buffer
1757 * @start: first byte to retrieve
1758 * @nbytes: number of bytes to retrieve
1759 * @iter: section iterator
1760 *
1761 * Extract bytes attached to a specific category (ECC or free)
1762 * from the OOB buffer and copy them into buf.
1763 *
1764 * Returns zero on success, a negative error code otherwise.
1765 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1767 const u8 *oobbuf, int start, int nbytes,
1768 int (*iter)(struct mtd_info *,
1769 int section,
1770 struct mtd_oob_region *oobregion))
1771 {
1772 struct mtd_oob_region oobregion;
1773 int section, ret;
1774
ret = mtd_ooblayout_find_region(mtd, start, &section,
1776 &oobregion, iter);
1777
1778 while (!ret) {
1779 int cnt;
1780
1781 cnt = min_t(int, nbytes, oobregion.length);
1782 memcpy(buf, oobbuf + oobregion.offset, cnt);
1783 buf += cnt;
1784 nbytes -= cnt;
1785
1786 if (!nbytes)
1787 break;
1788
1789 ret = iter(mtd, ++section, &oobregion);
1790 }
1791
1792 return ret;
1793 }
1794
1795 /**
1796 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1797 * @mtd: mtd info structure
1798 * @buf: source buffer to get OOB bytes from
1799 * @oobbuf: OOB buffer
1800 * @start: first OOB byte to set
1801 * @nbytes: number of OOB bytes to set
1802 * @iter: section iterator
1803 *
1804 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1805 * is selected by passing the appropriate iterator.
1806 *
1807 * Returns zero on success, a negative error code otherwise.
1808 */
1809 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1810 u8 *oobbuf, int start, int nbytes,
1811 int (*iter)(struct mtd_info *,
1812 int section,
1813 struct mtd_oob_region *oobregion))
1814 {
1815 struct mtd_oob_region oobregion;
1816 int section, ret;
1817
1818 ret = mtd_ooblayout_find_region(mtd, start, &section,
1819 &oobregion, iter);
1820
1821 while (!ret) {
1822 int cnt;
1823
1824 cnt = min_t(int, nbytes, oobregion.length);
1825 memcpy(oobbuf + oobregion.offset, buf, cnt);
1826 buf += cnt;
1827 nbytes -= cnt;
1828
1829 if (!nbytes)
1830 break;
1831
1832 ret = iter(mtd, ++section, &oobregion);
1833 }
1834
1835 return ret;
1836 }
1837
1838 /**
1839 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
1840 * @mtd: mtd info structure
1841 * @iter: category iterator
1842 *
1843 * Count the number of bytes in a given category.
1844 *
1845 * Returns a positive value on success, a negative error code otherwise.
1846 */
1847 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1848 int (*iter)(struct mtd_info *,
1849 int section,
1850 struct mtd_oob_region *oobregion))
1851 {
1852 struct mtd_oob_region oobregion;
1853 int section = 0, ret, nbytes = 0;
1854
1855 while (1) {
1856 ret = iter(mtd, section++, &oobregion);
1857 if (ret) {
1858 if (ret == -ERANGE)
1859 ret = nbytes;
1860 break;
1861 }
1862
1863 nbytes += oobregion.length;
1864 }
1865
1866 return ret;
1867 }
1868
1869 /**
1870 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1871 * @mtd: mtd info structure
1872 * @eccbuf: destination buffer to store ECC bytes
1873 * @oobbuf: OOB buffer
1874 * @start: first ECC byte to retrieve
1875 * @nbytes: number of ECC bytes to retrieve
1876 *
1877 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1878 *
1879 * Returns zero on success, a negative error code otherwise.
1880 */
1881 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1882 const u8 *oobbuf, int start, int nbytes)
1883 {
1884 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1885 mtd_ooblayout_ecc);
1886 }
1887 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1888
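/*
 * Illustrative sketch (not part of the driver): gathering all ECC bytes of
 * one page's OOB area, already read into 'oob', into a linear buffer. The
 * buffer names and the reduced error handling are assumptions made for the
 * example.
 *
 *	int nbytes = mtd_ooblayout_count_eccbytes(mtd);
 *	u8 *ecc;
 *
 *	if (nbytes <= 0)
 *		return nbytes;
 *	ecc = kmalloc(nbytes, GFP_KERNEL);
 *	if (!ecc)
 *		return -ENOMEM;
 *	err = mtd_ooblayout_get_eccbytes(mtd, ecc, oob, 0, nbytes);
 */
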
1889 /**
1890 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1891 * @mtd: mtd info structure
1892 * @eccbuf: source buffer to get ECC bytes from
1893 * @oobbuf: OOB buffer
1894 * @start: first ECC byte to set
1895 * @nbytes: number of ECC bytes to set
1896 *
1897 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1898 *
1899 * Returns zero on success, a negative error code otherwise.
1900 */
1901 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1902 u8 *oobbuf, int start, int nbytes)
1903 {
1904 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1905 mtd_ooblayout_ecc);
1906 }
1907 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1908
1909 /**
1910 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1911 * @mtd: mtd info structure
1912 * @databuf: destination buffer to store data bytes
1913 * @oobbuf: OOB buffer
1914 * @start: first data byte to retrieve
1915 * @nbytes: number of data bytes to retrieve
1916 *
1917 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1918 *
1919 * Returns zero on success, a negative error code otherwise.
1920 */
1921 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1922 const u8 *oobbuf, int start, int nbytes)
1923 {
1924 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1925 mtd_ooblayout_free);
1926 }
1927 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1928
1929 /**
1930 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1931 * @mtd: mtd info structure
1932 * @databuf: source buffer to get data bytes from
1933 * @oobbuf: OOB buffer
1934 * @start: first data byte to set
1935 * @nbytes: number of data bytes to set
1936 *
1937 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
1938 *
1939 * Returns zero on success, a negative error code otherwise.
1940 */
1941 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1942 u8 *oobbuf, int start, int nbytes)
1943 {
1944 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1945 mtd_ooblayout_free);
1946 }
1947 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1948
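/*
 * Illustrative sketch (not part of the driver): scattering 'len' bytes of
 * caller data into the free OOB positions of a raw OOB buffer before the
 * buffer is written back to the device. 'data', 'oob' and 'len' are
 * invented names.
 *
 *	err = mtd_ooblayout_set_databytes(mtd, data, oob, 0, len);
 *	if (err)
 *		return err;
 */
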
1949 /**
1950 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
1951 * @mtd: mtd info structure
1952 *
1953 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
1954 *
1955 * Returns the number of free bytes on success, a negative error code otherwise.
1956 */
1957 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1958 {
1959 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1960 }
1961 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1962
1963 /**
1964 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
1965 * @mtd: mtd info structure
1966 *
1967 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
1968 *
1969 * Returns the number of ECC bytes on success, a negative error code otherwise.
1970 */
1971 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1972 {
1973 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1974 }
1975 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1976
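/*
 * Illustrative sketch (not part of the driver): an MTD user checking that
 * the device exposes enough free OOB bytes for its per-page metadata.
 * 'MY_OOB_METADATA_SIZE' is a made-up constant.
 *
 *	if (mtd_ooblayout_count_freebytes(mtd) < MY_OOB_METADATA_SIZE)
 *		return -ENODEV;
 */
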
1977 /*
1978 * Method to access the protection register area, present in some flash
1979 * devices. The user data is one time programmable but the factory data is read
1980 * only.
1981 */
1982 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1983 struct otp_info *buf)
1984 {
1985 struct mtd_info *master = mtd_get_master(mtd);
1986
1987 if (!master->_get_fact_prot_info)
1988 return -EOPNOTSUPP;
1989 if (!len)
1990 return 0;
1991 return master->_get_fact_prot_info(master, len, retlen, buf);
1992 }
1993 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
1994
1995 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1996 size_t *retlen, u_char *buf)
1997 {
1998 struct mtd_info *master = mtd_get_master(mtd);
1999
2000 *retlen = 0;
2001 if (!master->_read_fact_prot_reg)
2002 return -EOPNOTSUPP;
2003 if (!len)
2004 return 0;
2005 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2006 }
2007 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2008
2009 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2010 struct otp_info *buf)
2011 {
2012 struct mtd_info *master = mtd_get_master(mtd);
2013
2014 if (!master->_get_user_prot_info)
2015 return -EOPNOTSUPP;
2016 if (!len)
2017 return 0;
2018 return master->_get_user_prot_info(master, len, retlen, buf);
2019 }
2020 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2021
2022 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2023 size_t *retlen, u_char *buf)
2024 {
2025 struct mtd_info *master = mtd_get_master(mtd);
2026
2027 *retlen = 0;
2028 if (!master->_read_user_prot_reg)
2029 return -EOPNOTSUPP;
2030 if (!len)
2031 return 0;
2032 return master->_read_user_prot_reg(master, from, len, retlen, buf);
2033 }
2034 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2035
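/*
 * Illustrative sketch (not part of the driver): enumerating the user OTP
 * regions and reading back the first one. The array size, 'buf' and the
 * reduced error handling are assumptions made for the example.
 *
 *	struct otp_info info[8];
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
 *	if (err)
 *		return err;
 *	if (retlen >= sizeof(info[0]))
 *		err = mtd_read_user_prot_reg(mtd, info[0].start,
 *					     info[0].length, &retlen, buf);
 */
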
2036 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2037 size_t *retlen, const u_char *buf)
2038 {
2039 struct mtd_info *master = mtd_get_master(mtd);
2040 int ret;
2041
2042 *retlen = 0;
2043 if (!master->_write_user_prot_reg)
2044 return -EOPNOTSUPP;
2045 if (!len)
2046 return 0;
2047 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2048 if (ret)
2049 return ret;
2050
2051 /*
2052 * If no data could be written at all, the OTP region is exhausted and
2053 * we must return -ENOSPC.
2054 */
2055 return (*retlen) ? 0 : -ENOSPC;
2056 }
2057 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2058
2059 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2060 {
2061 struct mtd_info *master = mtd_get_master(mtd);
2062
2063 if (!master->_lock_user_prot_reg)
2064 return -EOPNOTSUPP;
2065 if (!len)
2066 return 0;
2067 return master->_lock_user_prot_reg(master, from, len);
2068 }
2069 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2070
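/*
 * Illustrative sketch (not part of the driver): programming a user OTP
 * region and then locking it down. 'OTP_OFS' and 'key' are invented names.
 *
 *	err = mtd_write_user_prot_reg(mtd, OTP_OFS, sizeof(key), &retlen, key);
 *	if (!err)
 *		err = mtd_lock_user_prot_reg(mtd, OTP_OFS, sizeof(key));
 */
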
2071 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2072 {
2073 struct mtd_info *master = mtd_get_master(mtd);
2074
2075 if (!master->_erase_user_prot_reg)
2076 return -EOPNOTSUPP;
2077 if (!len)
2078 return 0;
2079 return master->_erase_user_prot_reg(master, from, len);
2080 }
2081 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2082
2083 /* Chip-supported device locking */
2084 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2085 {
2086 struct mtd_info *master = mtd_get_master(mtd);
2087
2088 if (!master->_lock)
2089 return -EOPNOTSUPP;
2090 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2091 return -EINVAL;
2092 if (!len)
2093 return 0;
2094
2095 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2096 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2097 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2098 }
2099
2100 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2101 }
2102 EXPORT_SYMBOL_GPL(mtd_lock);
2103
2104 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2105 {
2106 struct mtd_info *master = mtd_get_master(mtd);
2107
2108 if (!master->_unlock)
2109 return -EOPNOTSUPP;
2110 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2111 return -EINVAL;
2112 if (!len)
2113 return 0;
2114
2115 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2116 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2117 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2118 }
2119
2120 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2121 }
2122 EXPORT_SYMBOL_GPL(mtd_unlock);
2123
2124 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2125 {
2126 struct mtd_info *master = mtd_get_master(mtd);
2127
2128 if (!master->_is_locked)
2129 return -EOPNOTSUPP;
2130 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2131 return -EINVAL;
2132 if (!len)
2133 return 0;
2134
2135 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2136 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2137 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2138 }
2139
2140 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2141 }
2142 EXPORT_SYMBOL_GPL(mtd_is_locked);
2143
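/*
 * Illustrative sketch (not part of the driver): a caller unlocking an erase
 * block before writing to it, but only when the block is reported locked.
 * 'ofs' is assumed to be aligned to mtd->erasesize.
 *
 *	ret = mtd_is_locked(mtd, ofs, mtd->erasesize);
 *	if (ret > 0)
 *		ret = mtd_unlock(mtd, ofs, mtd->erasesize);
 *	if (ret < 0 && ret != -EOPNOTSUPP)
 *		return ret;
 */
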
2144 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2145 {
2146 struct mtd_info *master = mtd_get_master(mtd);
2147
2148 if (ofs < 0 || ofs >= mtd->size)
2149 return -EINVAL;
2150 if (!master->_block_isreserved)
2151 return 0;
2152
2153 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2154 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2155
2156 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2157 }
2158 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2159
2160 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2161 {
2162 struct mtd_info *master = mtd_get_master(mtd);
2163
2164 if (ofs < 0 || ofs >= mtd->size)
2165 return -EINVAL;
2166 if (!master->_block_isbad)
2167 return 0;
2168
2169 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2170 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2171
2172 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2173 }
2174 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2175
2176 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2177 {
2178 struct mtd_info *master = mtd_get_master(mtd);
2179 int ret;
2180
2181 if (!master->_block_markbad)
2182 return -EOPNOTSUPP;
2183 if (ofs < 0 || ofs >= mtd->size)
2184 return -EINVAL;
2185 if (!(mtd->flags & MTD_WRITEABLE))
2186 return -EROFS;
2187
2188 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2189 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2190
2191 ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2192 if (ret)
2193 return ret;
2194
2195 while (mtd->parent) {
2196 mtd->ecc_stats.badblocks++;
2197 mtd = mtd->parent;
2198 }
2199
2200 return 0;
2201 }
2202 EXPORT_SYMBOL_GPL(mtd_block_markbad);
2203
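/*
 * Illustrative sketch (not part of the driver): skipping bad blocks while
 * walking a device and marking a block bad after a failed erase. The loop
 * body and the 'erase_failed' condition are assumptions made for the
 * example.
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, ofs) > 0)
 *			continue;
 *		...
 *		if (erase_failed)
 *			mtd_block_markbad(mtd, ofs);
 *	}
 */
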
2204 /*
2205 * default_mtd_writev - the default writev method
2206 * @mtd: mtd device description object pointer
2207 * @vecs: the vectors to write
2208 * @count: count of vectors in @vecs
2209 * @to: the MTD device offset to write to
2210 * @retlen: on exit contains the count of bytes written to the MTD device.
2211 *
2212 * This function returns zero in case of success and a negative error code in
2213 * case of failure.
2214 */
2215 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2216 unsigned long count, loff_t to, size_t *retlen)
2217 {
2218 unsigned long i;
2219 size_t totlen = 0, thislen;
2220 int ret = 0;
2221
2222 for (i = 0; i < count; i++) {
2223 if (!vecs[i].iov_len)
2224 continue;
2225 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2226 vecs[i].iov_base);
2227 totlen += thislen;
2228 if (ret || thislen != vecs[i].iov_len)
2229 break;
2230 to += vecs[i].iov_len;
2231 }
2232 *retlen = totlen;
2233 return ret;
2234 }
2235
2236 /*
2237 * mtd_writev - the vector-based MTD write method
2238 * @mtd: mtd device description object pointer
2239 * @vecs: the vectors to write
2240 * @count: count of vectors in @vecs
2241 * @to: the MTD device offset to write to
2242 * @retlen: on exit contains the count of bytes written to the MTD device.
2243 *
2244 * This function returns zero in case of success and a negative error code in
2245 * case of failure.
2246 */
2247 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2248 unsigned long count, loff_t to, size_t *retlen)
2249 {
2250 struct mtd_info *master = mtd_get_master(mtd);
2251
2252 *retlen = 0;
2253 if (!(mtd->flags & MTD_WRITEABLE))
2254 return -EROFS;
2255
2256 if (!master->_writev)
2257 return default_mtd_writev(mtd, vecs, count, to, retlen);
2258
2259 return master->_writev(master, vecs, count,
2260 mtd_get_master_ofs(mtd, to), retlen);
2261 }
2262 EXPORT_SYMBOL_GPL(mtd_writev);
2263
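/*
 * Illustrative sketch (not part of the driver): writing a header and a
 * payload with a single vectored call. 'hdr', 'payload', 'payload_len' and
 * 'to' are invented names.
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr, .iov_len = sizeof(*hdr) },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	size_t retlen;
 *
 *	err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
 */
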
2264 /**
2265 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2266 * @mtd: mtd device description object pointer
2267 * @size: a pointer to the ideal or maximum size of the allocation, points
2268 * to the actual allocation size on success.
2269 *
2270 * This routine attempts to allocate a contiguous kernel buffer up to
2271 * the specified size, backing off the size of the request exponentially
2272 * until the request succeeds or until the allocation size falls below
2273 * the system page size. This attempts to make sure it does not adversely
2274 * impact system performance, so when allocating more than one page, we
2275 * ask the memory allocator to avoid re-trying, swapping, writing back
2276 * or performing I/O.
2277 *
2278 * Note, this function also makes sure that the allocated buffer is aligned to
2279 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2280 *
2281 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2282 * to handle smaller (i.e. degraded) buffer allocations under low- or
2283 * fragmented-memory situations where such reduced allocations, from a
2284 * requested ideal, are allowed.
2285 *
2286 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2287 */
2288 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2289 {
2290 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2291 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2292 void *kbuf;
2293
2294 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2295
2296 while (*size > min_alloc) {
2297 kbuf = kmalloc(*size, flags);
2298 if (kbuf)
2299 return kbuf;
2300
2301 *size >>= 1;
2302 *size = ALIGN(*size, mtd->writesize);
2303 }
2304
2305 /*
2306 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2307 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2308 */
2309 return kmalloc(*size, GFP_KERNEL);
2310 }
2311 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
2312
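/*
 * Illustrative sketch (not part of the driver): asking for a whole
 * eraseblock's worth of buffer but accepting whatever smaller,
 * writesize-aligned size the allocator can provide; the caller then works
 * in chunks of at most 'size' bytes.
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */
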
2313 #ifdef CONFIG_PROC_FS
2314
2315 /*====================================================================*/
2316 /* Support for /proc/mtd */
2317
2318 static int mtd_proc_show(struct seq_file *m, void *v)
2319 {
2320 struct mtd_info *mtd;
2321
2322 seq_puts(m, "dev: size erasesize name\n");
2323 mutex_lock(&mtd_table_mutex);
2324 mtd_for_each_device(mtd) {
2325 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2326 mtd->index, (unsigned long long)mtd->size,
2327 mtd->erasesize, mtd->name);
2328 }
2329 mutex_unlock(&mtd_table_mutex);
2330 return 0;
2331 }
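/*
 * The resulting /proc/mtd listing looks roughly like this (sample values
 * only, sizes are in hexadecimal):
 *
 *	dev: size erasesize name
 *	mtd0: 00100000 00020000 "bootloader"
 *	mtd1: 07f00000 00020000 "rootfs"
 */
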
2332 #endif /* CONFIG_PROC_FS */
2333
2334 /*====================================================================*/
2335 /* Init code */
2336
2337 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2338 {
2339 struct backing_dev_info *bdi;
2340 int ret;
2341
2342 bdi = bdi_alloc(NUMA_NO_NODE);
2343 if (!bdi)
2344 return ERR_PTR(-ENOMEM);
2345 bdi->ra_pages = 0;
2346 bdi->io_pages = 0;
2347
2348 /*
2349 * We append a '-0' suffix to the name to keep the name format we used
2350 * to get. Since this is called only once, we get a unique name.
2351 */
2352 ret = bdi_register(bdi, "%.28s-0", name);
2353 if (ret)
2354 bdi_put(bdi);
2355
2356 return ret ? ERR_PTR(ret) : bdi;
2357 }
2358
2359 static struct proc_dir_entry *proc_mtd;
2360
2361 static int __init init_mtd(void)
2362 {
2363 int ret;
2364
2365 ret = class_register(&mtd_class);
2366 if (ret)
2367 goto err_reg;
2368
2369 mtd_bdi = mtd_bdi_init("mtd");
2370 if (IS_ERR(mtd_bdi)) {
2371 ret = PTR_ERR(mtd_bdi);
2372 goto err_bdi;
2373 }
2374
2375 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2376
2377 ret = init_mtdchar();
2378 if (ret)
2379 goto out_procfs;
2380
2381 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2382 debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2383 &mtd_expert_analysis_mode);
2384
2385 return 0;
2386
2387 out_procfs:
2388 if (proc_mtd)
2389 remove_proc_entry("mtd", NULL);
2390 bdi_put(mtd_bdi);
2391 err_bdi:
2392 class_unregister(&mtd_class);
2393 err_reg:
2394 pr_err("Error registering mtd class or bdi: %d\n", ret);
2395 return ret;
2396 }
2397
2398 static void __exit cleanup_mtd(void)
2399 {
2400 debugfs_remove_recursive(dfs_dir_mtd);
2401 cleanup_mtdchar();
2402 if (proc_mtd)
2403 remove_proc_entry("mtd", NULL);
2404 class_unregister(&mtd_class);
2405 bdi_unregister(mtd_bdi);
2406 bdi_put(mtd_bdi);
2407 idr_destroy(&mtd_idr);
2408 }
2409
2410 module_init(init_mtd);
2411 module_exit(cleanup_mtd);
2412
2413 MODULE_LICENSE("GPL");
2414 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2415 MODULE_DESCRIPTION("Core MTD registration and access routines");
2416