// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>

#include "mtdcore.h"

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

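/*
 * Illustrative sketch: a read at offset 0 of a child partition whose
 * part.offset is 0x40000 lands at offset 0x40000 of the master device.
 * mtd_get_master_ofs() (from <linux/mtd/mtd.h>), used further down for the
 * alignment checks, performs that translation by walking up the parent
 * chain, roughly:
 *
 *	while (mtd->parent) {
 *		ofs += mtd->part.offset;
 *		mtd = mtd->parent;
 *	}
 */
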
static inline void free_partition(struct mtd_info *mtd)
{
	kfree(mtd->name);
	kfree(mtd);
}

void release_mtd_partition(struct mtd_info *mtd)
{
	WARN_ON(!list_empty(&mtd->part.node));
	free_partition(mtd);
}

static struct mtd_info *allocate_partition(struct mtd_info *parent,
					   const struct mtd_partition *part,
					   int partno, uint64_t cur_offset)
{
	struct mtd_info *master = mtd_get_master(parent);
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
			   master->writesize : master->erasesize;
	u64 parent_size = mtd_is_partition(parent) ?
			  parent->part.size : parent->size;
	struct mtd_info *child;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	child = kzalloc(sizeof(*child), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !child) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(child);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	child->type = parent->type;
	child->part.flags = parent->flags & ~part->mask_flags;
	child->part.flags |= part->add_flags;
	child->flags = child->part.flags;
	child->part.size = part->size;
	child->writesize = parent->writesize;
	child->writebufsize = parent->writebufsize;
	child->oobsize = parent->oobsize;
	child->oobavail = parent->oobavail;
	child->subpage_sft = parent->subpage_sft;

	child->name = name;
	child->owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the parent and its partitions in sysfs.
	 */
	child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
			    &parent->dev : parent->dev.parent;
	child->dev.of_node = part->of_node;
	child->parent = parent;
	child->part.offset = part->offset;
	INIT_LIST_HEAD(&child->partitions);

	if (child->part.offset == MTDPART_OFS_APPEND)
		child->part.offset = cur_offset;
	if (child->part.offset == MTDPART_OFS_NXTBLK) {
		/* Round cur_offset up to the next erase/write block boundary */
		tmp = cur_offset;
		child->part.offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			child->part.offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset,
			       child->part.offset);
		}
	}
	if (child->part.offset == MTDPART_OFS_RETAIN) {
		/*
		 * Occupy the remaining space, but keep part->size bytes free
		 * at the end of the parent device.
		 */
		child->part.offset = cur_offset;
		if (parent_size - child->part.offset >= child->part.size) {
			child->part.size = parent_size - child->part.offset -
					   child->part.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent_size - child->part.offset,
				child->part.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (child->part.size == MTDPART_SIZ_FULL)
		child->part.size = parent_size - child->part.offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
	       child->part.offset, child->part.offset + child->part.size,
	       child->name);

	/* let's do some sanity checks */
	if (child->part.offset >= parent_size) {
		/* let's register it anyway to preserve ordering */
		child->part.offset = 0;
		child->part.size = 0;

		/* Initialize ->erasesize to make add_mtd_device() happy. */
		child->erasesize = parent->erasesize;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (child->part.offset + child->part.size > parent->size) {
		child->part.size = parent_size - child->part.offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, child->part.size);
	}

	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = child->part.offset + child->part.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= child->part.offset;
		     i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (child->erasesize < regions[i].erasesize)
				child->erasesize = regions[i].erasesize;
		}
		BUG_ON(child->erasesize == 0);
	} else {
		/* Single erase size */
		child->erasesize = master->erasesize;
	}

	/*
	 * Child erasesize might differ from the parent one if the parent
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(child->flags & MTD_NO_ERASE))
		wr_alignment = child->erasesize;

	tmp = mtd_get_master_ofs(child, 0);
	remainder = do_div(tmp, wr_alignment);
	if ((child->flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		child->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	tmp = mtd_get_master_ofs(child, 0) + child->part.size;
	remainder = do_div(tmp, wr_alignment);
	if ((child->flags & MTD_WRITEABLE) && remainder) {
		child->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block boundary -- force read-only\n",
			part->name);
	}


	child->size = child->part.size;
	child->ecc_step_size = parent->ecc_step_size;
	child->ecc_strength = parent->ecc_strength;
	child->bitflip_threshold = parent->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < child->part.size) {
			if (mtd_block_isreserved(child, offs))
				child->ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(child, offs))
				child->ecc_stats.badblocks++;
			offs += child->erasesize;
		}
	}

out_register:
	return child;
}

static ssize_t offset_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lld\n", mtd->part.offset);
}
static DEVICE_ATTR_RO(offset);	/* mtd partition offset */

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};
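
/*
 * With this attribute registered, the partition's byte offset inside its
 * parent is readable from sysfs, typically as /sys/class/mtd/mtdX/offset
 * (the exact path depends on how the MTD core registers the device).
 */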

static int mtd_add_partition_attrs(struct mtd_info *new)
{
	int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_info *master = mtd_get_master(parent);
	u64 parent_size = mtd_is_partition(parent) ?
			  parent->part.size : parent->size;
	struct mtd_partition part;
	struct mtd_info *child;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent_size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	child = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(child))
		return PTR_ERR(child);

	mutex_lock(&master->master.partitions_lock);
	list_add_tail(&child->part.node, &parent->partitions);
	mutex_unlock(&master->master.partitions_lock);

	ret = add_mtd_device(child);
	if (ret)
		goto err_remove_part;

	mtd_add_partition_attrs(child);

	return 0;

err_remove_part:
	mutex_lock(&master->master.partitions_lock);
	list_del(&child->part.node);
	mutex_unlock(&master->master.partitions_lock);

	free_partition(child);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
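
/*
 * Illustrative use (names are hypothetical): a caller holding the parent
 * mtd_info can carve out a runtime partition with something like
 *
 *	err = mtd_add_partition(parent, "env", 0x100000, 0x20000);
 *
 * Offset and length are in bytes relative to the parent. The placement
 * placeholders MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK are rejected here,
 * while MTDPART_SIZ_FULL is accepted for the length.
 */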

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @mtd: MTD structure to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_info *mtd)
{
	struct mtd_info *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
		err = __mtd_del_partition(child);
		if (err)
			return err;
	}

	sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);

	list_del_init(&mtd->part.node);
	err = del_mtd_device(mtd);
	if (err)
		return err;

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object, recursively.
 */
static int __del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_info *child, *next;
	int ret, err = 0;

	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
		if (mtd_has_partitions(child))
			__del_mtd_partitions(child);

		pr_info("Deleting %s MTD partition\n", child->name);
		list_del_init(&child->part.node);
		ret = del_mtd_device(child);
		if (ret < 0) {
			pr_err("Error when deleting partition \"%s\" (%d)\n",
			       child->name, ret);
			err = ret;
			continue;
		}
	}

	return err;
}

int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);

	mutex_lock(&master->master.partitions_lock);
	ret = __del_mtd_partitions(mtd);
	mutex_unlock(&master->master.partitions_lock);

	return ret;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_info *child, *master = mtd_get_master(mtd);
	int ret = -EINVAL;

	mutex_lock(&master->master.partitions_lock);
	list_for_each_entry(child, &mtd->partitions, part.node) {
		if (child->index == partno) {
			ret = __mtd_del_partition(child);
			break;
		}
	}
	mutex_unlock(&master->master.partitions_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
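
/*
 * Illustrative use: mtd_del_partition(parent, partno) removes the child
 * whose MTD index is @partno, together with any sub-partitions. Both
 * mtd_add_partition() and mtd_del_partition() are exported so other kernel
 * code (the mtd character device's BLKPG ioctl path, for instance) can
 * reshape partitions at runtime; that caller is an example, not a complete
 * list.
 */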

/*
 * This function, given a parent MTD object and a partition table, creates
 * and registers the child MTD objects which are bound to the parent according
 * to the partition definitions.
 *
 * For historical reasons, this function's caller only registers the parent
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

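/*
 * Illustrative table (names and sizes are made up, SZ_* from <linux/sizes.h>):
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot", .offset = 0,                  .size = SZ_1M },
 *		{ .name = "env",  .offset = MTDPART_OFS_APPEND, .size = SZ_256K },
 *		{ .name = "fs",   .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL },
 *	};
 *
 * Such a table, handed to add_mtd_partitions() below (usually via the driver
 * registration helpers), is resolved entry by entry in allocate_partition()
 * above, which turns the MTDPART_OFS_* and MTDPART_SIZ_FULL placeholders
 * into concrete offsets and sizes against the parent device.
 */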
int add_mtd_partitions(struct mtd_info *parent,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_info *child, *master = mtd_get_master(parent);
	uint64_t cur_offset = 0;
	int i, ret;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
	       nbparts, parent->name);

	for (i = 0; i < nbparts; i++) {
		child = allocate_partition(parent, parts + i, i, cur_offset);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto err_del_partitions;
		}

		mutex_lock(&master->master.partitions_lock);
		list_add_tail(&child->part.node, &parent->partitions);
		mutex_unlock(&master->master.partitions_lock);

		ret = add_mtd_device(child);
		if (ret) {
			mutex_lock(&master->master.partitions_lock);
			list_del(&child->part.node);
			mutex_unlock(&master->master.partitions_lock);

			free_partition(child);
			goto err_del_partitions;
		}

		mtd_add_partition_attrs(child);

		/* Look for subpartitions */
		parse_mtd_partitions(child, parts[i].types, NULL);

		cur_offset = child->part.offset + child->part.size;
	}

	return 0;

err_del_partitions:
	del_mtd_partitions(master);

	return ret;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
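
/*
 * Minimal parser sketch (hypothetical names, for illustration): a parser
 * module fills in a struct mtd_part_parser and registers it:
 *
 *	static int example_parse(struct mtd_info *mtd,
 *				 const struct mtd_partition **pparts,
 *				 struct mtd_part_parser_data *data)
 *	{
 *		// Allocate an array of struct mtd_partition, fill it in,
 *		// store it in *pparts and return the number of partitions
 *		// found (<= 0 means failure or nothing found).
 *	}
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name = "examplepart",
 *		.parse_fn = example_parse,
 *	};
 *
 * It is registered with register_mtd_parser(&example_parser), which wraps
 * __register_mtd_parser() above with THIS_MODULE, and removed again with
 * deregister_mtd_parser(). Without a .cleanup hook the default kfree()
 * cleanup above is used, so the partition array should be a single
 * allocation.
 */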

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};
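
/*
 * "cmdlinepart" consumes the mtdparts= kernel command line, e.g. something
 * like mtdparts=spi0.0:1M(boot),256k(env),-(fs), where "-" means the rest
 * of the device; "ofpart" takes the layout from the device tree. The exact
 * syntax is documented with those parsers, not here.
 */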

/* Check DT only when looking for subpartitions. */
static const char * const default_subpartition_types[] = {
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * mtd_part_get_compatible_parser - find MTD parser by a compatible string
 *
 * @compat: compatible string describing partitions in a device tree
 *
 * MTD parsers can specify supported partitions by providing a table of
 * compatibility strings. This function finds a parser that advertises support
 * for a passed value of "compatible".
 */
static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list) {
		const struct of_device_id *matches;

		matches = p->of_match_table;
		if (!matches)
			continue;

		for (; matches->compatible[0]; matches++) {
			if (!strcmp(matches->compatible, compat) &&
			    try_module_get(p->owner)) {
				ret = p;
				break;
			}
		}

		if (ret)
			break;
	}

	spin_unlock(&part_parser_lock);

	return ret;
}

static int mtd_part_of_parse(struct mtd_info *master,
			     struct mtd_partitions *pparts)
{
	struct mtd_part_parser *parser;
	struct device_node *np;
	struct device_node *child;
	struct property *prop;
	struct device *dev;
	const char *compat;
	const char *fixed = "fixed-partitions";
	int ret, err = 0;

	dev = &master->dev;
	/* Use parent device (controller) if the top level MTD is not registered */
	if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
		dev = master->dev.parent;

	np = mtd_get_of_node(master);
	if (mtd_is_partition(master))
		of_node_get(np);
	else
		np = of_get_child_by_name(np, "partitions");

	/*
	 * Don't create devices that are added to a bus but will never get
	 * probed. That'll cause fw_devlink to block probing of consumers of
	 * this partition until the partition device is probed.
	 */
	for_each_child_of_node(np, child)
		if (of_device_is_compatible(child, "nvmem-cells"))
			of_node_set_flag(child, OF_POPULATED);

	of_property_for_each_string(np, "compatible", prop, compat) {
		parser = mtd_part_get_compatible_parser(compat);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0) {
			of_platform_populate(np, NULL, NULL, dev);
			of_node_put(np);
			return ret;
		}
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}
	of_platform_populate(np, NULL, NULL, dev);
	of_node_put(np);

	/*
	 * For backward compatibility we have to try the "fixed-partitions"
	 * parser. It supports the old DT format, where partitions are
	 * specified as direct subnodes of the flash device DT node, without
	 * any compatible string we could match above.
	 */
	parser = mtd_part_parser_get(fixed);
	if (!parser && !request_module("%s", fixed))
		parser = mtd_part_parser_get(fixed);
	if (parser) {
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0)
			return ret;
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}

	return err;
}

/**
 * parse_mtd_partitions - parse and register MTD partitions
 *
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @data: MTD partition parser-specific data
 *
 * This function tries to find & register partitions on MTD device @master. It
 * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser that finds any.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o number of found partitions otherwise
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_part_parser_data *data)
{
	struct mtd_partitions pparts = { };
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = mtd_is_partition(master) ? default_subpartition_types :
			default_mtd_part_types;

	for ( ; *types; types++) {
		/*
		 * ofpart is a special type that means OF partitioning info
		 * should be used. It requires slightly different logic, so it
		 * is handled in a separate function.
		 */
		if (!strcmp(*types, "ofpart")) {
			ret = mtd_part_of_parse(master, &pparts);
		} else {
			pr_debug("%s: parsing partitions %s\n", master->name,
				 *types);
			parser = mtd_part_parser_get(*types);
			if (!parser && !request_module("%s", *types))
				parser = mtd_part_parser_get(*types);
			pr_debug("%s: got parser %s\n", master->name,
				parser ? parser->name : NULL);
			if (!parser)
				continue;
			ret = mtd_part_do_parse(parser, master, &pparts, data);
			if (ret <= 0)
				mtd_part_parser_put(parser);
		}
		/* Found partitions! */
		if (ret > 0) {
			err = add_mtd_partitions(master, pparts.parts,
						 pparts.nr_parts);
			mtd_part_parser_cleanup(&pparts);
			return err ? err : pparts.nr_parts;
		}
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}

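/*
 * Sketch of how this is typically reached (helper defined elsewhere): a
 * flash driver registers its device with something like
 *
 *	ret = mtd_device_parse_register(mtd, probe_types, &ppdata,
 *					fallback_parts, nr_fallback_parts);
 *
 * and that helper ends up calling parse_mtd_partitions(); the static
 * fallback table is normally only used when no parser finds anything. The
 * variable names above are illustrative.
 */
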
void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);

	return master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);