/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
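
/*
 * The structure and its pointer array are carved out of one single
 * allocation: mtd_concat_create() below allocates this many bytes
 * with kzalloc() and points subdev just past the end of the struct
 * itself (the "concat + 1" assignment there).
 */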

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))
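
/*
 * The cast is valid because the mtd field is the first member of
 * struct mtd_concat, so the two share the same address.
 */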

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
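/*
 * For example (illustrative sizes, not from any real board): two
 * concatenated 4 MiB subdevices appear as one 8 MiB device. A read
 * of 2 MiB at offset 5 MiB skips subdevice 0, subtracting its 4 MiB
 * size from the offset, and reaches subdevice 1 as a read at offset
 * 1 MiB. A transfer that straddles a boundary is split, and the
 * remainder continues at offset 0 of the next subdevice.
 */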

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		/*
		 * Save information about bitflips: -EBADMSG signals an
		 * uncorrectable ECC error, -EUCLEAN a corrected one.
		 */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

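	/*
	 * Walk the subdevices, carving the copied kvec array into
	 * windows: entries entry_low..entry_high hold the data destined
	 * for the current subdevice. A vector that straddles a subdevice
	 * boundary is shortened for this pass; its base and length are
	 * then advanced so the remainder goes to the next subdevice.
	 */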
	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
				entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = subdev->read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

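		/*
		 * Advance the data and OOB buffers past what this
		 * subdevice returned and stop once either request has
		 * been satisfied in full.
		 */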
		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += devops.oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = subdev->write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
					(erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}
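
	/*
	 * Example (illustrative geometry): if a 64 KiB erase region is
	 * followed by a 128 KiB one, an erase may start on any 64 KiB
	 * boundary inside the first region, but must end on a 128 KiB
	 * boundary if it ends inside the second.
	 */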

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (subdev->lock) {
			err = subdev->lock(subdev, ofs, size);
			if (err)
				break;
		} else
			err = -EOPNOTSUPP;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (subdev->unlock) {
			err = subdev->unlock(subdev, ofs, size);
			if (err)
				break;
		} else
			err = -EOPNOTSUPP;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		/* we've found the subdev over which the mapping will reside */
		if (offset + len > subdev->size)
			return (unsigned long) -EINVAL;

		if (subdev->get_unmapped_area)
			return subdev->get_unmapped_area(subdev, len, offset,
							 flags);

		break;
	}

	return (unsigned long) -ENOSYS;
}

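/*
 * Typical usage (a sketch, not taken from any one driver; the two
 * flash_probe() calls and their bank arguments are assumed here
 * purely for illustration):
 *
 *	struct mtd_info *parts[2], *merged;
 *
 *	parts[0] = flash_probe(bank0);
 *	parts[1] = flash_probe(bank1);
 *	merged = mtd_concat_create(parts, 2, "merged-flash");
 *	if (merged)
 *		add_mtd_device(merged);
 *
 * On teardown, call mtd_concat_destroy(merged) after deregistering it.
 */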
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, or NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
				&default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize   !=  subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize    !=  subdev[i]->oobsize ||
		    !concat->mtd.read_oob  != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;
	concat->mtd.get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
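	/*
	 * Example (illustrative): concatenating a device with a uniform
	 * 64 KiB erase size and one whose regions are 64 KiB followed
	 * by 128 KiB yields num_erase_region == 2, since only the
	 * switch to 128 KiB counts as a change.
	 */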
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill
		 * in erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 *  fill in an mtd_erase_region_info structure for the area
					 *  we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenation of MTD devices");