// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

/*
 * Data structure holding a pointer to the MTD device as well as the
 * mode information for the various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

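	/*
	 * Each MTD device owns a pair of minors: the even minor is the
	 * read-write node (/dev/mtdX), the odd minor is the read-only
	 * node (/dev/mtdXro).
	 */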
	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	return 0;

out1:
	put_mtd_device(mtd);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures in low-memory or highly fragmented situations
 * at the cost of reducing the performance of the requested transfer
 * due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

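	/*
	 * mtd_kmalloc_up_to() may return a buffer smaller than requested;
	 * 'size' is updated to the size actually obtained and the loop
	 * below transfers the data in chunks of at most that size.
	 */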
	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it still
		 * returns the data. For our userspace tools it is
		 * important to dump areas with ECC errors!
		 * For kernel internal usage it also might return
		 * -EUCLEAN to signal the caller that a bitflip has
		 * occurred and has been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way must
		 * be aware that it is dealing with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */

static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

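	/* Refuse writes that start at or beyond the end of the device. */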
	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */

/*======================================================================

  IOCTL calls for getting device parameters.

 ======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

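	/*
	 * Probe support with a zero-length read: only an -EOPNOTSUPP
	 * result matters here, it indicates that the device does not
	 * implement the requested OTP area.
	 */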
	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			    uint64_t start, uint32_t length, void __user *ptr,
			    uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

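	/*
	 * The offset within the page was folded into ops.ooboffs above;
	 * the page-aligned remainder of 'start' selects the page whose
	 * OOB area is written.
	 */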
	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			   uint64_t start, uint32_t length, void __user *ptr,
			   uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}

/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
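	/*
	 * Walk the ECC regions section by section; mtd_ooblayout_ecc()
	 * returns -ERANGE once every section has been enumerated.
	 */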
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master MTD device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}

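/*
 * Clamp ops->ooblen so that no more OOB bytes are requested than the
 * pages touched by the data transfer can hold.
 */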
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
			      struct mtd_oob_ops *ops)
{
	uint32_t start_page, end_page;
	u32 oob_per_page;

	if (ops->len == 0 || ops->ooblen == 0)
		return;

	start_page = mtd_div_by_ws(start, mtd);
	end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
	oob_per_page = mtd_oobavail(mtd, ops);

	ops->ooblen = min_t(size_t, ops->ooblen,
			    (end_page - start_page + 1) * oob_per_page);
}

static int mtdchar_write_ioctl(struct mtd_info *mtd,
		struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	const void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	if (req.start + req.len > mtd->size)
		return -EINVAL;

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf)
			return -ENOMEM;
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			kfree(datbuf);
			return -ENOMEM;
		}
	}

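	/*
	 * Process the request in chunks: the bounce buffers allocated
	 * above hold at most one erase block of data and OOB, so larger
	 * requests take several mtd_write_oob() calls.
	 */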
	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized writes so that
		 * the write ends on an eraseblock boundary. This is necessary
		 * for adjust_oob_length() to properly handle non-page-aligned
		 * writes.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		/*
		 * For writes which are not OOB-only, adjust the amount of OOB
		 * data written according to the number of data pages written.
		 * This is necessary to prevent OOB data from being skipped
		 * over in data+OOB writes requiring multiple mtd_write_oob()
		 * calls to be completed.
		 */
		adjust_oob_length(mtd, req.start, &ops);

		if (copy_from_user(datbuf, usr_data, ops.len) ||
		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
			ret = -EFAULT;
			break;
		}

		ret = mtd_write_oob(mtd, req.start, &ops);
		if (ret)
			break;

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	kfree(datbuf);
	kfree(oobbuf);

	return ret;
}

static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode to require "dangerous" commands to have write
	 * permissions.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
	case OTPERASE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: the number of OOB bytes written is returned via buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: the number of OOB bytes read is returned via buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	case OTPERASE:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (cmd == OTPLOCK)
			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		else
			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* mtdchar_ioctl */

static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

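	/* Serialize all ioctls against the same master device. */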
	mutex_lock(&master->master.chrdev_lock);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&master->master.chrdev_lock);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: the number of OOB bytes read is returned via buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * Try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (an MMU can't / doesn't copy
 *   private mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

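	/*
	 * Claim the whole minor range of MTD_CHAR_MAJOR so that both the
	 * read-write and read-only node of every device resolve to these
	 * file operations.
	 */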
	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);