/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
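
/*
 * Worked example (editor's illustration, not part of the original source):
 * with PAGE_SIZE = 4096, a transfer starting at start = 0x1ffc for
 * size = 64 may only touch the 4 bytes left in the current page, so
 * size_inside_page(0x1ffc, 64) returns min(4096 - 0xffc, 64) = 4.
 * Callers below use it to chunk an arbitrary range page by page:
 *
 *	while (count > 0) {
 *		unsigned long sz = size_inside_page(p, count);
 *		// ... operate on [p, p + sz), all within one page ...
 *		p += sz;
 *		count -= sz;
 *	}
 */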

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
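
/*
 * Illustration (editor's note, values hypothetical): with PAGE_SHIFT = 12,
 * a request for pfn = 0x100 and size = 0x3000 walks the three page frames
 * 0x100, 0x101 and 0x102, and the whole access is rejected as soon as
 * devmem_is_allowed() vetoes any one of them:
 *
 *	if (!range_is_allowed(0x100, 0x3000))
 *		return -EPERM;	// one disallowed frame fails the range
 */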

void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
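
/*
 * Usage sketch (editor's illustration; userspace code, shown here only as
 * a comment): dumping a few bytes of physical memory at a hypothetical
 * offset, assuming the caller has the needed privileges and
 * CONFIG_STRICT_DEVMEM permits the range.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned char b[16];
 *		int fd = open("/dev/mem", O_RDONLY);
 *
 *		if (fd < 0 || pread(fd, b, sizeof(b), 0x9f000) < 0)
 *			return 1;
 *		for (int i = 0; i < 16; i++)
 *			printf("%02x ", b[i]);
 *		close(fd);
 *		return 0;
 *	}
 */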

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Skip the unmapped page, but pretend the write succeeded. */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of what the kernel knows about,
	 * or through a file that was opened with O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif
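
/*
 * Example (editor's illustration; userspace, comment only): a process
 * wanting uncached access can open the device with O_DSYNC, which the
 * generic branch of uncached_access() above honours.
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/mem", O_RDWR | O_DSYNC);
 *	// subsequent mmap() gets a pgprot_noncached() protection
 */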

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* remap_pfn_range() will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
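
/*
 * Usage sketch (editor's illustration; userspace, comment only): mapping a
 * page of physical memory at a hypothetical address PHYS_ADDR. The file
 * offset handed to mmap() is the physical address itself.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	#define PHYS_ADDR 0xfed00000UL	// example only, platform specific
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile unsigned int *regs =
 *		mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, fd, PHYS_ADDR);
 *	if (regs != MAP_FAILED)
 *		(void)regs[0];		// read the first 32-bit word
 */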

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Skip the unmapped page, but pretend the write succeeded. */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
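
/*
 * Usage sketch (editor's illustration; userspace, comment only): /dev/port
 * addresses I/O ports by file offset, one byte per port. Reading port 0x61
 * (the PC port B register on x86, used here purely as an example):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	unsigned char v;
 *	int fd = open("/dev/port", O_RDONLY);
 *
 *	if (fd >= 0 && pread(fd, &v, 1, 0x61) == 1)
 *		;	// v now holds the byte read with inb(0x61)
 */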

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
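
/*
 * Usage sketch (editor's illustration; userspace, comment only): a shared
 * mapping of /dev/zero behaves like anonymous shared memory, the feature
 * noted in the header comment above. Private mappings simply get
 * demand-zero pages.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *	// after fork(), parent and child see each other's stores to shared
 */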

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
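
/*
 * Example (editor's illustration; userspace, comment only): appending to
 * /dev/null works because stdio's "a" mode seeks to the end of file, and
 * null_lseek() simply reports position 0 instead of failing.
 *
 *	#include <stdio.h>
 *
 *	FILE *f = fopen("/dev/null", "a");	// succeeds via null_lseek()
 *	if (f)
 *		fprintf(f, "discarded\n");
 */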

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= ~0xFFFULL) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}
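
/*
 * Example (editor's illustration; userspace, comment only): positioning
 * /dev/mem at a physical address before a read. SEEK_END is rejected with
 * -EINVAL, as the comment above notes.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	off_t pos = lseek(fd, 0x100000, SEEK_SET);	// seek to 1 MiB
 */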

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek      null_lseek
#define write_zero	write_null
#define read_full       read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
	.llseek = default_llseek,
};
#endif

static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}
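
/*
 * Usage sketch (editor's illustration; userspace, comment only): anything
 * written to /dev/kmsg is injected into the kernel log via printk() above.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	const char *msg = "hello from userspace\n";
 *	int fd = open("/dev/kmsg", O_WRONLY);
 *
 *	if (fd >= 0)
 *		write(fd, msg, strlen(msg));	// shows up in dmesg
 */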

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
	.llseek = noop_llseek,
};

static const struct memdev {
	const char *name;
	mode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
	[11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};
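
/*
 * Illustration (editor's note): the array index is the minor number under
 * major MEM_MAJOR (1), so the table above corresponds to the classic
 * character device nodes, e.g. created by hand with mknod(2):
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *	mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 */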

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is /dev/mem or /dev/kmem ? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, mode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;
		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);