1 /*
2 * linux/drivers/char/mem.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Added devfs support.
7 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8 * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9 */
10
11 #include <linux/config.h>
12 #include <linux/mm.h>
13 #include <linux/miscdevice.h>
14 #include <linux/tpqic02.h>
15 #include <linux/ftape.h>
16 #include <linux/slab.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mman.h>
19 #include <linux/random.h>
20 #include <linux/init.h>
21 #include <linux/raw.h>
22 #include <linux/tty.h>
23 #include <linux/capability.h>
24 #include <linux/ptrace.h>
25
26 #include <asm/uaccess.h>
27 #include <asm/io.h>
28 #include <asm/pgalloc.h>
29
/*
 * Init hooks provided by other character-device drivers; called from
 * chr_dev_init() below when the corresponding option is configured.
 */
#ifdef CONFIG_I2C
extern int i2c_init_all(void);
#endif
#ifdef CONFIG_FB
extern void fbmem_init(void);
#endif
#ifdef CONFIG_PROM_CONSOLE
extern void prom_con_init(void);
#endif
#ifdef CONFIG_MDA_CONSOLE
extern void mda_console_init(void);
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif
45
/*
 * Common helper for write_mem()/write_kmem(): copy 'count' bytes of user
 * data from 'buf' to the kernel-visible address 'p'.  'realp' is the
 * device offset (physical address for /dev/mem, kernel virtual address
 * for /dev/kmem) used to advance *ppos.
 *
 * Returns the number of bytes accounted as written, or -EFAULT if the
 * user buffer could not be read.
 */
static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
			    const char * buf, size_t count, loff_t *ppos)
{
	ssize_t written;

	written = 0;
#if defined(__sparc__) || defined(__mc68000__)
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE-realp;
		if (sz > count) sz = count;
		/* Skip the unmapped first page but still count it as written. */
		buf+=sz;
		p+=sz;
		count-=sz;
		written+=sz;
	}
#endif
	if (copy_from_user(p, buf, count))
		return -EFAULT;
	written += count;
	*ppos = realp + written;
	return written;
}
70
71
/*
 * This function reads *physical* memory (/dev/mem).  The file position
 * is interpreted directly as a physical address.
 */
static ssize_t read_mem(struct file * file, char * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	unsigned long end_mem;
	ssize_t read;

	/* Physical address of the top of kernel-managed RAM. */
	end_mem = __pa(high_memory);
	if (p >= end_mem)
		return 0;	/* EOF: beyond the end of RAM */
	if (count > end_mem - p)
		count = end_mem - p;	/* clip to available memory */
	read = 0;
#if defined(__sparc__) || defined(__mc68000__)
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE-p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			/* Report zeroes for the unmapped first page. */
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif
	/* Low memory is linearly mapped: translate and copy straight out. */
	if (copy_to_user(buf, __va(p), count))
		return -EFAULT;
	read += count;
	*ppos = p + read;
	return read;
}
111
write_mem(struct file * file,const char * buf,size_t count,loff_t * ppos)112 static ssize_t write_mem(struct file * file, const char * buf,
113 size_t count, loff_t *ppos)
114 {
115 unsigned long p = *ppos;
116 unsigned long end_mem;
117
118 end_mem = __pa(high_memory);
119 if (p >= end_mem)
120 return 0;
121 if (count > end_mem - p)
122 count = end_mem - p;
123 return do_write_mem(file, __va(p), p, buf, count, ppos);
124 }
125
#ifndef pgprot_noncached

/*
 * Fallback for architectures that do not define pgprot_noncached in
 * <asm/pgtable.h>: turn a page protection value into its uncached
 * variant.  This should probably be per-architecture in <asm/pgtable.h>.
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

#if defined(__i386__) || defined(__x86_64__)
	/* On PPro and successors, PCD alone doesn't always mean
	   uncached because of interactions with the MTRRs.  PCD | PWT
	   means definitely uncached. */
	if (boot_cpu_data.x86 > 3)
		prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#ifdef SUN3_PAGE_NOCACHE
	if (MMU_IS_SUN3)
		prot |= SUN3_PAGE_NOCACHE;
	else
#endif
	if (MMU_IS_851 || MMU_IS_030)
		prot |= _PAGE_NOCACHE030;
	/* Use no-cache mode, serialized */
	else if (MMU_IS_040 || MMU_IS_060)
		prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#endif

	return __pgprot(prot);
}

#endif /* !pgprot_noncached */
160
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.  Returns non-zero if a /dev/mem mapping of
 * 'addr' should be made uncached.
 */
static inline int noncached_address(unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#else
	/* Everything above the top of RAM is treated as uncachable. */
	return addr >= __pa(high_memory);
#endif
}
185
mmap_mem(struct file * file,struct vm_area_struct * vma)186 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
187 {
188 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
189
190 /*
191 * Accessing memory above the top the kernel knows about or
192 * through a file pointer that was marked O_SYNC will be
193 * done non-cached.
194 */
195 if (noncached_address(offset) || (file->f_flags & O_SYNC))
196 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
197
198 /* Don't try to swap out physical pages.. */
199 vma->vm_flags |= VM_RESERVED;
200
201 /*
202 * Don't dump addresses that are not real memory to a core file.
203 */
204 if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
205 vma->vm_flags |= VM_IO;
206
207 if (remap_page_range(vma->vm_start, offset, vma->vm_end-vma->vm_start,
208 vma->vm_page_prot))
209 return -EAGAIN;
210 return 0;
211 }
212
/*
 * This function reads the *virtual* memory as seen by the kernel
 * (/dev/kmem).  Addresses below high_memory are copied straight out of
 * the linear mapping; vmalloc-space addresses go through a bounce page
 * and vread().
 */
static ssize_t read_kmem(struct file *file, char *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read = 0;
	ssize_t virtr = 0;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		/* Clip the directly-mapped part to [p, high_memory). */
		read = count;
		if (count > (unsigned long) high_memory - p)
			read = (unsigned long) high_memory - p;

#if defined(__sparc__) || defined(__mc68000__)
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && read > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > read) tmp = read;
			/* Return zeroes instead of faulting on page 0. */
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read -= tmp;
			count -= tmp;
		}
#endif
		if (copy_to_user(buf, (char *)p, read))
			return -EFAULT;
		p += read;
		buf += read;
		count -= read;
	}

	/* Anything left lies above high_memory: fetch it via vread(). */
	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;	/* nothing consumable here: stop */
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return virtr + read;
}
275
276 extern long vwrite(char *buf, char *addr, unsigned long count);
277
278 /*
279 * This function writes to the *virtual* memory as seen by the kernel.
280 */
write_kmem(struct file * file,const char * buf,size_t count,loff_t * ppos)281 static ssize_t write_kmem(struct file * file, const char * buf,
282 size_t count, loff_t *ppos)
283 {
284 unsigned long p = *ppos;
285 ssize_t wrote = 0;
286 ssize_t virtr = 0;
287 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
288
289 if (p < (unsigned long) high_memory) {
290 wrote = count;
291 if (count > (unsigned long) high_memory - p)
292 wrote = (unsigned long) high_memory - p;
293
294 wrote = do_write_mem(file, (void*)p, p, buf, wrote, ppos);
295
296 p += wrote;
297 buf += wrote;
298 count -= wrote;
299 }
300
301 if (count > 0) {
302 kbuf = (char *)__get_free_page(GFP_KERNEL);
303 if (!kbuf)
304 return -ENOMEM;
305 while (count > 0) {
306 int len = count;
307
308 if (len > PAGE_SIZE)
309 len = PAGE_SIZE;
310 if (len && copy_from_user(kbuf, buf, len)) {
311 free_page((unsigned long)kbuf);
312 return -EFAULT;
313 }
314 len = vwrite(kbuf, (char *)p, len);
315 count -= len;
316 buf += len;
317 virtr += len;
318 p += len;
319 }
320 free_page((unsigned long)kbuf);
321 }
322
323 *ppos = p;
324 return virtr + wrote;
325 }
326
327 #if defined(CONFIG_ISA) || !defined(__mc68000__)
read_port(struct file * file,char * buf,size_t count,loff_t * ppos)328 static ssize_t read_port(struct file * file, char * buf,
329 size_t count, loff_t *ppos)
330 {
331 unsigned long i = *ppos;
332 char *tmp = buf;
333
334 if (verify_area(VERIFY_WRITE,buf,count))
335 return -EFAULT;
336 while (count-- > 0 && i < 65536) {
337 if (__put_user(inb(i),tmp) < 0)
338 return -EFAULT;
339 i++;
340 tmp++;
341 }
342 *ppos = i;
343 return tmp-buf;
344 }
345
write_port(struct file * file,const char * buf,size_t count,loff_t * ppos)346 static ssize_t write_port(struct file * file, const char * buf,
347 size_t count, loff_t *ppos)
348 {
349 unsigned long i = *ppos;
350 const char * tmp = buf;
351
352 if (verify_area(VERIFY_READ,buf,count))
353 return -EFAULT;
354 while (count-- > 0 && i < 65536) {
355 char c;
356 if (__get_user(c, tmp))
357 return -EFAULT;
358 outb(c,i);
359 i++;
360 tmp++;
361 }
362 *ppos = i;
363 return tmp-buf;
364 }
365 #endif
366
/* /dev/null reads as an empty stream: always return 0 (EOF). */
static ssize_t read_null(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}
372
/* /dev/null writes are discarded but reported as fully successful. */
static ssize_t write_null(struct file * file, const char * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}
378
/*
 * Page-aligned bulk zeroing for read_zero().  For fun, we are using the
 * MMU for this: for private writable mappings the destination range is
 * unmapped and re-mapped copy-on-write to the zero page, avoiding any
 * actual copying.  Shared or otherwise unsuitable mappings fall back to
 * clear_user().
 *
 * 'buf' and 'size' are expected page-aligned (caller guarantees this).
 * Returns the number of bytes NOT zeroed (0 on full success).
 */
static inline size_t read_zero_pagealigned(char * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr=(unsigned long)buf;

	mm = current->mm;
	/* Walking and modifying our own VMA list requires mmap_sem. */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		/* Hole in the address space, or not writable: fault. */
		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & VM_SHARED)
			break;	/* shared mapping: handled below */
		/* Zero up to the end of this VMA or of the request. */
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		/* Drop existing pages, then map the zero page COW. */
		zap_page_range(mm, addr, count);
		if (zeromap_page_range(addr, count, PAGE_COPY))
			break;

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		/* Be kind to other tasks during long clears. */
		if (current->need_resched)
			schedule();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}
433
/*
 * Read from /dev/zero: fill the user buffer with zeroes.  Requests of
 * four pages or more hand the page-aligned middle portion to
 * read_zero_pagealigned(); head and tail fragments are cleared directly.
 */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* Bytes needed to reach the next page boundary. */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		/* Bulk-zero the page-aligned middle via the MMU trick. */
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	/* Zero the remaining sub-page tail (or the whole short request). */
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	/* Report partial progress if any bytes were zeroed before a fault. */
	return written ? written : -EFAULT;
}
471
mmap_zero(struct file * file,struct vm_area_struct * vma)472 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
473 {
474 if (vma->vm_flags & VM_SHARED)
475 return shmem_zero_setup(vma);
476 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
477 return -EAGAIN;
478 return 0;
479 }
480
/* /dev/full: every write fails as if the device were out of space. */
static ssize_t write_full(struct file * file, const char * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
486
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

/* Any seek on /dev/null or /dev/zero resets the position to 0. */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
497
498 /*
499 * The memory devices use the full 32/64 bits of the offset, and so we cannot
500 * check against negative addresses: they are ok. The return value is weird,
501 * though, in that case (0).
502 *
503 * also note that seeking relative to the "end of file" isn't supported:
504 * it has no meaning, so it returns -EINVAL.
505 */
memory_lseek(struct file * file,loff_t offset,int orig)506 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
507 {
508 loff_t ret;
509
510 switch (orig) {
511 case 0:
512 file->f_pos = offset;
513 ret = file->f_pos;
514 force_successful_syscall_return();
515 break;
516 case 1:
517 file->f_pos += offset;
518 ret = file->f_pos;
519 force_successful_syscall_return();
520 break;
521 default:
522 ret = -EINVAL;
523 }
524 return ret;
525 }
526
open_port(struct inode * inode,struct file * filp)527 static int open_port(struct inode * inode, struct file * filp)
528 {
529 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
530 }
531
/*
 * nopage handler backing the high (vmalloc-space) /dev/kmem mappings set
 * up by mmap_kmem().  Translates the faulting user address into the
 * kernel virtual address it mirrors, walks the kernel page tables under
 * init_mm.page_table_lock, and returns the backing page with its
 * refcount raised — or NULL if no suitable page is mapped there.
 */
struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long kaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	struct page *page = NULL;

	/* address is user VA; convert to kernel VA of desired page */
	kaddr = (address - vma->vm_start) + offset;
	kaddr = VMALLOC_VMADDR(kaddr);

	/* The kernel page tables belong to init_mm; lock them for the walk. */
	spin_lock(&init_mm.page_table_lock);

	/* Lookup page structure for kernel VA */
	pgd = pgd_offset(&init_mm, kaddr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		goto out;
	pmd = pmd_offset(pgd, kaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		goto out;
	ptep = pte_offset(pmd, kaddr);
	if (!ptep)
		goto out;
	pte = *ptep;
	if (!pte_present(pte))
		goto out;
	/* Refuse write faults on read-only kernel mappings. */
	if (write && !pte_write(pte))
		goto out;
	page = pte_page(pte);
	/* Reject ptes that don't map a valid struct page (e.g. I/O space). */
	if (!VALID_PAGE(page)) {
		page = NULL;
		goto out;
	}

	/* Increment reference count on page */
	get_page(page);

out:
	spin_unlock(&init_mm.page_table_lock);

	return page;
}
576
/*
 * VMA operations for high /dev/kmem mappings: pages are faulted in
 * lazily by kmem_vm_nopage() rather than mapped up front.
 */
struct vm_operations_struct kmem_vm_ops = {
	nopage:	kmem_vm_nopage,
};
580
/*
 * mmap for /dev/kmem.  Ranges lying entirely below high_memory are
 * physically contiguous and can use the ordinary mmap_mem() path;
 * anything higher (vmalloc space) is mapped lazily, one page at a time,
 * by kmem_vm_nopage().
 */
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * If the user is not attempting to mmap a high memory address then
	 * the standard mmap_mem mechanism will work.  High memory addresses
	 * need special handling, as remap_page_range expects a physically-
	 * contiguous range of kernel addresses (such as obtained in kmalloc).
	 */
	if ((offset + size) < (unsigned long) high_memory)
		return mmap_mem(file, vma);

	/*
	 * Accessing memory above the top the kernel knows about or
	 * through a file pointer that was marked O_SYNC will be
	 * done non-cached.
	 */
	if (noncached_address(offset) || (file->f_flags & O_SYNC))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Don't do anything here; "nopage" will fill the holes */
	vma->vm_ops = &kmem_vm_ops;

	/* Don't try to swap out physical pages.. */
	vma->vm_flags |= VM_RESERVED;

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	vma->vm_flags |= VM_IO;

	return 0;
}
616
/* /dev/zero and /dev/full reuse the trivial helpers above. */
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
/* /dev/mem and /dev/kmem share the CAP_SYS_RAWIO open check. */
#define open_mem	open_port
#define open_kmem	open_mem

/* /dev/mem: physical memory */
static struct file_operations mem_fops = {
	llseek:		memory_lseek,
	read:		read_mem,
	write:		write_mem,
	mmap:		mmap_mem,
	open:		open_mem,
};

/* /dev/kmem: kernel virtual memory */
static struct file_operations kmem_fops = {
	llseek:		memory_lseek,
	read:		read_kmem,
	write:		write_kmem,
	mmap:		mmap_kmem,
	open:		open_kmem,
};

/* /dev/null: empty source, bottomless sink */
static struct file_operations null_fops = {
	llseek:		null_lseek,
	read:		read_null,
	write:		write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
/* /dev/port: byte-wide I/O port access */
static struct file_operations port_fops = {
	llseek:		memory_lseek,
	read:		read_port,
	write:		write_port,
	open:		open_port,
};
#endif

/* /dev/zero: endless zeroes; writes are discarded */
static struct file_operations zero_fops = {
	llseek:		zero_lseek,
	read:		read_zero,
	write:		write_zero,
	mmap:		mmap_zero,
};

/* /dev/full: reads zeroes, writes fail with ENOSPC */
static struct file_operations full_fops = {
	llseek:		full_lseek,
	read:		read_full,
	write:		write_full,
};
667
memory_open(struct inode * inode,struct file * filp)668 static int memory_open(struct inode * inode, struct file * filp)
669 {
670 switch (MINOR(inode->i_rdev)) {
671 case 1:
672 filp->f_op = &mem_fops;
673 break;
674 case 2:
675 filp->f_op = &kmem_fops;
676 break;
677 case 3:
678 filp->f_op = &null_fops;
679 break;
680 #if defined(CONFIG_ISA) || !defined(__mc68000__)
681 case 4:
682 filp->f_op = &port_fops;
683 break;
684 #endif
685 case 5:
686 filp->f_op = &zero_fops;
687 break;
688 case 7:
689 filp->f_op = &full_fops;
690 break;
691 case 8:
692 filp->f_op = &random_fops;
693 break;
694 case 9:
695 filp->f_op = &urandom_fops;
696 break;
697 default:
698 return -ENXIO;
699 }
700 if (filp->f_op && filp->f_op->open)
701 return filp->f_op->open(inode,filp);
702 return 0;
703 }
704
memory_devfs_register(void)705 void __init memory_devfs_register (void)
706 {
707 /* These are never unregistered */
708 static const struct {
709 unsigned short minor;
710 char *name;
711 umode_t mode;
712 struct file_operations *fops;
713 } list[] = { /* list of minor devices */
714 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
715 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
716 {3, "null", S_IRUGO | S_IWUGO, &null_fops},
717 #if defined(CONFIG_ISA) || !defined(__mc68000__)
718 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
719 #endif
720 {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
721 {7, "full", S_IRUGO | S_IWUGO, &full_fops},
722 {8, "random", S_IRUGO | S_IWUSR, &random_fops},
723 {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}
724 };
725 int i;
726
727 for (i=0; i<(sizeof(list)/sizeof(*list)); i++)
728 devfs_register (NULL, list[i].name, DEVFS_FL_NONE,
729 MEM_MAJOR, list[i].minor,
730 list[i].mode | S_IFCHR,
731 list[i].fops, NULL);
732 }
733
/*
 * Top-level fops for the MEM_MAJOR character device: open() dispatches
 * to the per-minor file_operations selected in memory_open().
 */
static struct file_operations memory_fops = {
	open:		memory_open,	/* just a selector for the real open */
};
737
chr_dev_init(void)738 int __init chr_dev_init(void)
739 {
740 if (devfs_register_chrdev(MEM_MAJOR,"mem",&memory_fops))
741 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
742 memory_devfs_register();
743 rand_initialize();
744 #ifdef CONFIG_I2C
745 i2c_init_all();
746 #endif
747 #if defined (CONFIG_FB)
748 fbmem_init();
749 #endif
750 #if defined (CONFIG_PROM_CONSOLE)
751 prom_con_init();
752 #endif
753 #if defined (CONFIG_MDA_CONSOLE)
754 mda_console_init();
755 #endif
756 tty_init();
757 #ifdef CONFIG_M68K_PRINTER
758 lp_m68k_init();
759 #endif
760 misc_init();
761 #if CONFIG_QIC02_TAPE
762 qic02_tape_init();
763 #endif
764 #ifdef CONFIG_FTAPE
765 ftape_init();
766 #endif
767 #if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
768 tapechar_init();
769 #endif
770 return 0;
771 }
772
773 __initcall(chr_dev_init);
774