/* arch/x86_64/mm/modutil.c
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Based upon code written by Linus Torvalds and others.
 *
 * Blatantly copied from sparc64 for x86-64 by Andi Kleen.
 * Should use direct mapping with 2MB pages. This would need extension
 * of the kernel mapping.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/proto.h>

extern char _text[], _end[];

/* Kernel mapping to make the kernel alias visible to
 * /proc/kcore and /dev/mem
 *
 * RED-PEN may want vsyscall mappings too
 */

static struct vm_struct kernel_mapping = {
	.addr = (void *)KERNEL_TEXT_START,
};
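
/*
 * module_unmap - release a module mapping set up by module_map():
 * unlink its vm_struct from vmlist under vmlist_lock, free the backing
 * pages and the descriptor itself.
 */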
void module_unmap (void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk("Trying to unmap module with bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			/* Unlink the area first, then free it outside the lock. */
			*p = tmp->next;
			write_unlock(&vmlist_lock);
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			kfree(tmp);
			return;
		}
	}
	/* No matching area: drop the lock before complaining. */
	write_unlock(&vmlist_lock);
	printk("Trying to unmap nonexistent module vm area (%p)\n", addr);
}
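
/*
 * module_map - allocate virtual address space for a module in the
 * [MODULES_VADDR, MODULES_END) window.  The vmlist is scanned for the
 * first hole large enough (first fit); a vm_struct describing the range
 * is linked in, and the range is then backed with executable kernel
 * pages.  Returns the base address or NULL on failure.
 */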
void * module_map (unsigned long size)
{
	void * addr;
	struct vm_struct **p, *tmp, *area;

	size = PAGE_ALIGN(size);
	if (!size || size > MODULES_LEN)
		return NULL;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	memset(area, 0, sizeof(struct vm_struct));

	size = round_up(size, PAGE_SIZE);

	/* First-fit scan of vmlist for a hole of at least "size" bytes,
	   starting at MODULES_VADDR. */
	addr = (void *) MODULES_VADDR;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		void *next;
		if (size + (unsigned long) addr < (unsigned long) tmp->addr)
			break;
		next = (void *) (tmp->size + (unsigned long) tmp->addr);
		if (next > addr)
			addr = next;
	}
	if ((unsigned long) addr + size >= MODULES_END) {
		write_unlock(&vmlist_lock);
		kfree(area);
		return NULL;
	}

	/* Link the new area into vmlist, then back it with executable pages. */
	area->size = size;
	area->addr = addr;
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, GFP_KERNEL, PAGE_KERNEL_EXECUTABLE)) {
		module_unmap(addr);
		return NULL;
	}
	return addr;
}
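
/*
 * Rough usage sketch (illustrative only, not called from this file): a
 * caller such as the module loader is expected to pair these helpers
 * along the lines of:
 *
 *	void *base = module_map(image_size);
 *	if (!base)
 *		return -ENOMEM;
 *	...copy the module image into base...
 *	module_unmap(base);
 *
 * "image_size" is just a placeholder for the caller's allocation size.
 */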

/* Insert the kernel text mapping into vmlist at boot so that the kernel
   alias is visible through /proc/kcore and /dev/mem (see comment above). */
static int __init mod_vmlist_init(void)
{
	struct vm_struct *vm, **base;

	write_lock(&vmlist_lock);
	/* Find the insertion point that keeps vmlist sorted by address. */
	for (base = &vmlist, vm = *base; vm; base = &vm->next, vm = *base) {
		if (vm->addr > (void *)KERNEL_TEXT_START)
			break;
	}
	kernel_mapping.size = _end - _text;
	kernel_mapping.next = vm;
	*base = &kernel_mapping;
	write_unlock(&vmlist_lock);
	return 0;
}

__initcall(mod_vmlist_init);