// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Helge Deller <deller@gmx.de>
 *
 * based on arch/s390/kernel/vdso.c which is
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/timekeeper_internal.h>
#include <linux/compat.h>
#include <linux/nsproxy.h>
#include <linux/time_namespace.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/cacheflush.h>

extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;

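/*
 * Keep context.vdso_base in sync when userspace moves the vDSO
 * mapping with mremap().
 */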
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

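/*
 * One special mapping per vDSO flavour; the backing .pages arrays are
 * filled in at boot by vdso_init() below.
 */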
#ifdef CONFIG_64BIT
static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};
#endif

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

/*
 * This is called from binfmt_elf: we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	unsigned long vdso_text_start, vdso_text_len, map_base;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

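	/* Pick the vDSO image matching the task: 64-bit native or 32-bit compat. */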
#ifdef CONFIG_64BIT
	if (!is_compat_task()) {
		vdso_text_len = &vdso64_end - &vdso64_start;
		vdso_mapping = &vdso64_mapping;
	} else
#endif
	{
		vdso_text_len = &vdso32_end - &vdso32_start;
		vdso_mapping = &vdso32_mapping;
	}

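	/*
	 * Search for a free area below the normal mmap base; with address
	 * space randomization, first shift the base down by up to 31 pages.
	 */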
	map_base = mm->mmap_base;
	if (current->flags & PF_RANDOMIZE)
		map_base -= get_random_u32_below(0x20) * PAGE_SIZE;

	vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);

	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vdso_text_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}

	mmap_write_unlock(mm);
	return rc;
}

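/*
 * Build a NULL-terminated struct page array covering a vDSO image, for
 * use as the .pages member of its vm_special_mapping.
 */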
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

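/* Populate the page arrays for the compiled-in vDSO images at boot. */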
static int __init vdso_init(void)
{
#ifdef CONFIG_64BIT
	vdso64_mapping.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
#endif
	if (IS_ENABLED(CONFIG_COMPAT) || !IS_ENABLED(CONFIG_64BIT))
		vdso32_mapping.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);
	return 0;
}
arch_initcall(vdso_init);