/*
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes etc... There are at max 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 *
 * Note: the concept clashes with User Mode Linux. UML users should
 * use the vDSO.
 */
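/*
 * [ Added layout note: with the page at -10 MB, i.e. 0xffffffffff600000,
 *   the 1024-byte slots put the three entry points at:
 *
 *	0xffffffffff600000	nr 0	gettimeofday()
 *	0xffffffffff600400	nr 1	time()
 *	0xffffffffff600800	nr 2	getcpu()
 *
 *   Slot 3 was never assigned; see addr_to_vsyscall_nr() below. ]
 */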

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/topology.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
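/*
 * [ Added note: DEFINE_VVAR places these variables in the vvar page,
 *   which map_vsyscall() below maps user-readable into every address
 *   space, so the vDSO/vsyscall fast paths can read them without
 *   entering the kernel. ]
 */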

static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;

static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("native", str))
			vsyscall_mode = NATIVE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
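/*
 * [ Usage note: the mode is chosen on the kernel command line, e.g.
 *   "vsyscall=native" or "vsyscall=none"; EMULATE is the default. ]
 */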

void update_vsyscall_tz(void)
{
	vsyscall_gtod_data.sys_tz = sys_tz;
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
		     struct clocksource *clock, u32 mult)
{
	struct timespec monotonic;

	write_seqcount_begin(&vsyscall_gtod_data.seq);

	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = mult;
	vsyscall_gtod_data.clock.shift = clock->shift;

	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;

	monotonic = timespec_add(*wall_time, *wtm);
	vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
	vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;

	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
	vsyscall_gtod_data.monotonic_time_coarse =
		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);

	write_seqcount_end(&vsyscall_gtod_data.seq);
}
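/*
 * [ Added sketch of the matching read side, which lives in the vDSO's
 *   vclock_gettime(), not in this file -- names abbreviated for
 *   illustration:
 *
 *	do {
 *		seq = read_seqcount_begin(&gtod->seq);
 *		sec = gtod->wall_time_sec;
 *		ns  = gtod->wall_time_nsec +
 *		      (((cycles - gtod->clock.cycle_last) & gtod->clock.mask) *
 *		       gtod->clock.mult >> gtod->clock.shift);
 *	} while (read_seqcount_retry(&gtod->seq, seq));
 *
 *   The write section above guarantees readers never observe a
 *   half-updated snapshot. ]
 */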

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk;

	if (!show_unhandled_signals || !__ratelimit(&rs))
		return;

	tsk = current;

	printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
	       level, tsk->comm, task_pid_nr(tsk),
	       message, regs->ip, regs->cs,
	       regs->sp, regs->ax, regs->si, regs->di);
}

static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_START)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
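/*
 * [ Added worked example for the decoder above: a call to
 *   0xffffffffff600400 has (addr & ~0xC00UL) == VSYSCALL_START and
 *   (addr & 0xC00UL) >> 10 == 1, i.e. the time() vsyscall.  0xC00 masks
 *   the two bits that select one of the four 1024-byte slots; slot 3
 *   was never assigned, hence the nr >= 3 rejection. ]
 */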

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_error, this could go away.
	 */

	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
		siginfo_t info;
		struct thread_struct *thread = &current->thread;

		thread->error_code = 6; /* user fault, no page, write */
		thread->cr2 = ptr;
		thread->trap_nr = X86_TRAP_PF;

		memset(&info, 0, sizeof(info));
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *)ptr;

		force_sig_info(SIGSEGV, &info, current);
		return false;
	} else {
		return true;
	}
}
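/*
 * [ Added note on error_code = 6 above: x86 page-fault error codes are
 *   a bit field -- bit 0 present, bit 1 write, bit 2 user -- so 6 means
 *   a user-mode write to a not-present page, i.e. what the hardware
 *   would have reported had the faulting store really been executed. ]
 */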

bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr;
	int prev_sig_on_uaccess_error;
	long ret;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;
	if (seccomp_mode(&tsk->seccomp))
		do_exit(SIGKILL);
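	/*
	 * [ Added rationale: the emulated calls below bypass the normal
	 *   syscall entry path, so a seccomp-confined task could
	 *   otherwise issue gettimeofday/time/getcpu without its policy
	 *   ever seeing them; killing the task is the conservative
	 *   answer. ]
	 */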

	/*
	 * With a real vsyscall, page faults cause SIGSEGV. We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
	current_thread_info()->sig_on_uaccess_error = 1;

	/*
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here. For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
			break;

		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t)))
			break;

		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned)))
			break;

		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 NULL);
		break;
	}

	current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;

	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here. (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true; /* Don't emulate the ret. */
	}

	regs->ax = ret;

	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;

	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}
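/*
 * [ Added illustration of how a legacy (pre-vDSO) binary reaches
 *   emulate_vsyscall(): it calls the fixed address directly, e.g.
 *
 *	struct timeval tv;
 *	long (*vgtod)(struct timeval *, struct timezone *) =
 *		(void *)0xffffffffff600000;
 *	vgtod(&tv, NULL);
 *
 *   In EMULATE mode the page is mapped non-executable, so the call
 *   faults and the page-fault handler hands it to this function. ]
 */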

/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded quickly
	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
	 */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
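/*
 * [ Added sketch of the matching user-side decode (vgetcpu's LSL
 *   fallback for CPUs without RDTSCP; it lives in the vDSO/vsyscall
 *   code, not here):
 *
 *	unsigned int p;
 *	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 *	cpu  = p & 0xfff;
 *	node = p >> 12;
 *
 *   lsl reads back the 20-bit segment limit, i.e. exactly the cpu and
 *   node bits packed into d above. ]
 */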

static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

	return NOTIFY_DONE;
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
	extern char __vvar_page;
	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
		     vsyscall_mode == NATIVE
		     ? PAGE_KERNEL_VSYSCALL
		     : PAGE_KERNEL_VVAR);
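	/*
	 * [ Added note: PAGE_KERNEL_VSYSCALL is user-executable, so in
	 *   NATIVE mode the page's code runs directly; PAGE_KERNEL_VVAR
	 *   is user-readable but NX, so in EMULATE (and NONE) mode every
	 *   call into the page faults into emulate_vsyscall() above. ]
	 */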
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
		     (unsigned long)VSYSCALL_START);

	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}

static int __init vsyscall_init(void)
{
	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	hotcpu_notifier(cpu_vsyscall_notifier, 30);

	return 0;
}
__initcall(vsyscall_init);