1 /*
2 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
3 *
4 * Copyright (C) 2000 VA Linux Co
5 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
6 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 * Copyright (C) 2000-2002 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
11 *
12 * These routines maintain argument size conversion between 32bit and 64bit
13 * environments.
14 */
15
16 #include <linux/config.h>
17 #include <linux/kernel.h>
18 #include <linux/sysctl.h>
19 #include <linux/sched.h>
20 #include <linux/fs.h>
21 #include <linux/file.h>
22 #include <linux/signal.h>
23 #include <linux/utime.h>
24 #include <linux/resource.h>
25 #include <linux/times.h>
26 #include <linux/utsname.h>
27 #include <linux/timex.h>
28 #include <linux/smp.h>
29 #include <linux/smp_lock.h>
30 #include <linux/sem.h>
31 #include <linux/msg.h>
32 #include <linux/mm.h>
33 #include <linux/shm.h>
34 #include <linux/slab.h>
35 #include <linux/uio.h>
36 #include <linux/nfs_fs.h>
37 #include <linux/smb_fs.h>
38 #include <linux/smb_mount.h>
39 #include <linux/ncp_fs.h>
40 #include <linux/quota.h>
41 #include <linux/quotacompat.h>
42 #include <linux/module.h>
43 #include <linux/sunrpc/svc.h>
44 #include <linux/nfsd/nfsd.h>
45 #include <linux/nfsd/cache.h>
46 #include <linux/nfsd/xdr.h>
47 #include <linux/nfsd/syscall.h>
48 #include <linux/poll.h>
49 #include <linux/personality.h>
50 #include <linux/stat.h>
51 #include <linux/ipc.h>
52
53 #include <asm/types.h>
54 #include <asm/uaccess.h>
55 #include <asm/semaphore.h>
56
57 #include <net/scm.h>
58 #include <net/sock.h>
59 #include <asm/ia32.h>
60
61 #define DEBUG 0
62
63 #if DEBUG
64 # define DBG(fmt...) printk(KERN_DEBUG fmt)
65 #else
66 # define DBG(fmt...)
67 #endif
68
69 #define A(__x) ((unsigned long)(__x))
70 #define AA(__x) ((unsigned long)(__x))
71 #define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
72 #define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
73
74 #define OFFSET4K(a) ((a) & 0xfff)
75 #define PAGE_START(addr) ((addr) & PAGE_MASK)
76 #define PAGE_OFF(addr) ((addr) & ~PAGE_MASK)
77 #define MINSIGSTKSZ_IA32 2048
78
79 extern asmlinkage long sys_execve (char *, char **, char **, struct pt_regs *);
80 extern asmlinkage long sys_mprotect (unsigned long, size_t, unsigned long);
81 extern asmlinkage long sys_munmap (unsigned long, size_t);
82 extern unsigned long arch_get_unmapped_area (struct file *, unsigned long, unsigned long,
83 unsigned long, unsigned long);
84
85 /* forward declaration: */
86 asmlinkage long sys32_mprotect (unsigned int, unsigned int, int);
87 asmlinkage unsigned long sys_brk(unsigned long);
88
89 /*
90 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
91 * while doing so.
92 */
93 /* XXX make per-mm: */
94 static DECLARE_MUTEX(ia32_mmap_sem);
95
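/*
 * Count the entries in the NULL-terminated array of 32-bit user pointers at
 * ARG, optionally storing them (zero-extended to 64 bits) into AP.  Returns
 * the number of entries not counting the terminating NULL, or a negative
 * error code (-EFAULT on a bad user pointer, -E2BIG if there are too many).
 */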
96 static int
97 nargs (unsigned int arg, char **ap, int max)
98 {
99 unsigned int addr;
100 int n, err;
101
102 if (!arg)
103 return 0;
104
105 n = 0;
106 do {
107 err = get_user(addr, (unsigned int *)A(arg));
108 if (err)
109 return err;
110 if (n > max)
111 return -E2BIG;
112 if (ap)
113 *ap++ = (char *) A(addr);
114 arg += sizeof(unsigned int);
115 n++;
116 if (n >= (MAX_ARG_PAGES * PAGE_SIZE) / sizeof(char *))
117 return -E2BIG;
118 } while (addr);
119 return n - 1;
120 }
121
122 asmlinkage long
123 sys32_execve (char *filename, unsigned int argv, unsigned int envp,
124 int dummy3, int dummy4, int dummy5, int dummy6, int dummy7,
125 int stack)
126 {
127 struct pt_regs *regs = (struct pt_regs *)&stack;
128 unsigned long old_map_base, old_task_size, tssd;
129 char **av, **ae;
130 int na, ne, len;
131 long r;
132
133 /* Allocates up to 2x MAX_ARG_PAGES */
134 na = nargs(argv, NULL, (MAX_ARG_PAGES*PAGE_SIZE) / sizeof(char *) - 1);
135 if (na < 0)
136 return na;
137 ne = nargs(envp, NULL, (MAX_ARG_PAGES*PAGE_SIZE) / sizeof(char *) - 1 );
138 if (ne < 0)
139 return ne;
140 len = (na + ne + 2) * sizeof(*av);
141 av = kmalloc(len, GFP_KERNEL);
142 if (!av)
143 return -ENOMEM;
144
145 ae = av + na + 1;
146 av[na] = NULL;
147 ae[ne] = NULL;
148
149 r = nargs(argv, av, na);
150 if (r < 0)
151 goto out;
152 r = nargs(envp, ae, ne);
153 if (r < 0)
154 goto out;
155
156 old_map_base = current->thread.map_base;
157 old_task_size = current->thread.task_size;
158 tssd = ia64_get_kr(IA64_KR_TSSD);
159
160 /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
161 current->thread.map_base = DEFAULT_MAP_BASE;
162 current->thread.task_size = DEFAULT_TASK_SIZE;
163 ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
164 ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
165
166 set_fs(KERNEL_DS);
167 r = sys_execve(filename, av, ae, regs);
168 if (r < 0) {
169 /* oops, execve failed, switch back to old values... */
170 ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
171 ia64_set_kr(IA64_KR_TSSD, tssd);
172 current->thread.map_base = old_map_base;
173 current->thread.task_size = old_task_size;
174 set_fs(USER_DS); /* establish new task-size as the address-limit */
175 }
176 out:
177 kfree(av);
178 return r;
179 }
180
181 static inline int
182 putstat (struct stat32 *ubuf, struct stat *kbuf)
183 {
184 int err;
185
186 if (clear_user(ubuf, sizeof(*ubuf)))
187 return 1;
188
189 err = __put_user(kbuf->st_dev, &ubuf->st_dev);
190 err |= __put_user(kbuf->st_ino, &ubuf->st_ino);
191 err |= __put_user(kbuf->st_mode, &ubuf->st_mode);
192 err |= __put_user(kbuf->st_nlink, &ubuf->st_nlink);
193 err |= __put_user(kbuf->st_uid, &ubuf->st_uid);
194 err |= __put_user(kbuf->st_gid, &ubuf->st_gid);
195 err |= __put_user(kbuf->st_rdev, &ubuf->st_rdev);
196 err |= __put_user(kbuf->st_size, &ubuf->st_size);
197 err |= __put_user(kbuf->st_atime, &ubuf->st_atime);
198 err |= __put_user(kbuf->st_mtime, &ubuf->st_mtime);
199 err |= __put_user(kbuf->st_ctime, &ubuf->st_ctime);
200 err |= __put_user(kbuf->st_blksize, &ubuf->st_blksize);
201 err |= __put_user(kbuf->st_blocks, &ubuf->st_blocks);
202 return err;
203 }
204
205 extern asmlinkage long sys_newstat (char * filename, struct stat * statbuf);
206
207 asmlinkage long
208 sys32_newstat (char *filename, struct stat32 *statbuf)
209 {
210 char *name;
211 int ret;
212 struct stat s;
213 mm_segment_t old_fs = get_fs();
214
215 name = getname(filename);
216 if (IS_ERR(name))
217 return PTR_ERR(name);
218 set_fs(KERNEL_DS);
219 ret = sys_newstat(name, &s);
220 set_fs(old_fs);
221 putname(name);
222 if (putstat(statbuf, &s))
223 return -EFAULT;
224 return ret;
225 }
226
227 extern asmlinkage long sys_newlstat(char * filename, struct stat * statbuf);
228
229 asmlinkage long
230 sys32_newlstat (char *filename, struct stat32 *statbuf)
231 {
232 char *name;
233 mm_segment_t old_fs = get_fs();
234 struct stat s;
235 int ret;
236
237 name = getname(filename);
238 if (IS_ERR(name))
239 return PTR_ERR(name);
240 set_fs(KERNEL_DS);
241 ret = sys_newlstat(name, &s);
242 set_fs(old_fs);
243 putname(name);
244 if (putstat(statbuf, &s))
245 return -EFAULT;
246 return ret;
247 }
248
249 extern asmlinkage long sys_newfstat(unsigned int fd, struct stat * statbuf);
250
251 asmlinkage long
252 sys32_newfstat (unsigned int fd, struct stat32 *statbuf)
253 {
254 mm_segment_t old_fs = get_fs();
255 struct stat s;
256 int ret;
257
258 set_fs(KERNEL_DS);
259 ret = sys_newfstat(fd, &s);
260 set_fs(old_fs);
261 if (putstat(statbuf, &s))
262 return -EFAULT;
263 return ret;
264 }
265
266 #if PAGE_SHIFT > IA32_PAGE_SHIFT
267
268
269 static int
270 get_page_prot (struct vm_area_struct *vma, unsigned long addr)
271 {
272 int prot = 0;
273
274 if (!vma || vma->vm_start > addr)
275 return 0;
276
277 if (vma->vm_flags & VM_READ)
278 prot |= PROT_READ;
279 if (vma->vm_flags & VM_WRITE)
280 prot |= PROT_WRITE;
281 if (vma->vm_flags & VM_EXEC)
282 prot |= PROT_EXEC;
283 return prot;
284 }
285
286 /*
287 * Map a subpage by creating an anonymous page that contains the union of the old page and
288 * the subpage.
289 */
290 static unsigned long
291 mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
292 loff_t off)
293 {
294 void *page = NULL;
295 struct inode *inode;
296 unsigned long ret = 0;
297 struct vm_area_struct *vma = find_vma(current->mm, start);
298 int old_prot = get_page_prot(vma, start);
299
300 DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
301 file, start, end, prot, flags, off);
302
303
304 /* Optimize the case where the old mmap and the new mmap are both anonymous */
305 if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
306 if (clear_user((void *) start, end - start)) {
307 ret = -EFAULT;
308 goto out;
309 }
310 goto skip_mmap;
311 }
312
313 page = (void *) get_zeroed_page(GFP_KERNEL);
314 if (!page)
315 return -ENOMEM;
316
317 if (old_prot)
318 copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE);
319
320 down_write(&current->mm->mmap_sem);
321 {
322 ret = do_mmap(0, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
323 flags | MAP_FIXED | MAP_ANONYMOUS, 0);
324 }
325 up_write(&current->mm->mmap_sem);
326
327 if (IS_ERR((void *) ret))
328 goto out;
329
330 if (old_prot) {
331 /* copy back the old page contents. */
332 if (PAGE_OFF(start))
333 copy_to_user((void *) PAGE_START(start), page, PAGE_OFF(start));
334 if (PAGE_OFF(end))
335 copy_to_user((void *) end, page + PAGE_OFF(end),
336 PAGE_SIZE - PAGE_OFF(end));
337 }
338
339 if (!(flags & MAP_ANONYMOUS)) {
340 /* read the file contents */
341 inode = file->f_dentry->d_inode;
342 if (!inode->i_fop || !file->f_op->read
343 || ((*file->f_op->read)(file, (char *) start, end - start, &off) < 0))
344 {
345 ret = -EINVAL;
346 goto out;
347 }
348 }
349
350 skip_mmap:
351 if (!(prot & PROT_WRITE))
352 ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
353 out:
354 if (page)
355 free_page((unsigned long) page);
356 return ret;
357 }
358
359 static unsigned long
360 emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
361 loff_t off)
362 {
363 unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
364 struct inode *inode;
365 loff_t poff;
366
367 end = start + len;
368 pstart = PAGE_START(start);
369 pend = PAGE_ALIGN(end);
370
371 if (flags & MAP_FIXED) {
372 if (start > pstart) {
373 if (flags & MAP_SHARED)
374 printk(KERN_INFO
375 "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
376 current->comm, current->pid, start);
377 ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
378 off);
379 if (IS_ERR((void *) ret))
380 return ret;
381 pstart += PAGE_SIZE;
382 if (pstart >= pend)
383 return start; /* done */
384 }
385 if (end < pend) {
386 if (flags & MAP_SHARED)
387 printk(KERN_INFO
388 "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
389 current->comm, current->pid, end);
390 ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
391 (off + len) - PAGE_OFF(end));
392 if (IS_ERR((void *) ret))
393 return ret;
394 pend -= PAGE_SIZE;
395 if (pstart >= pend)
396 return start; /* done */
397 }
398 } else {
399 /*
400 * If a start address was specified, use it if the entire rounded out area
401 * is available.
402 */
403 if (start && !pstart)
404 fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */
405 tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
406 if (tmp != pstart) {
407 pstart = tmp;
408 start = pstart + PAGE_OFF(off); /* make start congruent with off */
409 end = start + len;
410 pend = PAGE_ALIGN(end);
411 }
412 }
413
414 poff = off + (pstart - start); /* note: (pstart - start) may be negative */
415 is_congruent = (flags & MAP_ANONYMOUS) || (PAGE_OFF(poff) == 0);
416
417 if ((flags & MAP_SHARED) && !is_congruent)
418 printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
419 "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);
420
421 DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
422 is_congruent ? "congruent" : "not congruent", poff);
423
424 down_write(&current->mm->mmap_sem);
425 {
426 if (!(flags & MAP_ANONYMOUS) && is_congruent)
427 ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
428 else
429 ret = do_mmap(0, pstart, pend - pstart,
430 prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
431 flags | MAP_FIXED | MAP_ANONYMOUS, 0);
432 }
433 up_write(&current->mm->mmap_sem);
434
435 if (IS_ERR((void *) ret))
436 return ret;
437
438 if (!is_congruent) {
439 /* read the file contents */
440 inode = file->f_dentry->d_inode;
441 if (!inode->i_fop || !file->f_op->read
442 || ((*file->f_op->read)(file, (char *) pstart, pend - pstart, &poff) < 0))
443 {
444 sys_munmap(pstart, pend - pstart);
445 return -EINVAL;
446 }
447 if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
448 return -EINVAL;
449 }
450 return start;
451 }
452
453 #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
454
455 static inline unsigned int
456 get_prot32 (unsigned int prot)
457 {
458 if (prot & PROT_WRITE)
459 /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
460 prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
461 else if (prot & (PROT_READ | PROT_EXEC))
462 /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
463 prot |= (PROT_READ | PROT_EXEC);
464
465 return prot;
466 }
467
468 unsigned long
469 ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
470 loff_t offset)
471 {
472 DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
473 file, addr, len, prot, flags, offset);
474
475 if (file && (!file->f_op || !file->f_op->mmap))
476 return -ENODEV;
477
478 len = IA32_PAGE_ALIGN(len);
479 if (len == 0)
480 return addr;
481
482 if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
483 {
484 if (flags & MAP_FIXED)
485 return -ENOMEM;
486 else
487 return -EINVAL;
488 }
489
490 if (OFFSET4K(offset))
491 return -EINVAL;
492
493 prot = get_prot32(prot);
494
495 #if PAGE_SHIFT > IA32_PAGE_SHIFT
496 down(&ia32_mmap_sem);
497 {
498 addr = emulate_mmap(file, addr, len, prot, flags, offset);
499 }
500 up(&ia32_mmap_sem);
501 #else
502 down_write(&current->mm->mmap_sem);
503 {
504 addr = do_mmap(file, addr, len, prot, flags, offset);
505 }
506 up_write(&current->mm->mmap_sem);
507 #endif
508 DBG("ia32_do_mmap: returning 0x%lx\n", addr);
509 return addr;
510 }
511
512 /*
513 * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
514 * system calls used a memory block for parameter passing.
515 */
516
517 struct mmap_arg_struct {
518 unsigned int addr;
519 unsigned int len;
520 unsigned int prot;
521 unsigned int flags;
522 unsigned int fd;
523 unsigned int offset;
524 };
525
526 asmlinkage long
527 sys32_mmap (struct mmap_arg_struct *arg)
528 {
529 struct mmap_arg_struct a;
530 struct file *file = NULL;
531 unsigned long addr;
532 int flags;
533
534 if (copy_from_user(&a, arg, sizeof(a)))
535 return -EFAULT;
536
537 if (OFFSET4K(a.offset))
538 return -EINVAL;
539
540 flags = a.flags;
541
542 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
543 if (!(flags & MAP_ANONYMOUS)) {
544 file = fget(a.fd);
545 if (!file)
546 return -EBADF;
547 }
548
549 addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
550
551 if (file)
552 fput(file);
553 return addr;
554 }
555
556 asmlinkage long
557 sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
558 unsigned int fd, unsigned int pgoff)
559 {
560 struct file *file = NULL;
561 unsigned long retval;
562
563 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
564 if (!(flags & MAP_ANONYMOUS)) {
565 file = fget(fd);
566 if (!file)
567 return -EBADF;
568 }
569
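/* pgoff is in units of 4KB IA-32 pages; convert it to a byte offset. */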
570 retval = ia32_do_mmap(file, addr, len, prot, flags,
571 (unsigned long) pgoff << IA32_PAGE_SHIFT);
572
573 if (file)
574 fput(file);
575 return retval;
576 }
577
578 asmlinkage long
579 sys32_munmap (unsigned int start, unsigned int len)
580 {
581 unsigned int end = start + len;
582 long ret;
583
584 #if PAGE_SHIFT <= IA32_PAGE_SHIFT
585 ret = sys_munmap(start, end - start);
586 #else
587 if (start >= end)
588 return -EINVAL;
589
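/*
 * Only unmap the range covered by whole kernel pages; a partially covered
 * page at either end is left mapped, since it may still back other 4KB
 * IA-32 mappings.
 */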
590 start = PAGE_ALIGN(start);
591 end = PAGE_START(end);
592
593 if (start >= end)
594 return 0;
595
596 down(&ia32_mmap_sem);
597 {
598 ret = sys_munmap(start, end - start);
599 }
600 up(&ia32_mmap_sem);
601 #endif
602 return ret;
603 }
604
605 #if PAGE_SHIFT > IA32_PAGE_SHIFT
606
607 /*
608 * When mprotect()ing a partial page, we set the permission to the union of the old
609 * settings and the new settings. In other words, it's only possible to make access to a
610 * partial page less restrictive.
611 */
612 static long
613 mprotect_subpage (unsigned long address, int new_prot)
614 {
615 int old_prot;
616 struct vm_area_struct *vma;
617
618 if (new_prot == PROT_NONE)
619 return 0; /* optimize case where nothing changes... */
620 vma = find_vma(current->mm, address);
621 old_prot = get_page_prot(vma, address);
622 return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
623 }
624
625 #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
626
627 asmlinkage long
628 sys32_mprotect (unsigned int start, unsigned int len, int prot)
629 {
630 unsigned long end = start + len;
631 #if PAGE_SHIFT > IA32_PAGE_SHIFT
632 long retval = 0;
633 #endif
634
635 prot = get_prot32(prot);
636
637 #if PAGE_SHIFT <= IA32_PAGE_SHIFT
638 return sys_mprotect(start, end - start, prot);
639 #else
640 if (OFFSET4K(start))
641 return -EINVAL;
642
643 end = IA32_PAGE_ALIGN(end);
644 if (end < start)
645 return -EINVAL;
646
647 down(&ia32_mmap_sem);
648 {
649 if (PAGE_OFF(start)) {
650 /* start address is 4KB aligned but not page aligned. */
651 retval = mprotect_subpage(PAGE_START(start), prot);
652 if (retval < 0)
653 goto out;
654
655 start = PAGE_ALIGN(start);
656 if (start >= end)
657 goto out; /* retval is already zero... */
658 }
659
660 if (PAGE_OFF(end)) {
661 /* end address is 4KB aligned but not page aligned. */
662 retval = mprotect_subpage(PAGE_START(end), prot);
663 if (retval < 0)
664 goto out;
665
666 end = PAGE_START(end);
667 }
668 retval = sys_mprotect(start, end - start, prot);
669 }
670 out:
671 up(&ia32_mmap_sem);
672 return retval;
673 #endif
674 }
675
676 asmlinkage long
677 sys32_pipe (int *fd)
678 {
679 int retval;
680 int fds[2];
681
682 retval = do_pipe(fds);
683 if (retval)
684 goto out;
685 if (copy_to_user(fd, fds, sizeof(fds)))
686 retval = -EFAULT;
687 out:
688 return retval;
689 }
690
691 static inline int
692 put_statfs (struct statfs32 *ubuf, struct statfs *kbuf)
693 {
694 int err;
695
696 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
697 return -EFAULT;
698
699 err = __put_user(kbuf->f_type, &ubuf->f_type);
700 err |= __put_user(kbuf->f_bsize, &ubuf->f_bsize);
701 err |= __put_user(kbuf->f_blocks, &ubuf->f_blocks);
702 err |= __put_user(kbuf->f_bfree, &ubuf->f_bfree);
703 err |= __put_user(kbuf->f_bavail, &ubuf->f_bavail);
704 err |= __put_user(kbuf->f_files, &ubuf->f_files);
705 err |= __put_user(kbuf->f_ffree, &ubuf->f_ffree);
706 err |= __put_user(kbuf->f_namelen, &ubuf->f_namelen);
707 err |= __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]);
708 err |= __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]);
709 return err;
710 }
711
712 extern asmlinkage long sys_statfs(const char * path, struct statfs * buf);
713
714 asmlinkage long
715 sys32_statfs (const char *path, struct statfs32 *buf)
716 {
717 const char *name;
718 int ret;
719 struct statfs s;
720 mm_segment_t old_fs = get_fs();
721
722 name = getname(path);
723 if (IS_ERR(name))
724 return PTR_ERR(name);
725 set_fs(KERNEL_DS);
726 ret = sys_statfs(name, &s);
727 set_fs(old_fs);
728 putname(name);
729 if (put_statfs(buf, &s))
730 return -EFAULT;
731 return ret;
732 }
733
734 extern asmlinkage long sys_fstatfs(unsigned int fd, struct statfs * buf);
735
736 asmlinkage long
737 sys32_fstatfs (unsigned int fd, struct statfs32 *buf)
738 {
739 int ret;
740 struct statfs s;
741 mm_segment_t old_fs = get_fs();
742
743 set_fs(KERNEL_DS);
744 ret = sys_fstatfs(fd, &s);
745 set_fs(old_fs);
746 if (put_statfs(buf, &s))
747 return -EFAULT;
748 return ret;
749 }
750
751 struct timeval32
752 {
753 int tv_sec, tv_usec;
754 };
755
756 struct itimerval32
757 {
758 struct timeval32 it_interval;
759 struct timeval32 it_value;
760 };
761
762 static inline long
763 get_tv32 (struct timeval *o, struct timeval32 *i)
764 {
765 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
766 (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
767 }
768
769 static inline long
770 put_tv32 (struct timeval32 *o, struct timeval *i)
771 {
772 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
773 (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
774 }
775
776 static inline long
777 get_it32 (struct itimerval *o, struct itimerval32 *i)
778 {
779 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
780 (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
781 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
782 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
783 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
784 }
785
786 static inline long
787 put_it32 (struct itimerval32 *o, struct itimerval *i)
788 {
789 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
790 (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
791 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
792 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
793 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
794 }
795
796 extern int do_getitimer (int which, struct itimerval *value);
797
798 asmlinkage long
799 sys32_getitimer (int which, struct itimerval32 *it)
800 {
801 struct itimerval kit;
802 int error;
803
804 error = do_getitimer(which, &kit);
805 if (!error && put_it32(it, &kit))
806 error = -EFAULT;
807
808 return error;
809 }
810
811 extern int do_setitimer (int which, struct itimerval *, struct itimerval *);
812
813 asmlinkage long
814 sys32_setitimer (int which, struct itimerval32 *in, struct itimerval32 *out)
815 {
816 struct itimerval kin, kout;
817 int error;
818
819 if (in) {
820 if (get_it32(&kin, in))
821 return -EFAULT;
822 } else
823 memset(&kin, 0, sizeof(kin));
824
825 error = do_setitimer(which, &kin, out ? &kout : NULL);
826 if (error || !out)
827 return error;
828 if (put_it32(out, &kout))
829 return -EFAULT;
830
831 return 0;
832
833 }
834
835 asmlinkage unsigned long
836 sys32_alarm (unsigned int seconds)
837 {
838 struct itimerval it_new, it_old;
839 unsigned int oldalarm;
840
841 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
842 it_new.it_value.tv_sec = seconds;
843 it_new.it_value.tv_usec = 0;
844 do_setitimer(ITIMER_REAL, &it_new, &it_old);
845 oldalarm = it_old.it_value.tv_sec;
846 /* ehhh.. We can't return 0 if we have an alarm pending.. */
847 /* And we'd better return too much than too little anyway */
848 if (it_old.it_value.tv_usec)
849 oldalarm++;
850 return oldalarm;
851 }
852
853 /* Translations due to time_t size differences, which affect all
854 sorts of things, like timeval and itimerval. */
855
856 struct utimbuf_32 {
857 int atime;
858 int mtime;
859 };
860
861 extern asmlinkage long sys_utimes(char * filename, struct timeval * utimes);
862 extern asmlinkage long sys_gettimeofday (struct timeval *tv, struct timezone *tz);
863
864 asmlinkage long
865 sys32_utime (char *filename, struct utimbuf_32 *times32)
866 {
867 mm_segment_t old_fs = get_fs();
868 struct timeval tv[2], *tvp;
869 long ret;
870
871 if (times32) {
872 if (get_user(tv[0].tv_sec, &times32->atime))
873 return -EFAULT;
874 tv[0].tv_usec = 0;
875 if (get_user(tv[1].tv_sec, &times32->mtime))
876 return -EFAULT;
877 tv[1].tv_usec = 0;
878 set_fs(KERNEL_DS);
879 tvp = tv;
880 } else
881 tvp = NULL;
882 ret = sys_utimes(filename, tvp);
883 set_fs(old_fs);
884 return ret;
885 }
886
887 extern struct timezone sys_tz;
888 extern int do_sys_settimeofday (struct timeval *tv, struct timezone *tz);
889
890 asmlinkage long
891 sys32_gettimeofday (struct timeval32 *tv, struct timezone *tz)
892 {
893 if (tv) {
894 struct timeval ktv;
895 do_gettimeofday(&ktv);
896 if (put_tv32(tv, &ktv))
897 return -EFAULT;
898 }
899 if (tz) {
900 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
901 return -EFAULT;
902 }
903 return 0;
904 }
905
906 asmlinkage long
907 sys32_settimeofday (struct timeval32 *tv, struct timezone *tz)
908 {
909 struct timeval ktv;
910 struct timezone ktz;
911
912 if (tv) {
913 if (get_tv32(&ktv, tv))
914 return -EFAULT;
915 }
916 if (tz) {
917 if (copy_from_user(&ktz, tz, sizeof(ktz)))
918 return -EFAULT;
919 }
920
921 return do_sys_settimeofday(tv ? &ktv : NULL, tz ? &ktz : NULL);
922 }
923
924 struct getdents32_callback {
925 struct linux32_dirent * current_dir;
926 struct linux32_dirent * previous;
927 int count;
928 int error;
929 };
930
931 struct readdir32_callback {
932 struct old_linux32_dirent * dirent;
933 int count;
934 };
935
936 static int
937 filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
938 unsigned int d_type)
939 {
940 struct linux32_dirent * dirent;
941 struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
942 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4);
943
944 buf->error = -EINVAL; /* only used if we fail.. */
945 if (reclen > buf->count)
946 return -EINVAL;
947 buf->error = -EFAULT; /* only used if we fail.. */
948 dirent = buf->previous;
949 if (dirent)
950 if (put_user(offset, &dirent->d_off))
951 return -EFAULT;
952 dirent = buf->current_dir;
953 buf->previous = dirent;
954 if (put_user(ino, &dirent->d_ino)
955 || put_user(reclen, &dirent->d_reclen)
956 || copy_to_user(dirent->d_name, name, namlen)
957 || put_user(0, dirent->d_name + namlen))
958 return -EFAULT;
959 ((char *) dirent) += reclen;
960 buf->current_dir = dirent;
961 buf->count -= reclen;
962 return 0;
963 }
964
965 asmlinkage long
966 sys32_getdents (unsigned int fd, struct linux32_dirent *dirent, unsigned int count)
967 {
968 struct file * file;
969 struct linux32_dirent * lastdirent;
970 struct getdents32_callback buf;
971 int error;
972
973 error = -EBADF;
974 file = fget(fd);
975 if (!file)
976 goto out;
977
978 buf.current_dir = dirent;
979 buf.previous = NULL;
980 buf.count = count;
981 buf.error = 0;
982
983 error = vfs_readdir(file, filldir32, &buf);
984 if (error < 0)
985 goto out_putf;
986 error = buf.error;
987 lastdirent = buf.previous;
988 if (lastdirent) {
989 error = -EINVAL;
990 if (put_user(file->f_pos, &lastdirent->d_off))
991 goto out_putf;
992 error = count - buf.count;
993 }
994
995 out_putf:
996 fput(file);
997 out:
998 return error;
999 }
1000
1001 static int
1002 fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
1003 unsigned int d_type)
1004 {
1005 struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
1006 struct old_linux32_dirent * dirent;
1007
1008 if (buf->count)
1009 return -EINVAL;
1010 buf->count++;
1011 dirent = buf->dirent;
1012 if (put_user(ino, &dirent->d_ino)
1013 || put_user(offset, &dirent->d_offset)
1014 || put_user(namlen, &dirent->d_namlen)
1015 || copy_to_user(dirent->d_name, name, namlen)
1016 || put_user(0, dirent->d_name + namlen))
1017 return -EFAULT;
1018 return 0;
1019 }
1020
1021 asmlinkage long
1022 sys32_readdir (unsigned int fd, void *dirent, unsigned int count)
1023 {
1024 int error;
1025 struct file * file;
1026 struct readdir32_callback buf;
1027
1028 error = -EBADF;
1029 file = fget(fd);
1030 if (!file)
1031 goto out;
1032
1033 buf.count = 0;
1034 buf.dirent = dirent;
1035
1036 error = vfs_readdir(file, fillonedir32, &buf);
1037 if (error >= 0)
1038 error = buf.count;
1039 fput(file);
1040 out:
1041 return error;
1042 }
1043
1044 /*
1045 * We can actually return ERESTARTSYS instead of EINTR, but I'd
1046 * like to be certain this leads to no problems. So I return
1047 * EINTR just for safety.
1048 *
1049 * Update: ERESTARTSYS breaks at least the xview clock binary, so
1050 * I'm trying ERESTARTNOHAND which restarts only when you want to.
1051 */
1052 #define MAX_SELECT_SECONDS \
1053 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
1054 #define ROUND_UP_TIME(x,y) (((x)+(y)-1)/(y))
1055
1056 asmlinkage long
1057 sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval32 *tvp32)
1058 {
1059 fd_set_bits fds;
1060 char *bits;
1061 long timeout;
1062 int ret, size;
1063
1064 timeout = MAX_SCHEDULE_TIMEOUT;
1065 if (tvp32) {
1066 time_t sec, usec;
1067
1068 ret = -EFAULT;
1069 if (get_user(sec, &tvp32->tv_sec) || get_user(usec, &tvp32->tv_usec))
1070 goto out_nofds;
1071
1072 ret = -EINVAL;
1073 if (sec < 0 || usec < 0)
1074 goto out_nofds;
1075
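/* Convert the timeout to jiffies, rounding the microseconds up. */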
1076 if ((unsigned long) sec < MAX_SELECT_SECONDS) {
1077 timeout = ROUND_UP_TIME(usec, 1000000/HZ);
1078 timeout += sec * (unsigned long) HZ;
1079 }
1080 }
1081
1082 ret = -EINVAL;
1083 if (n < 0)
1084 goto out_nofds;
1085
1086 if (n > current->files->max_fdset)
1087 n = current->files->max_fdset;
1088
1089 /*
1090 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
1091 * since we use fd_set we need to allocate memory in units of
1092 * long-words.
1093 */
1094 ret = -ENOMEM;
1095 size = FDS_BYTES(n);
1096 bits = kmalloc(6 * size, GFP_KERNEL);
1097 if (!bits)
1098 goto out_nofds;
1099 fds.in = (unsigned long *) bits;
1100 fds.out = (unsigned long *) (bits + size);
1101 fds.ex = (unsigned long *) (bits + 2*size);
1102 fds.res_in = (unsigned long *) (bits + 3*size);
1103 fds.res_out = (unsigned long *) (bits + 4*size);
1104 fds.res_ex = (unsigned long *) (bits + 5*size);
1105
1106 if ((ret = get_fd_set(n, inp, fds.in)) ||
1107 (ret = get_fd_set(n, outp, fds.out)) ||
1108 (ret = get_fd_set(n, exp, fds.ex)))
1109 goto out;
1110 zero_fd_set(n, fds.res_in);
1111 zero_fd_set(n, fds.res_out);
1112 zero_fd_set(n, fds.res_ex);
1113
1114 ret = do_select(n, &fds, &timeout);
1115
1116 if (tvp32 && !(current->personality & STICKY_TIMEOUTS)) {
1117 time_t sec = 0, usec = 0;
1118 if (timeout) {
1119 sec = timeout / HZ;
1120 usec = timeout % HZ;
1121 usec *= (1000000/HZ);
1122 }
1123 if (put_user(sec, &tvp32->tv_sec) || put_user(usec, &tvp32->tv_usec)) {
1124 ret = -EFAULT;
1125 goto out;
1126 }
1127 }
1128
1129 if (ret < 0)
1130 goto out;
1131 if (!ret) {
1132 ret = -ERESTARTNOHAND;
1133 if (signal_pending(current))
1134 goto out;
1135 ret = 0;
1136 }
1137
1138 set_fd_set(n, inp, fds.res_in);
1139 set_fd_set(n, outp, fds.res_out);
1140 set_fd_set(n, exp, fds.res_ex);
1141
1142 out:
1143 kfree(bits);
1144 out_nofds:
1145 return ret;
1146 }
1147
1148 struct sel_arg_struct {
1149 unsigned int n;
1150 unsigned int inp;
1151 unsigned int outp;
1152 unsigned int exp;
1153 unsigned int tvp;
1154 };
1155
1156 asmlinkage long
1157 sys32_old_select (struct sel_arg_struct *arg)
1158 {
1159 struct sel_arg_struct a;
1160
1161 if (copy_from_user(&a, arg, sizeof(a)))
1162 return -EFAULT;
1163 return sys32_select(a.n, (fd_set *) A(a.inp), (fd_set *) A(a.outp), (fd_set *) A(a.exp),
1164 (struct timeval32 *) A(a.tvp));
1165 }
1166
1167 extern asmlinkage long sys_nanosleep (struct timespec *rqtp, struct timespec *rmtp);
1168
1169 asmlinkage long
1170 sys32_nanosleep (struct timespec32 *rqtp, struct timespec32 *rmtp)
1171 {
1172 struct timespec t;
1173 int ret;
1174 mm_segment_t old_fs = get_fs();
1175
1176 if (get_user (t.tv_sec, &rqtp->tv_sec) || get_user (t.tv_nsec, &rqtp->tv_nsec))
1177 return -EFAULT;
1178 set_fs(KERNEL_DS);
1179 ret = sys_nanosleep(&t, rmtp ? &t : NULL);
1180 set_fs(old_fs);
1181 if (rmtp && ret == -EINTR) {
1182 if (put_user(t.tv_sec, &rmtp->tv_sec) || put_user(t.tv_nsec, &rmtp->tv_nsec))
1183 return -EFAULT;
1184 }
1185 return ret;
1186 }
1187
1188 struct iovec32 { unsigned int iov_base; int iov_len; };
1189 asmlinkage ssize_t sys_readv (unsigned long,const struct iovec *,unsigned long);
1190 asmlinkage ssize_t sys_writev (unsigned long,const struct iovec *,unsigned long);
1191
1192 static struct iovec *
1193 get_iovec32 (struct iovec32 *iov32, struct iovec *iov_buf, u32 count, int type)
1194 {
1195 int i;
1196 u32 buf, len;
1197 struct iovec *ivp, *iov;
1198
1199 /* Get the "struct iovec" from user memory */
1200
1201 if (!count)
1202 return 0;
1203 if (verify_area(VERIFY_READ, iov32, sizeof(struct iovec32)*count))
1204 return NULL;
1205 if (count > UIO_MAXIOV)
1206 return NULL;
1207 if (count > UIO_FASTIOV) {
1208 iov = kmalloc(count*sizeof(struct iovec), GFP_KERNEL);
1209 if (!iov)
1210 return NULL;
1211 } else
1212 iov = iov_buf;
1213
1214 ivp = iov;
1215 for (i = 0; i < count; i++) {
1216 if (__get_user(len, &iov32->iov_len) || __get_user(buf, &iov32->iov_base)) {
1217 if (iov != iov_buf)
1218 kfree(iov);
1219 return NULL;
1220 }
1221 if (verify_area(type, (void *)A(buf), len)) {
1222 if (iov != iov_buf)
1223 kfree(iov);
1224 return((struct iovec *)0);
1225 }
1226 ivp->iov_base = (void *)A(buf);
1227 ivp->iov_len = (__kernel_size_t) len;
1228 iov32++;
1229 ivp++;
1230 }
1231 return iov;
1232 }
1233
1234 asmlinkage long
1235 sys32_readv (int fd, struct iovec32 *vector, u32 count)
1236 {
1237 struct iovec iovstack[UIO_FASTIOV];
1238 struct iovec *iov;
1239 long ret;
1240 mm_segment_t old_fs = get_fs();
1241
1242 iov = get_iovec32(vector, iovstack, count, VERIFY_WRITE);
1243 if (!iov)
1244 return -EFAULT;
1245 set_fs(KERNEL_DS);
1246 ret = sys_readv(fd, iov, count);
1247 set_fs(old_fs);
1248 if (iov != iovstack)
1249 kfree(iov);
1250 return ret;
1251 }
1252
1253 asmlinkage long
1254 sys32_writev (int fd, struct iovec32 *vector, u32 count)
1255 {
1256 struct iovec iovstack[UIO_FASTIOV];
1257 struct iovec *iov;
1258 long ret;
1259 mm_segment_t old_fs = get_fs();
1260
1261 iov = get_iovec32(vector, iovstack, count, VERIFY_READ);
1262 if (!iov)
1263 return -EFAULT;
1264 set_fs(KERNEL_DS);
1265 ret = sys_writev(fd, iov, count);
1266 set_fs(old_fs);
1267 if (iov != iovstack)
1268 kfree(iov);
1269 return ret;
1270 }
1271
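/* The IA-32 rlimit fields are only 32 bits wide; clamp values that don't fit. */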
1272 #define RLIM_INFINITY32 0x7fffffff
1273 #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
1274
1275 struct rlimit32 {
1276 unsigned int rlim_cur;
1277 unsigned int rlim_max;
1278 };
1279
1280 extern asmlinkage long sys_getrlimit (unsigned int resource, struct rlimit *rlim);
1281
1282 asmlinkage long
1283 sys32_old_getrlimit (unsigned int resource, struct rlimit32 *rlim)
1284 {
1285 mm_segment_t old_fs = get_fs();
1286 struct rlimit r;
1287 int ret;
1288
1289 set_fs(KERNEL_DS);
1290 ret = sys_getrlimit(resource, &r);
1291 set_fs(old_fs);
1292 if (!ret) {
1293 ret = put_user(RESOURCE32(r.rlim_cur), &rlim->rlim_cur);
1294 ret |= put_user(RESOURCE32(r.rlim_max), &rlim->rlim_max);
1295 }
1296 return ret;
1297 }
1298
1299 asmlinkage long
1300 sys32_getrlimit (unsigned int resource, struct rlimit32 *rlim)
1301 {
1302 mm_segment_t old_fs = get_fs();
1303 struct rlimit r;
1304 int ret;
1305
1306 set_fs(KERNEL_DS);
1307 ret = sys_getrlimit(resource, &r);
1308 set_fs(old_fs);
1309 if (!ret) {
1310 if (r.rlim_cur >= 0xffffffff)
1311 r.rlim_cur = 0xffffffff;
1312 if (r.rlim_max >= 0xffffffff)
1313 r.rlim_max = 0xffffffff;
1314 ret = put_user(r.rlim_cur, &rlim->rlim_cur);
1315 ret |= put_user(r.rlim_max, &rlim->rlim_max);
1316 }
1317 return ret;
1318 }
1319
1320 extern asmlinkage long sys_setrlimit (unsigned int resource, struct rlimit *rlim);
1321
1322 asmlinkage long
1323 sys32_setrlimit (unsigned int resource, struct rlimit32 *rlim)
1324 {
1325 struct rlimit r;
1326 int ret;
1327 mm_segment_t old_fs = get_fs();
1328
1329 if (resource >= RLIM_NLIMITS)
1330 return -EINVAL;
1331 if (get_user(r.rlim_cur, &rlim->rlim_cur) || get_user(r.rlim_max, &rlim->rlim_max))
1332 return -EFAULT;
1333 if (r.rlim_cur == RLIM_INFINITY32)
1334 r.rlim_cur = RLIM_INFINITY;
1335 if (r.rlim_max == RLIM_INFINITY32)
1336 r.rlim_max = RLIM_INFINITY;
1337 set_fs(KERNEL_DS);
1338 ret = sys_setrlimit(resource, &r);
1339 set_fs(old_fs);
1340 return ret;
1341 }
1342
1343 /*
1344 * Declare the IA32 version of the msghdr
1345 */
1346
1347 struct msghdr32 {
1348 unsigned int msg_name; /* Socket name */
1349 int msg_namelen; /* Length of name */
1350 unsigned int msg_iov; /* Data blocks */
1351 unsigned int msg_iovlen; /* Number of blocks */
1352 unsigned int msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
1353 unsigned int msg_controllen; /* Length of cmsg list */
1354 unsigned msg_flags;
1355 };
1356
1357 struct cmsghdr32 {
1358 __kernel_size_t32 cmsg_len;
1359 int cmsg_level;
1360 int cmsg_type;
1361 };
1362
1363 /* Bleech... */
1364 #define __CMSG32_NXTHDR(ctl, len, cmsg, cmsglen) __cmsg32_nxthdr((ctl),(len),(cmsg),(cmsglen))
1365 #define CMSG32_NXTHDR(mhdr, cmsg, cmsglen) cmsg32_nxthdr((mhdr), (cmsg), (cmsglen))
1366 #define CMSG32_ALIGN(len) ( ((len)+sizeof(int)-1) & ~(sizeof(int)-1) )
1367 #define CMSG32_DATA(cmsg) \
1368 ((void *)((char *)(cmsg) + CMSG32_ALIGN(sizeof(struct cmsghdr32))))
1369 #define CMSG32_SPACE(len) \
1370 (CMSG32_ALIGN(sizeof(struct cmsghdr32)) + CMSG32_ALIGN(len))
1371 #define CMSG32_LEN(len) (CMSG32_ALIGN(sizeof(struct cmsghdr32)) + (len))
1372 #define __CMSG32_FIRSTHDR(ctl,len) \
1373 ((len) >= sizeof(struct cmsghdr32) ? (struct cmsghdr32 *)(ctl) : (struct cmsghdr32 *)NULL)
1374 #define CMSG32_FIRSTHDR(msg) __CMSG32_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen)
1375 #define CMSG32_OK(ucmlen, ucmsg, mhdr) \
1376 ((ucmlen) >= sizeof(struct cmsghdr32) && \
1377 (ucmlen) <= (unsigned long) \
1378 ((mhdr)->msg_controllen - \
1379 ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
1380
1381 static inline struct cmsghdr32 *
1382 __cmsg32_nxthdr (void *ctl, __kernel_size_t size, struct cmsghdr32 *cmsg, int cmsg_len)
1383 {
1384 struct cmsghdr32 * ptr;
1385
1386 ptr = (struct cmsghdr32 *)(((unsigned char *) cmsg) + CMSG32_ALIGN(cmsg_len));
1387 if ((unsigned long)((char*)(ptr+1) - (char *) ctl) > size)
1388 return NULL;
1389 return ptr;
1390 }
1391
1392 static inline struct cmsghdr32 *
1393 cmsg32_nxthdr (struct msghdr *msg, struct cmsghdr32 *cmsg, int cmsg_len)
1394 {
1395 return __cmsg32_nxthdr(msg->msg_control, msg->msg_controllen, cmsg, cmsg_len);
1396 }
1397
1398 static inline int
1399 get_msghdr32 (struct msghdr *mp, struct msghdr32 *mp32)
1400 {
1401 int ret;
1402 unsigned int i;
1403
1404 if (!access_ok(VERIFY_READ, mp32, sizeof(*mp32)))
1405 return -EFAULT;
1406 ret = __get_user(i, &mp32->msg_name);
1407 mp->msg_name = (void *)A(i);
1408 ret |= __get_user(mp->msg_namelen, &mp32->msg_namelen);
1409 ret |= __get_user(i, &mp32->msg_iov);
1410 mp->msg_iov = (struct iovec *)A(i);
1411 ret |= __get_user(mp->msg_iovlen, &mp32->msg_iovlen);
1412 ret |= __get_user(i, &mp32->msg_control);
1413 mp->msg_control = (void *)A(i);
1414 ret |= __get_user(mp->msg_controllen, &mp32->msg_controllen);
1415 ret |= __get_user(mp->msg_flags, &mp32->msg_flags);
1416 return ret ? -EFAULT : 0;
1417 }
1418
1419 /*
1420 * There is a lot of hair here because the alignment rules (and thus placement) of cmsg
1421 * headers and length are different for 32-bit apps. -DaveM
1422 */
1423 static int
1424 get_cmsghdr32 (struct msghdr *kmsg, unsigned char *stackbuf, struct sock *sk, size_t *bufsize)
1425 {
1426 struct cmsghdr *kcmsg, *kcmsg_base;
1427 __kernel_size_t kcmlen, tmp;
1428 __kernel_size_t32 ucmlen;
1429 struct cmsghdr32 *ucmsg;
1430 long err;
1431
1432 kcmlen = 0;
1433 kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
1434 ucmsg = CMSG32_FIRSTHDR(kmsg);
1435 while (ucmsg != NULL) {
1436 if (get_user(ucmlen, &ucmsg->cmsg_len))
1437 return -EFAULT;
1438
1439 /* Catch bogons. */
1440 if (!CMSG32_OK(ucmlen, ucmsg, kmsg))
1441 return -EINVAL;
1442
1443 tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) +
1444 CMSG_ALIGN(sizeof(struct cmsghdr)));
1445 tmp = CMSG_ALIGN(tmp);
1446 kcmlen += tmp;
1447 ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen);
1448 }
1449 if (kcmlen == 0)
1450 return -EINVAL;
1451
1452 /*
1453 * The kcmlen holds the 64-bit version of the control length. It may not be
1454 * modified as we do not stick it into the kmsg until we have successfully copied
1455 * over all of the data from the user.
1456 */
1457 if (kcmlen > *bufsize) {
1458 *bufsize = kcmlen;
1459 kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
1460 }
1461 if (kcmsg == NULL)
1462 return -ENOBUFS;
1463
1464 /* Now copy them over neatly. */
1465 memset(kcmsg, 0, kcmlen);
1466 ucmsg = CMSG32_FIRSTHDR(kmsg);
1467 while (ucmsg != NULL) {
1468 err = get_user(ucmlen, &ucmsg->cmsg_len);
1469 tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) +
1470 CMSG_ALIGN(sizeof(struct cmsghdr)));
1471 kcmsg->cmsg_len = tmp;
1472 err |= get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level);
1473 err |= get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type);
1474
1475 /* Copy over the data. */
1476 err |= copy_from_user(CMSG_DATA(kcmsg), CMSG32_DATA(ucmsg),
1477 (ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))));
1478 if (err)
1479 goto out_free_efault;
1480
1481 /* Advance. */
1482 kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp);
1483 ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen);
1484 }
1485
1486 /* Ok, looks like we made it. Hook it up and return success. */
1487 kmsg->msg_control = kcmsg_base;
1488 kmsg->msg_controllen = kcmlen;
1489 return 0;
1490
1491 out_free_efault:
1492 if (kcmsg_base != (struct cmsghdr *)stackbuf)
1493 sock_kfree_s(sk, kcmsg_base, kcmlen);
1494 return -EFAULT;
1495 }
1496
1497 /*
1498 * Verify & re-shape IA32 iovec. The caller must ensure that the
1499 * iovec is big enough to hold the re-shaped message iovec.
1500 *
1501 * Save time not doing verify_area. copy_*_user will make this work
1502 * in any case.
1503 *
1504 * Don't need to check the total size for overflow (cf net/core/iovec.c),
1505 * 32-bit sizes can't overflow a 64-bit count.
1506 */
1507
1508 static inline int
1509 verify_iovec32 (struct msghdr *m, struct iovec *iov, char *address, int mode)
1510 {
1511 int size, err, ct;
1512 struct iovec32 *iov32;
1513
1514 if (m->msg_namelen) {
1515 if (mode == VERIFY_READ) {
1516 err = move_addr_to_kernel(m->msg_name, m->msg_namelen, address);
1517 if (err < 0)
1518 goto out;
1519 }
1520 m->msg_name = address;
1521 } else
1522 m->msg_name = NULL;
1523
1524 err = -EFAULT;
1525 size = m->msg_iovlen * sizeof(struct iovec32);
1526 if (copy_from_user(iov, m->msg_iov, size))
1527 goto out;
1528 m->msg_iov = iov;
1529
1530 err = 0;
1531 iov32 = (struct iovec32 *)iov;
1532 for (ct = m->msg_iovlen; ct-- > 0; ) {
1533 iov[ct].iov_len = (__kernel_size_t)iov32[ct].iov_len;
1534 iov[ct].iov_base = (void *) A(iov32[ct].iov_base);
1535 err += iov[ct].iov_len;
1536 }
1537 out:
1538 return err;
1539 }
1540
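/*
 * Append a control message in 32-bit (cmsghdr32) format to the user-space
 * control buffer and advance msg_control/msg_controllen accordingly;
 * MSG_CTRUNC is set if the remaining space is too small.
 */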
1541 static void
1542 put_cmsg32(struct msghdr *kmsg, int level, int type, int len, void *data)
1543 {
1544 struct cmsghdr32 *cm = (struct cmsghdr32 *) kmsg->msg_control;
1545 struct cmsghdr32 cmhdr;
1546 int cmlen = CMSG32_LEN(len);
1547
1548 if(cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
1549 kmsg->msg_flags |= MSG_CTRUNC;
1550 return;
1551 }
1552
1553 if(kmsg->msg_controllen < cmlen) {
1554 kmsg->msg_flags |= MSG_CTRUNC;
1555 cmlen = kmsg->msg_controllen;
1556 }
1557 cmhdr.cmsg_level = level;
1558 cmhdr.cmsg_type = type;
1559 cmhdr.cmsg_len = cmlen;
1560
1561 if(copy_to_user(cm, &cmhdr, sizeof cmhdr))
1562 return;
1563 if(copy_to_user(CMSG32_DATA(cm), data,
1564 cmlen - sizeof(struct cmsghdr32)))
1565 return;
1566 cmlen = CMSG32_SPACE(len);
1567 kmsg->msg_control += cmlen;
1568 kmsg->msg_controllen -= cmlen;
1569 }
1570
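/*
 * Install the file descriptors received via SCM_RIGHTS into the current
 * process and write them into a 32-bit SCM_RIGHTS control message.
 */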
1571 static void
1572 scm_detach_fds32 (struct msghdr *kmsg, struct scm_cookie *scm)
1573 {
1574 struct cmsghdr32 *cm = (struct cmsghdr32 *) kmsg->msg_control;
1575 int fdmax = (kmsg->msg_controllen - sizeof(struct cmsghdr32))
1576 / sizeof(int);
1577 int fdnum = scm->fp->count;
1578 struct file **fp = scm->fp->fp;
1579 int *cmfptr;
1580 int err = 0, i;
1581
1582 if (fdnum < fdmax)
1583 fdmax = fdnum;
1584
1585 for (i = 0, cmfptr = (int *) CMSG32_DATA(cm);
1586 i < fdmax;
1587 i++, cmfptr++) {
1588 int new_fd;
1589 err = get_unused_fd();
1590 if (err < 0)
1591 break;
1592 new_fd = err;
1593 err = put_user(new_fd, cmfptr);
1594 if (err) {
1595 put_unused_fd(new_fd);
1596 break;
1597 }
1598 /* Bump the usage count and install the file. */
1599 get_file(fp[i]);
1600 current->files->fd[new_fd] = fp[i];
1601 }
1602
1603 if (i > 0) {
1604 int cmlen = CMSG32_LEN(i * sizeof(int));
1605 if (!err)
1606 err = put_user(SOL_SOCKET, &cm->cmsg_level);
1607 if (!err)
1608 err = put_user(SCM_RIGHTS, &cm->cmsg_type);
1609 if (!err)
1610 err = put_user(cmlen, &cm->cmsg_len);
1611 if (!err) {
1612 cmlen = CMSG32_SPACE(i * sizeof(int));
1613 kmsg->msg_control += cmlen;
1614 kmsg->msg_controllen -= cmlen;
1615 }
1616 }
1617 if (i < fdnum)
1618 kmsg->msg_flags |= MSG_CTRUNC;
1619
1620 /*
1621 * All of the files that fit in the message have had their
1622 * usage counts incremented, so we just free the list.
1623 */
1624 __scm_destroy(scm);
1625 }
1626
1627 /*
1628 * In these cases we (currently) can just copy the data over verbatim because all CMSGs
1629 * created by the kernel have well defined types which have the same layout in both the
1630 * 32-bit and 64-bit API. One must add some special cased conversions here if we start
1631 * sending control messages with incompatible types.
1632 *
1633 * SCM_RIGHTS and SCM_CREDENTIALS are done by hand in recvmsg32 right after
1634 * we do our work. The remaining cases are:
1635 *
1636 * SOL_IP IP_PKTINFO struct in_pktinfo 32-bit clean
1637 * IP_TTL int 32-bit clean
1638 * IP_TOS __u8 32-bit clean
1639 * IP_RECVOPTS variable length 32-bit clean
1640 * IP_RETOPTS variable length 32-bit clean
1641 * (these last two are clean because the types are defined
1642 * by the IPv4 protocol)
1643 * IP_RECVERR struct sock_extended_err +
1644 * struct sockaddr_in 32-bit clean
1645 * SOL_IPV6 IPV6_RECVERR struct sock_extended_err +
1646 * struct sockaddr_in6 32-bit clean
1647 * IPV6_PKTINFO struct in6_pktinfo 32-bit clean
1648 * IPV6_HOPLIMIT int 32-bit clean
1649 * IPV6_FLOWINFO u32 32-bit clean
1650 * IPV6_HOPOPTS ipv6 hop exthdr 32-bit clean
1651 * IPV6_DSTOPTS ipv6 dst exthdr(s) 32-bit clean
1652 * IPV6_RTHDR ipv6 routing exthdr 32-bit clean
1653 * IPV6_AUTHHDR ipv6 auth exthdr 32-bit clean
1654 */
1655 static void
1656 cmsg32_recvmsg_fixup (struct msghdr *kmsg, unsigned long orig_cmsg_uptr,
1657 __kernel_size_t orig_cmsg_len)
1658 {
1659 unsigned char *workbuf, *wp;
1660 unsigned long bufsz, space_avail;
1661 struct cmsghdr *ucmsg;
1662 long err;
1663
1664 bufsz = ((unsigned long)kmsg->msg_control) - orig_cmsg_uptr;
1665 space_avail = kmsg->msg_controllen + bufsz;
1666 wp = workbuf = kmalloc(bufsz, GFP_KERNEL);
1667 if (workbuf == NULL)
1668 goto fail;
1669
1670 /* To make this more sane we assume the kernel sends back properly
1671 * formatted control messages. Because of how the kernel will truncate
1672 * the cmsg_len for MSG_TRUNC cases, we need not check that case either.
1673 */
1674 ucmsg = (struct cmsghdr *) orig_cmsg_uptr;
1675 while (((unsigned long)ucmsg) < ((unsigned long)kmsg->msg_control)) {
1676 struct cmsghdr32 *kcmsg32 = (struct cmsghdr32 *) wp;
1677 int clen64, clen32;
1678
1679 /*
1680 * UCMSG is the 64-bit format CMSG entry in user-space. KCMSG32 is within
1681 * the kernel space temporary buffer we use to convert into a 32-bit style
1682 * CMSG.
1683 */
1684 err = get_user(kcmsg32->cmsg_len, &ucmsg->cmsg_len);
1685 err |= get_user(kcmsg32->cmsg_level, &ucmsg->cmsg_level);
1686 err |= get_user(kcmsg32->cmsg_type, &ucmsg->cmsg_type);
1687 if (err)
1688 goto fail2;
1689
1690 clen64 = kcmsg32->cmsg_len;
1691 if ((clen64 < CMSG_ALIGN(sizeof(*ucmsg))) ||
1692 (clen64 > (orig_cmsg_len + wp - workbuf)))
1693 break;
1694 copy_from_user(CMSG32_DATA(kcmsg32), CMSG_DATA(ucmsg),
1695 clen64 - CMSG_ALIGN(sizeof(*ucmsg)));
1696 clen32 = ((clen64 - CMSG_ALIGN(sizeof(*ucmsg))) +
1697 CMSG32_ALIGN(sizeof(struct cmsghdr32)));
1698 kcmsg32->cmsg_len = clen32;
1699
1700 ucmsg = (struct cmsghdr *) (((char *)ucmsg) + CMSG_ALIGN(clen64));
1701 wp = (((char *)kcmsg32) + CMSG32_ALIGN(clen32));
1702 }
1703
1704 /* Copy back fixed up data, and adjust pointers. */
1705 bufsz = (wp - workbuf);
1706 if (copy_to_user((void *)orig_cmsg_uptr, workbuf, bufsz))
1707 goto fail2;
1708
1709 kmsg->msg_control = (struct cmsghdr *) (((char *)orig_cmsg_uptr) + bufsz);
1710 kmsg->msg_controllen = space_avail - bufsz;
1711 kfree(workbuf);
1712 return;
1713
1714 fail2:
1715 kfree(workbuf);
1716 fail:
1717 /*
1718 * If we leave the 64-bit format CMSG chunks in there, the application could get
1719 * confused and crash. So to ensure greater recovery, we report no CMSGs.
1720 */
1721 kmsg->msg_controllen += bufsz;
1722 kmsg->msg_control = (void *) orig_cmsg_uptr;
1723 }
1724
1725 static inline void
1726 sockfd_put (struct socket *sock)
1727 {
1728 fput(sock->file);
1729 }
1730
1731 /* XXX This really belongs in some header file... -DaveM */
1732 #define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
1733 16 for IP, 16 for IPX,
1734 24 for IPv6,
1735 about 80 for AX.25 */
1736
1737 extern struct socket *sockfd_lookup (int fd, int *err);
1738
1739 /*
1740 * BSD sendmsg interface
1741 */
1742
1743 int
1744 sys32_sendmsg (int fd, struct msghdr32 *msg, unsigned flags)
1745 {
1746 struct socket *sock;
1747 char address[MAX_SOCK_ADDR];
1748 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1749 unsigned char ctl[sizeof(struct cmsghdr) + 20]; /* 20 is size of ipv6_pktinfo */
1750 unsigned char *ctl_buf = ctl;
1751 struct msghdr msg_sys;
1752 int err, iov_size, total_len;
1753 size_t ctl_len;
1754
1755 err = -EFAULT;
1756 if (get_msghdr32(&msg_sys, msg))
1757 goto out;
1758
1759 sock = sockfd_lookup(fd, &err);
1760 if (!sock)
1761 goto out;
1762
1763 /* do not move before msg_sys is valid */
1764 err = -EINVAL;
1765 if (msg_sys.msg_iovlen > UIO_MAXIOV)
1766 goto out_put;
1767
1768 /* Check whether to allocate the iovec area */
1769 err = -ENOMEM;
1770 iov_size = msg_sys.msg_iovlen * sizeof(struct iovec32);
1771 if (msg_sys.msg_iovlen > UIO_FASTIOV) {
1772 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
1773 if (!iov)
1774 goto out_put;
1775 }
1776
1777 /* This will also move the address data into kernel space */
1778 err = verify_iovec32(&msg_sys, iov, address, VERIFY_READ);
1779 if (err < 0)
1780 goto out_freeiov;
1781 total_len = err;
1782
1783 err = -ENOBUFS;
1784
1785 if (msg_sys.msg_controllen > INT_MAX)
1786 goto out_freeiov;
1787 if (msg_sys.msg_controllen) {
1788 ctl_len = sizeof(ctl);
1789 err = get_cmsghdr32(&msg_sys, ctl_buf, sock->sk, &ctl_len);
1790 if (err)
1791 goto out_freeiov;
1792 ctl_buf = msg_sys.msg_control;
1793 }
1794 msg_sys.msg_flags = flags;
1795
1796 if (sock->file->f_flags & O_NONBLOCK)
1797 msg_sys.msg_flags |= MSG_DONTWAIT;
1798 err = sock_sendmsg(sock, &msg_sys, total_len);
1799
1800 if (ctl_buf != ctl)
1801 sock_kfree_s(sock->sk, ctl_buf, ctl_len);
1802 out_freeiov:
1803 if (iov != iovstack)
1804 sock_kfree_s(sock->sk, iov, iov_size);
1805 out_put:
1806 sockfd_put(sock);
1807 out:
1808 return err;
1809 }
1810
1811 /*
1812 * BSD recvmsg interface
1813 */
1814
1815 int
1816 sys32_recvmsg (int fd, struct msghdr32 *msg, unsigned int flags)
1817 {
1818 struct socket *sock;
1819 struct iovec iovstack[UIO_FASTIOV];
1820 struct iovec *iov=iovstack;
1821 struct msghdr msg_sys;
1822 unsigned long cmsg_ptr;
1823 __kernel_size_t cmsg_len;
1824 int err, iov_size, total_len, len;
1825 struct scm_cookie scm;
1826
1827 /* kernel mode address */
1828 char addr[MAX_SOCK_ADDR];
1829
1830 /* user mode address pointers */
1831 struct sockaddr *uaddr;
1832 int *uaddr_len;
1833
1834 err = -EFAULT;
1835 if (get_msghdr32(&msg_sys, msg))
1836 goto out;
1837
1838 sock = sockfd_lookup(fd, &err);
1839 if (!sock)
1840 goto out;
1841
1842 err = -EINVAL;
1843 if (msg_sys.msg_iovlen > UIO_MAXIOV)
1844 goto out_put;
1845
1846 /* Check whether to allocate the iovec area */
1847 err = -ENOMEM;
1848 iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
1849 if (msg_sys.msg_iovlen > UIO_FASTIOV) {
1850 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
1851 if (!iov)
1852 goto out_put;
1853 }
1854
1855 /*
1856 * Save the user-mode address (verify_iovec will change the
1857 * kernel msghdr to use the kernel address space)
1858 */
1859
1860 uaddr = msg_sys.msg_name;
1861 uaddr_len = &msg->msg_namelen;
1862 err = verify_iovec32(&msg_sys, iov, addr, VERIFY_WRITE);
1863 if (err < 0)
1864 goto out_freeiov;
1865 total_len=err;
1866
1867 cmsg_ptr = (unsigned long)msg_sys.msg_control;
1868 cmsg_len = msg_sys.msg_controllen;
1869 msg_sys.msg_flags = 0;
1870
1871 if (sock->file->f_flags & O_NONBLOCK)
1872 flags |= MSG_DONTWAIT;
1873
1874 memset(&scm, 0, sizeof(scm));
1875
1876 lock_kernel();
1877 {
1878 err = sock->ops->recvmsg(sock, &msg_sys, total_len, flags, &scm);
1879 if (err < 0)
1880 goto out_unlock_freeiov;
1881
1882 len = err;
1883 if (!msg_sys.msg_control) {
1884 if (sock->passcred || scm.fp)
1885 msg_sys.msg_flags |= MSG_CTRUNC;
1886 if (scm.fp)
1887 __scm_destroy(&scm);
1888 } else {
1889 /*
1890 * If recvmsg processing itself placed some control messages into
1891 * user space, it is using 64-bit CMSG processing, so we need to
1892 * fix it up before we tack on more stuff.
1893 */
1894 if ((unsigned long) msg_sys.msg_control != cmsg_ptr)
1895 cmsg32_recvmsg_fixup(&msg_sys, cmsg_ptr,
1896 cmsg_len);
1897
1898 /* Wheee... */
1899 if (sock->passcred)
1900 put_cmsg32(&msg_sys, SOL_SOCKET, SCM_CREDENTIALS,
1901 sizeof(scm.creds), &scm.creds);
1902 if (scm.fp != NULL)
1903 scm_detach_fds32(&msg_sys, &scm);
1904 }
1905 }
1906 unlock_kernel();
1907
1908 if (uaddr != NULL) {
1909 err = move_addr_to_user(addr, msg_sys.msg_namelen, uaddr, uaddr_len);
1910 if (err < 0)
1911 goto out_freeiov;
1912 }
1913 err = __put_user(msg_sys.msg_flags, &msg->msg_flags);
1914 if (err)
1915 goto out_freeiov;
1916 err = __put_user((unsigned long)msg_sys.msg_control-cmsg_ptr,
1917 &msg->msg_controllen);
1918 if (err)
1919 goto out_freeiov;
1920 err = len;
1921
1922 out_freeiov:
1923 if (iov != iovstack)
1924 sock_kfree_s(sock->sk, iov, iov_size);
1925 out_put:
1926 sockfd_put(sock);
1927 out:
1928 return err;
1929
1930 out_unlock_freeiov:
unlock_kernel();
1931 goto out_freeiov;
1932 }
1933
1934 /* Argument list sizes for sys_socketcall */
1935 #define AL(x) ((x) * sizeof(u32))
1936 static const unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
1937 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
1938 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
1939 #undef AL
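/*
 * nas[] gives, for each SYS_* socketcall number, how many bytes of 32-bit
 * argument words have to be copied in from the user's args array before
 * dispatching.  For example, SYS_SENDTO (call number 11) takes six
 * arguments, so nas[11] == AL(6) == 24 bytes.
 */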
1940
1941 extern asmlinkage long sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
1942 extern asmlinkage long sys_connect(int fd, struct sockaddr *uservaddr,
1943 int addrlen);
1944 extern asmlinkage long sys_accept(int fd, struct sockaddr *upeer_sockaddr,
1945 int *upeer_addrlen);
1946 extern asmlinkage long sys_getsockname(int fd, struct sockaddr *usockaddr,
1947 int *usockaddr_len);
1948 extern asmlinkage long sys_getpeername(int fd, struct sockaddr *usockaddr,
1949 int *usockaddr_len);
1950 extern asmlinkage long sys_send(int fd, void *buff, size_t len, unsigned flags);
1951 extern asmlinkage long sys_sendto(int fd, u32 buff, __kernel_size_t32 len,
1952 unsigned flags, u32 addr, int addr_len);
1953 extern asmlinkage long sys_recv(int fd, void *ubuf, size_t size, unsigned flags);
1954 extern asmlinkage long sys_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size,
1955 unsigned flags, u32 addr, u32 addr_len);
1956 extern asmlinkage long sys_setsockopt(int fd, int level, int optname,
1957 char *optval, int optlen);
1958 extern asmlinkage long sys_getsockopt(int fd, int level, int optname,
1959 u32 optval, u32 optlen);
1960
1961 extern asmlinkage long sys_socket(int family, int type, int protocol);
1962 extern asmlinkage long sys_socketpair(int family, int type, int protocol,
1963 int usockvec[2]);
1964 extern asmlinkage long sys_shutdown(int fd, int how);
1965 extern asmlinkage long sys_listen(int fd, int backlog);
1966
1967 asmlinkage long
1968 sys32_socketcall (int call, u32 *args)
1969 {
1970 int ret;
1971 u32 a[6];
1972 u32 a0,a1;
1973
1974 if (call<SYS_SOCKET||call>SYS_RECVMSG)
1975 return -EINVAL;
1976 if (copy_from_user(a, args, nas[call]))
1977 return -EFAULT;
1978 a0=a[0];
1979 a1=a[1];
1980
1981 switch(call)
1982 {
1983 case SYS_SOCKET:
1984 ret = sys_socket(a0, a1, a[2]);
1985 break;
1986 case SYS_BIND:
1987 ret = sys_bind(a0, (struct sockaddr *)A(a1), a[2]);
1988 break;
1989 case SYS_CONNECT:
1990 ret = sys_connect(a0, (struct sockaddr *)A(a1), a[2]);
1991 break;
1992 case SYS_LISTEN:
1993 ret = sys_listen(a0, a1);
1994 break;
1995 case SYS_ACCEPT:
1996 ret = sys_accept(a0, (struct sockaddr *)A(a1), (int *)A(a[2]));
1997 break;
1998 case SYS_GETSOCKNAME:
1999 ret = sys_getsockname(a0, (struct sockaddr *)A(a1), (int *)A(a[2]));
2000 break;
2001 case SYS_GETPEERNAME:
2002 ret = sys_getpeername(a0, (struct sockaddr *)A(a1), (int *)A(a[2]));
2003 break;
2004 case SYS_SOCKETPAIR:
2005 ret = sys_socketpair(a0, a1, a[2], (int *)A(a[3]));
2006 break;
2007 case SYS_SEND:
2008 ret = sys_send(a0, (void *)A(a1), a[2], a[3]);
2009 break;
2010 case SYS_SENDTO:
2011 ret = sys_sendto(a0, a1, a[2], a[3], a[4], a[5]);
2012 break;
2013 case SYS_RECV:
2014 ret = sys_recv(a0, (void *)A(a1), a[2], a[3]);
2015 break;
2016 case SYS_RECVFROM:
2017 ret = sys_recvfrom(a0, a1, a[2], a[3], a[4], a[5]);
2018 break;
2019 case SYS_SHUTDOWN:
2020 ret = sys_shutdown(a0,a1);
2021 break;
2022 case SYS_SETSOCKOPT:
2023 ret = sys_setsockopt(a0, a1, a[2], (char *)A(a[3]),
2024 a[4]);
2025 break;
2026 case SYS_GETSOCKOPT:
2027 ret = sys_getsockopt(a0, a1, a[2], a[3], a[4]);
2028 break;
2029 case SYS_SENDMSG:
2030 ret = sys32_sendmsg(a0, (struct msghdr32 *) A(a1), a[2]);
2031 break;
2032 case SYS_RECVMSG:
2033 ret = sys32_recvmsg(a0, (struct msghdr32 *) A(a1), a[2]);
2034 break;
2035 default:
2036 ret = -EINVAL;
2037 break;
2038 }
2039 return ret;
2040 }
2041
2042 /*
2043 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
2044 *
2045 * This is really horribly ugly.
2046 */
2047
2048 struct msgbuf32 { s32 mtype; char mtext[1]; };
2049
2050 struct ipc_perm32 {
2051 key_t key;
2052 __kernel_uid_t32 uid;
2053 __kernel_gid_t32 gid;
2054 __kernel_uid_t32 cuid;
2055 __kernel_gid_t32 cgid;
2056 __kernel_mode_t32 mode;
2057 unsigned short seq;
2058 };
2059
2060 struct ipc64_perm32 {
2061 key_t key;
2062 __kernel_uid32_t32 uid;
2063 __kernel_gid32_t32 gid;
2064 __kernel_uid32_t32 cuid;
2065 __kernel_gid32_t32 cgid;
2066 __kernel_mode_t32 mode;
2067 unsigned short __pad1;
2068 unsigned short seq;
2069 unsigned short __pad2;
2070 unsigned int unused1;
2071 unsigned int unused2;
2072 };
2073
2074 struct semid_ds32 {
2075 struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
2076 __kernel_time_t32 sem_otime; /* last semop time */
2077 __kernel_time_t32 sem_ctime; /* last change time */
2078 u32 sem_base; /* ptr to first semaphore in array */
2079 u32 sem_pending; /* pending operations to be processed */
2080 u32 sem_pending_last; /* last pending operation */
2081 u32 undo; /* undo requests on this array */
2082 unsigned short sem_nsems; /* no. of semaphores in array */
2083 };
2084
2085 struct semid64_ds32 {
2086 struct ipc64_perm32 sem_perm;
2087 __kernel_time_t32 sem_otime;
2088 unsigned int __unused1;
2089 __kernel_time_t32 sem_ctime;
2090 unsigned int __unused2;
2091 unsigned int sem_nsems;
2092 unsigned int __unused3;
2093 unsigned int __unused4;
2094 };
2095
2096 struct msqid_ds32 {
2097 struct ipc_perm32 msg_perm;
2098 u32 msg_first;
2099 u32 msg_last;
2100 __kernel_time_t32 msg_stime;
2101 __kernel_time_t32 msg_rtime;
2102 __kernel_time_t32 msg_ctime;
2103 u32 wwait;
2104 u32 rwait;
2105 unsigned short msg_cbytes;
2106 unsigned short msg_qnum;
2107 unsigned short msg_qbytes;
2108 __kernel_ipc_pid_t32 msg_lspid;
2109 __kernel_ipc_pid_t32 msg_lrpid;
2110 };
2111
2112 struct msqid64_ds32 {
2113 struct ipc64_perm32 msg_perm;
2114 __kernel_time_t32 msg_stime;
2115 unsigned int __unused1;
2116 __kernel_time_t32 msg_rtime;
2117 unsigned int __unused2;
2118 __kernel_time_t32 msg_ctime;
2119 unsigned int __unused3;
2120 unsigned int msg_cbytes;
2121 unsigned int msg_qnum;
2122 unsigned int msg_qbytes;
2123 __kernel_pid_t32 msg_lspid;
2124 __kernel_pid_t32 msg_lrpid;
2125 unsigned int __unused4;
2126 unsigned int __unused5;
2127 };
2128
2129 struct shmid_ds32 {
2130 struct ipc_perm32 shm_perm;
2131 int shm_segsz;
2132 __kernel_time_t32 shm_atime;
2133 __kernel_time_t32 shm_dtime;
2134 __kernel_time_t32 shm_ctime;
2135 __kernel_ipc_pid_t32 shm_cpid;
2136 __kernel_ipc_pid_t32 shm_lpid;
2137 unsigned short shm_nattch;
2138 };
2139
2140 struct shmid64_ds32 {
2141 struct ipc64_perm32 shm_perm;
2142 __kernel_size_t32 shm_segsz;
2143 __kernel_time_t32 shm_atime;
2144 unsigned int __unused1;
2145 __kernel_time_t32 shm_dtime;
2146 unsigned int __unused2;
2147 __kernel_time_t32 shm_ctime;
2148 unsigned int __unused3;
2149 __kernel_pid_t32 shm_cpid;
2150 __kernel_pid_t32 shm_lpid;
2151 unsigned int shm_nattch;
2152 unsigned int __unused4;
2153 unsigned int __unused5;
2154 };
2155
2156 struct shminfo64_32 {
2157 unsigned int shmmax;
2158 unsigned int shmmin;
2159 unsigned int shmmni;
2160 unsigned int shmseg;
2161 unsigned int shmall;
2162 unsigned int __unused1;
2163 unsigned int __unused2;
2164 unsigned int __unused3;
2165 unsigned int __unused4;
2166 };
2167
2168 struct shm_info32 {
2169 int used_ids;
2170 u32 shm_tot, shm_rss, shm_swp;
2171 u32 swap_attempts, swap_successes;
2172 };
2173
2174 struct ipc_kludge {
2175 u32 msgp;
2176 s32 msgtyp;
2177 };
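/*
 * With the old (version 0) MSGRCV calling convention, `ptr' does not point
 * at the message buffer itself but at a struct ipc_kludge holding the 32-bit
 * buffer pointer and the message type; do_sys32_msgrcv() below unpacks it
 * before calling sys_msgrcv().
 */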
2178
2179 #define SEMOP 1
2180 #define SEMGET 2
2181 #define SEMCTL 3
2182 #define MSGSND 11
2183 #define MSGRCV 12
2184 #define MSGGET 13
2185 #define MSGCTL 14
2186 #define SHMAT 21
2187 #define SHMDT 22
2188 #define SHMGET 23
2189 #define SHMCTL 24
2190
2191 #define IPCOP_MASK(__x) (1UL << (__x))
2192
2193 static int
2194 ipc_parse_version32 (int *cmd)
2195 {
2196 if (*cmd & IPC_64) {
2197 *cmd ^= IPC_64;
2198 return IPC_64;
2199 } else {
2200 return IPC_OLD;
2201 }
2202 }
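/*
 * 32-bit callers request the ipc64_perm-based layouts by OR-ing IPC_64 into
 * the command word.  ipc_parse_version32() strips that flag and tells the
 * caller which user-space layout (IPC_64 or IPC_OLD) it must copy to or from.
 */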
2203
2204 static int
2205 semctl32 (int first, int second, int third, void *uptr)
2206 {
2207 union semun fourth;
2208 u32 pad;
2209 int err = 0, err2;
2210 struct semid64_ds s;
2211 mm_segment_t old_fs;
2212 int version = ipc_parse_version32(&third);
2213
2214 if (!uptr)
2215 return -EINVAL;
2216 if (get_user(pad, (u32 *)uptr))
2217 return -EFAULT;
2218 if (third == SETVAL)
2219 fourth.val = (int)pad;
2220 else
2221 fourth.__pad = (void *)A(pad);
2222 switch (third) {
2223 default:
2224 err = -EINVAL;
2225 break;
2226
2227 case IPC_INFO:
2228 case IPC_RMID:
2229 case IPC_SET:
2230 case SEM_INFO:
2231 case GETVAL:
2232 case GETPID:
2233 case GETNCNT:
2234 case GETZCNT:
2235 case GETALL:
2236 case SETVAL:
2237 case SETALL:
2238 err = sys_semctl(first, second, third, fourth);
2239 break;
2240
2241 case IPC_STAT:
2242 case SEM_STAT:
2243 fourth.__pad = &s;
2244 old_fs = get_fs();
2245 set_fs(KERNEL_DS);
2246 err = sys_semctl(first, second, third, fourth);
2247 set_fs(old_fs);
2248
2249 if (version == IPC_64) {
2250 struct semid64_ds32 *usp64 = (struct semid64_ds32 *) A(pad);
2251
2252 if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
2253 err = -EFAULT;
2254 break;
2255 }
2256 err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
2257 err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
2258 err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
2259 err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
2260 err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
2261 err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
2262 err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
2263 err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
2264 err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
2265 err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
2266 } else {
2267 struct semid_ds32 *usp32 = (struct semid_ds32 *) A(pad);
2268
2269 if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
2270 err = -EFAULT;
2271 break;
2272 }
2273 err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
2274 err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
2275 err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
2276 err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
2277 err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
2278 err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
2279 err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
2280 err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
2281 err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
2282 err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
2283 }
2284 if (err2)
2285 err = -EFAULT;
2286 break;
2287 }
2288 return err;
2289 }
2290
2291 static int
2292 do_sys32_msgsnd (int first, int second, int third, void *uptr)
2293 {
2294 struct msgbuf *p = kmalloc(second + sizeof(struct msgbuf), GFP_USER);
2295 struct msgbuf32 *up = (struct msgbuf32 *)uptr;
2296 mm_segment_t old_fs;
2297 int err;
2298
2299 if (!p)
2300 return -ENOMEM;
2301 err = -EFAULT;
2302 if (get_user(p->mtype, &up->mtype) || copy_from_user(p->mtext, &up->mtext, second))
2303 goto out;
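/*
 * The message now lives in a kernel buffer, so widen the address limit with
 * set_fs(KERNEL_DS) for the duration of the call; otherwise sys_msgsnd()
 * would refuse the kernel-space pointer.  The same get_fs()/set_fs() pattern
 * is used throughout this file whenever a native syscall is fed kernel data.
 */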
2305 old_fs = get_fs();
2306 set_fs(KERNEL_DS);
2307 err = sys_msgsnd(first, p, second, third);
2308 set_fs(old_fs);
2309 out:
2310 kfree(p);
2311 return err;
2312 }
2313
2314 static int
2315 do_sys32_msgrcv (int first, int second, int msgtyp, int third, int version, void *uptr)
2316 {
2317 struct msgbuf32 *up;
2318 struct msgbuf *p;
2319 mm_segment_t old_fs;
2320 int err;
2321
2322 if (!version) {
2323 struct ipc_kludge *uipck = (struct ipc_kludge *)uptr;
2324 struct ipc_kludge ipck;
2325
2326 err = -EINVAL;
2327 if (!uptr)
2328 goto out;
2329 err = -EFAULT;
2330 if (copy_from_user(&ipck, uipck, sizeof(struct ipc_kludge)))
2331 goto out;
2332 uptr = (void *)A(ipck.msgp);
2333 msgtyp = ipck.msgtyp;
2334 }
2335 err = -ENOMEM;
2336 p = kmalloc(second + sizeof(struct msgbuf), GFP_USER);
2337 if (!p)
2338 goto out;
2339 old_fs = get_fs();
2340 set_fs(KERNEL_DS);
2341 err = sys_msgrcv(first, p, second, msgtyp, third);
2342 set_fs(old_fs);
2343 if (err < 0)
2344 goto free_then_out;
2345 up = (struct msgbuf32 *)uptr;
2346 if (put_user(p->mtype, &up->mtype) || copy_to_user(&up->mtext, p->mtext, err))
2347 err = -EFAULT;
2348 free_then_out:
2349 kfree(p);
2350 out:
2351 return err;
2352 }
2353
2354 static int
2355 msgctl32 (int first, int second, void *uptr)
2356 {
2357 int err = -EINVAL, err2;
2358 struct msqid_ds m;
2359 struct msqid64_ds m64;
2360 struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr;
2361 struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr;
2362 mm_segment_t old_fs;
2363 int version = ipc_parse_version32(&second);
2364
2365 switch (second) {
2366 case IPC_INFO:
2367 case IPC_RMID:
2368 case MSG_INFO:
2369 err = sys_msgctl(first, second, (struct msqid_ds *)uptr);
2370 break;
2371
2372 case IPC_SET:
2373 if (version == IPC_64) {
2374 err = get_user(m64.msg_perm.uid, &up64->msg_perm.uid);
2375 err |= get_user(m64.msg_perm.gid, &up64->msg_perm.gid);
2376 err |= get_user(m64.msg_perm.mode, &up64->msg_perm.mode);
2377 err |= get_user(m64.msg_qbytes, &up64->msg_qbytes);
2378 } else {
2379 err = get_user(m64.msg_perm.uid, &up32->msg_perm.uid);
2380 err |= get_user(m64.msg_perm.gid, &up32->msg_perm.gid);
2381 err |= get_user(m64.msg_perm.mode, &up32->msg_perm.mode);
2382 err |= get_user(m64.msg_qbytes, &up32->msg_qbytes);
2383 }
2384 if (err)
2385 break;
2386 old_fs = get_fs();
2387 set_fs(KERNEL_DS);
2388 err = sys_msgctl(first, second, (struct msqid_ds *) &m64);
2389 set_fs(old_fs);
2390 break;
2391
2392 case IPC_STAT:
2393 case MSG_STAT:
2394 old_fs = get_fs();
2395 set_fs(KERNEL_DS);
2396 err = sys_msgctl(first, second, (void *) &m64);
2397 set_fs(old_fs);
2398
2399 if (version == IPC_64) {
2400 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
2401 err = -EFAULT;
2402 break;
2403 }
2404 err2 = __put_user(m64.msg_perm.key, &up64->msg_perm.key);
2405 err2 |= __put_user(m64.msg_perm.uid, &up64->msg_perm.uid);
2406 err2 |= __put_user(m64.msg_perm.gid, &up64->msg_perm.gid);
2407 err2 |= __put_user(m64.msg_perm.cuid, &up64->msg_perm.cuid);
2408 err2 |= __put_user(m64.msg_perm.cgid, &up64->msg_perm.cgid);
2409 err2 |= __put_user(m64.msg_perm.mode, &up64->msg_perm.mode);
2410 err2 |= __put_user(m64.msg_perm.seq, &up64->msg_perm.seq);
2411 err2 |= __put_user(m64.msg_stime, &up64->msg_stime);
2412 err2 |= __put_user(m64.msg_rtime, &up64->msg_rtime);
2413 err2 |= __put_user(m64.msg_ctime, &up64->msg_ctime);
2414 err2 |= __put_user(m64.msg_cbytes, &up64->msg_cbytes);
2415 err2 |= __put_user(m64.msg_qnum, &up64->msg_qnum);
2416 err2 |= __put_user(m64.msg_qbytes, &up64->msg_qbytes);
2417 err2 |= __put_user(m64.msg_lspid, &up64->msg_lspid);
2418 err2 |= __put_user(m64.msg_lrpid, &up64->msg_lrpid);
2419 if (err2)
2420 err = -EFAULT;
2421 } else {
2422 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
2423 err = -EFAULT;
2424 break;
2425 }
2426 err2 = __put_user(m64.msg_perm.key, &up32->msg_perm.key);
2427 err2 |= __put_user(m64.msg_perm.uid, &up32->msg_perm.uid);
2428 err2 |= __put_user(m64.msg_perm.gid, &up32->msg_perm.gid);
2429 err2 |= __put_user(m64.msg_perm.cuid, &up32->msg_perm.cuid);
2430 err2 |= __put_user(m64.msg_perm.cgid, &up32->msg_perm.cgid);
2431 err2 |= __put_user(m64.msg_perm.mode, &up32->msg_perm.mode);
2432 err2 |= __put_user(m64.msg_perm.seq, &up32->msg_perm.seq);
2433 err2 |= __put_user(m64.msg_stime, &up32->msg_stime);
2434 err2 |= __put_user(m64.msg_rtime, &up32->msg_rtime);
2435 err2 |= __put_user(m64.msg_ctime, &up32->msg_ctime);
2436 err2 |= __put_user(m64.msg_cbytes, &up32->msg_cbytes);
2437 err2 |= __put_user(m64.msg_qnum, &up32->msg_qnum);
2438 err2 |= __put_user(m64.msg_qbytes, &up32->msg_qbytes);
2439 err2 |= __put_user(m64.msg_lspid, &up32->msg_lspid);
2440 err2 |= __put_user(m64.msg_lrpid, &up32->msg_lrpid);
2441 if (err2)
2442 err = -EFAULT;
2443 }
2444 break;
2445 }
2446 return err;
2447 }
2448
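/*
 * For SHMAT, the ia32 `ipc' glue passes the address of a 32-bit user word in
 * `third'; the attach address chosen by sys_shmat() is written back through
 * that pointer so the 32-bit C library can hand it to the caller.
 */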
2449 static int
2450 shmat32 (int first, int second, int third, int version, void *uptr)
2451 {
2452 unsigned long raddr;
2453 u32 *uaddr = (u32 *)A((u32)third);
2454 int err;
2455
2456 if (version == 1)
2457 return -EINVAL; /* iBCS2 emulator entry point: unsupported */
2458 err = sys_shmat(first, uptr, second, &raddr);
2459 if (err)
2460 return err;
2461 return put_user(raddr, uaddr);
2462 }
2463
2464 static int
2465 shmctl32 (int first, int second, void *uptr)
2466 {
2467 int err = -EFAULT, err2;
2468
2469 struct shmid64_ds s64;
2470 struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
2471 struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
2472 mm_segment_t old_fs;
2473 struct shm_info32 *uip = (struct shm_info32 *)uptr;
2474 struct shm_info si;
2475 int version = ipc_parse_version32(&second);
2476 struct shminfo64 smi;
2477 struct shminfo *usi32 = (struct shminfo *) uptr;
2478 struct shminfo64_32 *usi64 = (struct shminfo64_32 *) uptr;
2479
2480 switch (second) {
2481 case IPC_INFO:
2482 old_fs = get_fs();
2483 set_fs(KERNEL_DS);
2484 err = sys_shmctl(first, second, (struct shmid_ds *)&smi);
2485 set_fs(old_fs);
2486
2487 if (version == IPC_64) {
2488 if (!access_ok(VERIFY_WRITE, usi64, sizeof(*usi64))) {
2489 err = -EFAULT;
2490 break;
2491 }
2492 err2 = __put_user(smi.shmmax, &usi64->shmmax);
2493 err2 |= __put_user(smi.shmmin, &usi64->shmmin);
2494 err2 |= __put_user(smi.shmmni, &usi64->shmmni);
2495 err2 |= __put_user(smi.shmseg, &usi64->shmseg);
2496 err2 |= __put_user(smi.shmall, &usi64->shmall);
2497 } else {
2498 if (!access_ok(VERIFY_WRITE, usi32, sizeof(*usi32))) {
2499 err = -EFAULT;
2500 break;
2501 }
2502 err2 = __put_user(smi.shmmax, &usi32->shmmax);
2503 err2 |= __put_user(smi.shmmin, &usi32->shmmin);
2504 err2 |= __put_user(smi.shmmni, &usi32->shmmni);
2505 err2 |= __put_user(smi.shmseg, &usi32->shmseg);
2506 err2 |= __put_user(smi.shmall, &usi32->shmall);
2507 }
2508 if (err2)
2509 err = -EFAULT;
2510 break;
2511
2512 case IPC_RMID:
2513 case SHM_LOCK:
2514 case SHM_UNLOCK:
2515 err = sys_shmctl(first, second, (struct shmid_ds *)uptr);
2516 break;
2517
2518 case IPC_SET:
2519 if (version == IPC_64) {
2520 err = get_user(s64.shm_perm.uid, &up64->shm_perm.uid);
2521 err |= get_user(s64.shm_perm.gid, &up64->shm_perm.gid);
2522 err |= get_user(s64.shm_perm.mode, &up64->shm_perm.mode);
2523 } else {
2524 err = get_user(s64.shm_perm.uid, &up32->shm_perm.uid);
2525 err |= get_user(s64.shm_perm.gid, &up32->shm_perm.gid);
2526 err |= get_user(s64.shm_perm.mode, &up32->shm_perm.mode);
2527 }
2528 if (err)
2529 break;
2530 old_fs = get_fs();
2531 set_fs(KERNEL_DS);
2532 err = sys_shmctl(first, second, (struct shmid_ds *) &s64);
2533 set_fs(old_fs);
2534 break;
2535
2536 case IPC_STAT:
2537 case SHM_STAT:
2538 old_fs = get_fs();
2539 set_fs(KERNEL_DS);
2540 err = sys_shmctl(first, second, (void *) &s64);
2541 set_fs(old_fs);
2542 if (err < 0)
2543 break;
2544 if (version == IPC_64) {
2545 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
2546 err = -EFAULT;
2547 break;
2548 }
2549 err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
2550 err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
2551 err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
2552 err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
2553 err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
2554 err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
2555 err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
2556 err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
2557 err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
2558 err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
2559 err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
2560 err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
2561 err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
2562 err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
2563 } else {
2564 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
2565 err = -EFAULT;
2566 break;
2567 }
2568 err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
2569 err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
2570 err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
2571 err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
2572 err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
2573 err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
2574 err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
2575 err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
2576 err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
2577 err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
2578 err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
2579 err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
2580 err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
2581 err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
2582 }
2583 if (err2)
2584 err = -EFAULT;
2585 break;
2586
2587 case SHM_INFO:
2588 old_fs = get_fs();
2589 set_fs(KERNEL_DS);
2590 err = sys_shmctl(first, second, (void *)&si);
2591 set_fs(old_fs);
2592 if (err < 0)
2593 break;
2594
2595 if (!access_ok(VERIFY_WRITE, uip, sizeof(*uip))) {
2596 err = -EFAULT;
2597 break;
2598 }
2599 err2 = __put_user(si.used_ids, &uip->used_ids);
2600 err2 |= __put_user(si.shm_tot, &uip->shm_tot);
2601 err2 |= __put_user(si.shm_rss, &uip->shm_rss);
2602 err2 |= __put_user(si.shm_swp, &uip->shm_swp);
2603 err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
2604 err2 |= __put_user(si.swap_successes, &uip->swap_successes);
2605 if (err2)
2606 err = -EFAULT;
2607 break;
2608
2609 }
2610 return err;
2611 }
2612
2613 asmlinkage long
2614 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
2615 {
2616 int version;
2617
2618 version = call >> 16; /* hack for backward compatibility */
2619 call &= 0xffff;
2620
2621 switch (call) {
2622 case SEMOP:
2623 /* struct sembuf is the same on 32 and 64bit :)) */
2624 return sys_semop(first, (struct sembuf *)AA(ptr), second);
2625 case SEMGET:
2626 return sys_semget(first, second, third);
2627 case SEMCTL:
2628 return semctl32(first, second, third, (void *)AA(ptr));
2629
2630 case MSGSND:
2631 return do_sys32_msgsnd(first, second, third, (void *)AA(ptr));
2632 case MSGRCV:
2633 return do_sys32_msgrcv(first, second, fifth, third, version, (void *)AA(ptr));
2634 case MSGGET:
2635 return sys_msgget((key_t) first, second);
2636 case MSGCTL:
2637 return msgctl32(first, second, (void *)AA(ptr));
2638
2639 case SHMAT:
2640 return shmat32(first, second, third, version, (void *)AA(ptr));
2642 case SHMDT:
2643 return sys_shmdt((char *)AA(ptr));
2644 case SHMGET:
2645 return sys_shmget(first, second, third);
2646 case SHMCTL:
2647 return shmctl32(first, second, (void *)AA(ptr));
2648
2649 default:
2650 return -ENOSYS;
2651 }
2652 return -EINVAL;
2653 }
2654
2655 /*
2656 * sys_time() can be implemented in user-level using
2657 * sys_gettimeofday(). IA64 did this but i386 Linux did not
2658 * so we have to implement this system call here.
2659 */
2660 asmlinkage long
2661 sys32_time (int *tloc)
2662 {
2663 int i;
2664
2665 /* SMP: This is fairly trivial. We grab CURRENT_TIME and
2666 stuff it to user space. No side effects */
2667 i = CURRENT_TIME;
2668 if (tloc) {
2669 if (put_user(i, tloc))
2670 i = -EFAULT;
2671 }
2672 return i;
2673 }
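/*
 * For reference, the user-level emulation mentioned above would amount to
 * roughly the following sketch (not code used anywhere in the kernel):
 *
 *	time_t time_via_gettimeofday (time_t *t)
 *	{
 *		struct timeval tv;
 *
 *		gettimeofday(&tv, NULL);
 *		if (t)
 *			*t = tv.tv_sec;
 *		return tv.tv_sec;
 *	}
 */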
2674
2675 struct rusage32 {
2676 struct timeval32 ru_utime;
2677 struct timeval32 ru_stime;
2678 int ru_maxrss;
2679 int ru_ixrss;
2680 int ru_idrss;
2681 int ru_isrss;
2682 int ru_minflt;
2683 int ru_majflt;
2684 int ru_nswap;
2685 int ru_inblock;
2686 int ru_oublock;
2687 int ru_msgsnd;
2688 int ru_msgrcv;
2689 int ru_nsignals;
2690 int ru_nvcsw;
2691 int ru_nivcsw;
2692 };
2693
2694 static int
2695 put_rusage (struct rusage32 *ru, struct rusage *r)
2696 {
2697 int err;
2698
2699 if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)))
2700 return -EFAULT;
2701
2702 err = __put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
2703 err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
2704 err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
2705 err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
2706 err |= __put_user (r->ru_maxrss, &ru->ru_maxrss);
2707 err |= __put_user (r->ru_ixrss, &ru->ru_ixrss);
2708 err |= __put_user (r->ru_idrss, &ru->ru_idrss);
2709 err |= __put_user (r->ru_isrss, &ru->ru_isrss);
2710 err |= __put_user (r->ru_minflt, &ru->ru_minflt);
2711 err |= __put_user (r->ru_majflt, &ru->ru_majflt);
2712 err |= __put_user (r->ru_nswap, &ru->ru_nswap);
2713 err |= __put_user (r->ru_inblock, &ru->ru_inblock);
2714 err |= __put_user (r->ru_oublock, &ru->ru_oublock);
2715 err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd);
2716 err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv);
2717 err |= __put_user (r->ru_nsignals, &ru->ru_nsignals);
2718 err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw);
2719 err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw);
2720 return err;
2721 }
2722
2723 asmlinkage long
2724 sys32_wait4 (int pid, unsigned int *stat_addr, int options, struct rusage32 *ru)
2725 {
2726 if (!ru)
2727 return sys_wait4(pid, stat_addr, options, NULL);
2728 else {
2729 struct rusage r;
2730 int ret;
2731 unsigned int status;
2732 mm_segment_t old_fs = get_fs();
2733
2734 set_fs(KERNEL_DS);
2735 ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r);
2736 set_fs(old_fs);
2737 if (put_rusage(ru, &r))
2738 return -EFAULT;
2739 if (stat_addr && put_user(status, stat_addr))
2740 return -EFAULT;
2741 return ret;
2742 }
2743 }
2744
2745 asmlinkage long
2746 sys32_waitpid (int pid, unsigned int *stat_addr, int options)
2747 {
2748 return sys32_wait4(pid, stat_addr, options, NULL);
2749 }
2750
2751
2752 extern asmlinkage long sys_getrusage (int who, struct rusage *ru);
2753
2754 asmlinkage long
2755 sys32_getrusage (int who, struct rusage32 *ru)
2756 {
2757 struct rusage r;
2758 int ret;
2759 mm_segment_t old_fs = get_fs();
2760
2761 set_fs(KERNEL_DS);
2762 ret = sys_getrusage(who, &r);
2763 set_fs(old_fs);
2764 if (put_rusage (ru, &r))
2765 return -EFAULT;
2766 return ret;
2767 }
2768
2769 struct tms32 {
2770 __kernel_clock_t32 tms_utime;
2771 __kernel_clock_t32 tms_stime;
2772 __kernel_clock_t32 tms_cutime;
2773 __kernel_clock_t32 tms_cstime;
2774 };
2775
2776 extern asmlinkage long sys_times (struct tms * tbuf);
2777
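/*
 * In sys32_times() below, both the individual tms fields and the return
 * value are passed through IA32_TICK() (from <asm/ia32.h>) so that tick
 * counts are reported in the units an ia32 binary expects rather than in
 * native ia64 clock ticks; see the header for the exact conversion.
 */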
2778 asmlinkage long
2779 sys32_times (struct tms32 *tbuf)
2780 {
2781 mm_segment_t old_fs = get_fs();
2782 struct tms t;
2783 long ret;
2784 int err;
2785
2786 set_fs(KERNEL_DS);
2787 ret = sys_times(tbuf ? &t : NULL);
2788 set_fs(old_fs);
2789 if (tbuf) {
2790 err = put_user (IA32_TICK(t.tms_utime), &tbuf->tms_utime);
2791 err |= put_user (IA32_TICK(t.tms_stime), &tbuf->tms_stime);
2792 err |= put_user (IA32_TICK(t.tms_cutime), &tbuf->tms_cutime);
2793 err |= put_user (IA32_TICK(t.tms_cstime), &tbuf->tms_cstime);
2794 if (err)
2795 ret = -EFAULT;
2796 }
2797 return IA32_TICK(ret);
2798 }
2799
2800 static unsigned int
2801 ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int *val)
2802 {
2803 size_t copied;
2804 unsigned int ret;
2805
2806 copied = access_process_vm(child, addr, val, sizeof(*val), 0);
2807 return (copied != sizeof(ret)) ? -EIO : 0;
2808 }
2809
2810 static unsigned int
2811 ia32_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int val)
2812 {
2813
2814 if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
2815 return -EIO;
2816 return 0;
2817 }
2818
2819 /*
2820 * The order in which registers are stored in the ptrace regs structure
2821 */
2822 #define PT_EBX 0
2823 #define PT_ECX 1
2824 #define PT_EDX 2
2825 #define PT_ESI 3
2826 #define PT_EDI 4
2827 #define PT_EBP 5
2828 #define PT_EAX 6
2829 #define PT_DS 7
2830 #define PT_ES 8
2831 #define PT_FS 9
2832 #define PT_GS 10
2833 #define PT_ORIG_EAX 11
2834 #define PT_EIP 12
2835 #define PT_CS 13
2836 #define PT_EFL 14
2837 #define PT_UESP 15
2838 #define PT_SS 16
2839
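/*
 * PTRACE_PEEKUSR/POKEUSR pass a byte offset into the ia32 `struct user'
 * register area, which is why getreg()/putreg() below divide by sizeof(int)
 * to recover one of the indices above; e.g. addr 24 selects PT_EAX
 * (24 / 4 == 6), which getreg() reads back from the ia64 r8 register.
 */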
2840 static unsigned int
2841 getreg (struct task_struct *child, int regno)
2842 {
2843 struct pt_regs *child_regs;
2844
2845 child_regs = ia64_task_regs(child);
2846 switch (regno / sizeof(int)) {
2847 case PT_EBX: return child_regs->r11;
2848 case PT_ECX: return child_regs->r9;
2849 case PT_EDX: return child_regs->r10;
2850 case PT_ESI: return child_regs->r14;
2851 case PT_EDI: return child_regs->r15;
2852 case PT_EBP: return child_regs->r13;
2853 case PT_EAX: return child_regs->r8;
2854 case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
2855 case PT_EIP: return child_regs->cr_iip;
2856 case PT_UESP: return child_regs->r12;
2857 case PT_EFL: return child->thread.eflag;
2858 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
2859 return __USER_DS;
2860 case PT_CS: return __USER_CS;
2861 default:
2862 printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
2863 break;
2864 }
2865 return 0;
2866 }
2867
2868 static void
2869 putreg (struct task_struct *child, int regno, unsigned int value)
2870 {
2871 struct pt_regs *child_regs;
2872
2873 child_regs = ia64_task_regs(child);
2874 switch (regno / sizeof(int)) {
2875 case PT_EBX: child_regs->r11 = value; break;
2876 case PT_ECX: child_regs->r9 = value; break;
2877 case PT_EDX: child_regs->r10 = value; break;
2878 case PT_ESI: child_regs->r14 = value; break;
2879 case PT_EDI: child_regs->r15 = value; break;
2880 case PT_EBP: child_regs->r13 = value; break;
2881 case PT_EAX: child_regs->r8 = value; break;
2882 case PT_ORIG_EAX: child_regs->r1 = value; break;
2883 case PT_EIP: child_regs->cr_iip = value; break;
2884 case PT_UESP: child_regs->r12 = value; break;
2885 case PT_EFL: child->thread.eflag = value; break;
2886 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
2887 if (value != __USER_DS)
2888 printk(KERN_ERR
2889 "ia32.putreg: attempt to set invalid segment register %d = %x\n",
2890 regno, value);
2891 break;
2892 case PT_CS:
2893 if (value != __USER_CS)
2894 printk(KERN_ERR
2895 "ia32.putreg: attempt to to set invalid segment register %d = %x\n",
2896 regno, value);
2897 break;
2898 default:
2899 printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
2900 break;
2901 }
2902 }
2903
2904 static void
2905 put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
2906 int tos)
2907 {
2908 struct _fpreg_ia32 *f;
2909 char buf[32];
2910
2911 f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
2912 if ((regno += tos) >= 8)
2913 regno -= 8;
2914 switch (regno) {
2915 case 0:
2916 ia64f2ia32f(f, &ptp->f8);
2917 break;
2918 case 1:
2919 ia64f2ia32f(f, &ptp->f9);
2920 break;
2921 case 2:
2922 ia64f2ia32f(f, &ptp->f10);
2923 break;
2924 case 3:
2925 ia64f2ia32f(f, &ptp->f11);
2926 break;
2927 case 4:
2928 case 5:
2929 case 6:
2930 case 7:
2931 ia64f2ia32f(f, &swp->f12 + (regno - 4));
2932 break;
2933 }
2934 copy_to_user(reg, f, sizeof(*reg));
2935 }
2936
2937 static void
2938 get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
2939 int tos)
2940 {
2941
2942 if ((regno += tos) >= 8)
2943 regno -= 8;
2944 switch (regno) {
2945 case 0:
2946 copy_from_user(&ptp->f8, reg, sizeof(*reg));
2947 break;
2948 case 1:
2949 copy_from_user(&ptp->f9, reg, sizeof(*reg));
2950 break;
2951 case 2:
2952 copy_from_user(&ptp->f10, reg, sizeof(*reg));
2953 break;
2954 case 3:
2955 copy_from_user(&ptp->f11, reg, sizeof(*reg));
2956 break;
2957 case 4:
2958 case 5:
2959 case 6:
2960 case 7:
2961 copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
2962 break;
2963 }
2964 return;
2965 }
2966
2967 int
2968 save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
2969 {
2970 struct switch_stack *swp;
2971 struct pt_regs *ptp;
2972 int i, tos;
2973
2974 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
2975 return -EFAULT;
2976
2977 __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
2978 __put_user(tsk->thread.fsr & 0xffff, &save->swd);
2979 __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
2980 __put_user(tsk->thread.fir, &save->fip);
2981 __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
2982 __put_user(tsk->thread.fdr, &save->foo);
2983 __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
2984
2985 /*
2986 * Stack frames start with 16-bytes of temp space
2987 */
2988 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
2989 ptp = ia64_task_regs(tsk);
2990 tos = (tsk->thread.fsr >> 11) & 7;
2991 for (i = 0; i < 8; i++)
2992 put_fpreg(i, &save->st_space[i], ptp, swp, tos);
2993 return 0;
2994 }
2995
2996 static int
2997 restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
2998 {
2999 struct switch_stack *swp;
3000 struct pt_regs *ptp;
3001 int i, tos;
3002 unsigned int fsrlo, fsrhi, num32;
3003
3004 if (!access_ok(VERIFY_READ, save, sizeof(*save)))
3005 return(-EFAULT);
3006
3007 __get_user(num32, (unsigned int *)&save->cwd);
3008 tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
3009 __get_user(fsrlo, (unsigned int *)&save->swd);
3010 __get_user(fsrhi, (unsigned int *)&save->twd);
3011 num32 = (fsrhi << 16) | fsrlo;
3012 tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
3013 __get_user(num32, (unsigned int *)&save->fip);
3014 tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
3015 __get_user(num32, (unsigned int *)&save->foo);
3016 tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
3017
3018 /*
3019 * Stack frames start with 16-bytes of temp space
3020 */
3021 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
3022 ptp = ia64_task_regs(tsk);
3023 tos = (tsk->thread.fsr >> 11) & 7;
3024 for (i = 0; i < 8; i++)
3025 get_fpreg(i, &save->st_space[i], ptp, swp, tos);
3026 return 0;
3027 }
3028
3029 int
3030 save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
3031 {
3032 struct switch_stack *swp;
3033 struct pt_regs *ptp;
3034 int i, tos;
3035 unsigned long mxcsr=0;
3036 unsigned long num128[2];
3037
3038 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
3039 return -EFAULT;
3040
3041 __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
3042 __put_user(tsk->thread.fsr & 0xffff, &save->swd);
3043 __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
3044 __put_user(tsk->thread.fir, &save->fip);
3045 __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
3046 __put_user(tsk->thread.fdr, &save->foo);
3047 __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
3048
3049 /*
3050 * Stack frames start with 16-bytes of temp space
3051 */
3052 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
3053 ptp = ia64_task_regs(tsk);
3054 tos = (tsk->thread.fsr >> 11) & 7;
3055 for (i = 0; i < 8; i++)
3056 put_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
3057
3058 mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
3059 __put_user(mxcsr & 0xffff, &save->mxcsr);
3060 for (i = 0; i < 8; i++) {
3061 memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
3062 memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
3063 copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
3064 }
3065 return 0;
3066 }
3067
3068 static int
3069 restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
3070 {
3071 struct switch_stack *swp;
3072 struct pt_regs *ptp;
3073 int i, tos;
3074 unsigned int fsrlo, fsrhi, num32;
3075 int mxcsr;
3076 unsigned long num64;
3077 unsigned long num128[2];
3078
3079 if (!access_ok(VERIFY_READ, save, sizeof(*save)))
3080 return(-EFAULT);
3081
3082 __get_user(num32, (unsigned int *)&save->cwd);
3083 tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
3084 __get_user(fsrlo, (unsigned int *)&save->swd);
3085 __get_user(fsrhi, (unsigned int *)&save->twd);
3086 num32 = (fsrhi << 16) | fsrlo;
3087 tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
3088 __get_user(num32, (unsigned int *)&save->fip);
3089 tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
3090 __get_user(num32, (unsigned int *)&save->foo);
3091 tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
3092
3093 /*
3094 * Stack frames start with 16-bytes of temp space
3095 */
3096 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
3097 ptp = ia64_task_regs(tsk);
3098 tos = (tsk->thread.fsr >> 11) & 7;
3099 for (i = 0; i < 8; i++)
3100 get_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
3101
3102 __get_user(mxcsr, (unsigned int *)&save->mxcsr);
3103 num64 = mxcsr & 0xff10;
3104 tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000)) | (num64<<32);
3105 num64 = mxcsr & 0x3f;
3106 tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000)) | (num64<<32);
3107
3108 for (i = 0; i < 8; i++) {
3109 copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
3110 memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
3111 memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
3112 }
3113 return 0;
3114 }
3115
3116 extern asmlinkage long sys_ptrace (long, pid_t, unsigned long, unsigned long, long, long, long,
3117 long, long);
3118
3119 /*
3120 * Note that the IA32 version of `ptrace' calls the IA64 routine for
3121 * many of the requests. This will only work for requests that do
3122 * not need access to the calling process's `pt_regs', which is located
3123 * at the address of `stack'. Once we call the IA64 `sys_ptrace' then
3124 * the address of `stack' will not be the address of the `pt_regs'.
3125 */
3126 asmlinkage long
3127 sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
3128 long arg4, long arg5, long arg6, long arg7, long stack)
3129 {
3130 struct pt_regs *regs = (struct pt_regs *) &stack;
3131 struct task_struct *child;
3132 unsigned int value, tmp;
3133 long i, ret;
3134
3135 lock_kernel();
3136 if (request == PTRACE_TRACEME) {
3137 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
3138 goto out;
3139 }
3140
3141 ret = -ESRCH;
3142 read_lock(&tasklist_lock);
3143 child = find_task_by_pid(pid);
3144 if (child)
3145 get_task_struct(child);
3146 read_unlock(&tasklist_lock);
3147 if (!child)
3148 goto out;
3149 ret = -EPERM;
3150 if (pid == 1) /* no messing around with init! */
3151 goto out_tsk;
3152
3153 if (request == PTRACE_ATTACH) {
3154 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
3155 goto out_tsk;
3156 }
3157
3158 ret = ptrace_check_attach(child, request == PTRACE_KILL);
3159 if (ret < 0)
3160 goto out_tsk;
3161
3162 switch (request) {
3163 case PTRACE_PEEKTEXT:
3164 case PTRACE_PEEKDATA: /* read word at location addr */
3165 ret = ia32_peek(regs, child, addr, &value);
3166 if (ret == 0)
3167 ret = put_user(value, (unsigned int *) A(data));
3168 else
3169 ret = -EIO;
3170 goto out_tsk;
3171
3172 case PTRACE_POKETEXT:
3173 case PTRACE_POKEDATA: /* write the word at location addr */
3174 ret = ia32_poke(regs, child, addr, data);
3175 goto out_tsk;
3176
3177 case PTRACE_PEEKUSR: /* read word at addr in USER area */
3178 ret = -EIO;
3179 if ((addr & 3) || addr > 17*sizeof(int))
3180 break;
3181
3182 tmp = getreg(child, addr);
3183 if (!put_user(tmp, (unsigned int *) A(data)))
3184 ret = 0;
3185 break;
3186
3187 case PTRACE_POKEUSR: /* write word at addr in USER area */
3188 ret = -EIO;
3189 if ((addr & 3) || addr > 17*sizeof(int))
3190 break;
3191
3192 putreg(child, addr, data);
3193 ret = 0;
3194 break;
3195
3196 case IA32_PTRACE_GETREGS:
3197 if (!access_ok(VERIFY_WRITE, (int *) A(data), 17*sizeof(int))) {
3198 ret = -EIO;
3199 break;
3200 }
3201 for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
3202 put_user(getreg(child, i), (unsigned int *) A(data));
3203 data += sizeof(int);
3204 }
3205 ret = 0;
3206 break;
3207
3208 case IA32_PTRACE_SETREGS:
3209 if (!access_ok(VERIFY_READ, (int *) A(data), 17*sizeof(int))) {
3210 ret = -EIO;
3211 break;
3212 }
3213 for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
3214 get_user(tmp, (unsigned int *) A(data));
3215 putreg(child, i, tmp);
3216 data += sizeof(int);
3217 }
3218 ret = 0;
3219 break;
3220
3221 case IA32_PTRACE_GETFPREGS:
3222 ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct *) A(data));
3223 break;
3224
3225 case IA32_PTRACE_GETFPXREGS:
3226 ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct *) A(data));
3227 break;
3228
3229 case IA32_PTRACE_SETFPREGS:
3230 ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct *) A(data));
3231 break;
3232
3233 case IA32_PTRACE_SETFPXREGS:
3234 ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct *) A(data));
3235 break;
3236
3237 case PTRACE_SYSCALL: /* continue, stop after next syscall */
3238 case PTRACE_CONT: /* restart after signal. */
3239 case PTRACE_KILL:
3240 case PTRACE_SINGLESTEP: /* execute child for one instruction */
3241 case PTRACE_DETACH: /* detach a process */
3242 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
3243 break;
3244
3245 default:
3246 ret = -EIO;
3247 break;
3248
3249 }
3250 out_tsk:
3251 free_task_struct(child);
3252 out:
3253 unlock_kernel();
3254 return ret;
3255 }
3256
3257 static inline int
3258 get_flock32(struct flock *kfl, struct flock32 *ufl)
3259 {
3260 int err;
3261
3262 if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)))
3263 return -EFAULT;
3264
3265 err = __get_user(kfl->l_type, &ufl->l_type);
3266 err |= __get_user(kfl->l_whence, &ufl->l_whence);
3267 err |= __get_user(kfl->l_start, &ufl->l_start);
3268 err |= __get_user(kfl->l_len, &ufl->l_len);
3269 err |= __get_user(kfl->l_pid, &ufl->l_pid);
3270 return err;
3271 }
3272
3273 static inline int
3274 put_flock32(struct flock *kfl, struct flock32 *ufl)
3275 {
3276 int err;
3277
3278 if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)))
3279 return -EFAULT;
3280
3281 err = __put_user(kfl->l_type, &ufl->l_type);
3282 err |= __put_user(kfl->l_whence, &ufl->l_whence);
3283 err |= __put_user(kfl->l_start, &ufl->l_start);
3284 err |= __put_user(kfl->l_len, &ufl->l_len);
3285 err |= __put_user(kfl->l_pid, &ufl->l_pid);
3286 return err;
3287 }
3288
3289 extern asmlinkage long sys_fcntl (unsigned int fd, unsigned int cmd, unsigned long arg);
3290
3291 asmlinkage long
3292 sys32_fcntl (unsigned int fd, unsigned int cmd, unsigned int arg)
3293 {
3294 mm_segment_t old_fs;
3295 struct flock f;
3296 long ret;
3297
3298 switch (cmd) {
3299 case F_GETLK:
3300 case F_SETLK:
3301 case F_SETLKW:
3302 if (get_flock32(&f, (struct flock32 *) A(arg)))
3303 return -EFAULT;
3304 old_fs = get_fs();
3305 set_fs(KERNEL_DS);
3306 ret = sys_fcntl(fd, cmd, (unsigned long) &f);
3307 set_fs(old_fs);
3308 if (cmd == F_GETLK && put_flock32(&f, (struct flock32 *) A(arg)))
3309 return -EFAULT;
3310 return ret;
3311
3312 default:
3313 /*
3314 * `sys_fcntl' lies about arg: for the F_SETOWN
3315 * sub-function, arg can have a negative value.
3316 */
3317 return sys_fcntl(fd, cmd, arg);
3318 }
3319 }
3320
3321 asmlinkage long sys_ni_syscall(void);
3322
3323 asmlinkage long
3324 sys32_ni_syscall (int dummy0, int dummy1, int dummy2, int dummy3, int dummy4, int dummy5,
3325 int dummy6, int dummy7, int stack)
3326 {
3327 struct pt_regs *regs = (struct pt_regs *)&stack;
3328
3329 printk(KERN_WARNING "IA32 syscall #%d issued, maybe we should implement it\n",
3330 (int)regs->r1);
3331 return(sys_ni_syscall());
3332 }
3333
3334 /*
3335 * The IA64 maps 4 I/O ports for each 4K page
3336 */
3337 #define IOLEN ((65536 / 4) * 4096)
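/*
 * 65536 ports at 4 ports per 4K page is 16384 pages, i.e. IOLEN covers 64MB
 * of virtual space for the entire ia32 I/O port range.
 */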
3338
3339 asmlinkage long
3340 sys32_iopl (int level)
3341 {
3342 extern unsigned long ia64_iobase;
3343 int fd;
3344 struct file * file;
3345 unsigned int old;
3346 unsigned long addr;
3347 mm_segment_t old_fs = get_fs ();
3348
3349 if (level != 3)
3350 return(-EINVAL);
3351 /* Trying to gain more privileges? */
3352 asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
3353 if (level > ((old >> 12) & 3)) {
3354 if (!capable(CAP_SYS_RAWIO))
3355 return -EPERM;
3356 }
3357 set_fs(KERNEL_DS);
3358 fd = sys_open("/dev/mem", O_SYNC | O_RDWR, 0);
3359 set_fs(old_fs);
3360 if (fd < 0)
3361 return fd;
3362 file = fget(fd);
3363 if (file == NULL) {
3364 sys_close(fd);
3365 return(-EFAULT);
3366 }
3367
3368 down_write(&current->mm->mmap_sem);
3369 addr = do_mmap_pgoff(file, IA32_IOBASE,
3370 IOLEN, PROT_READ|PROT_WRITE, MAP_SHARED,
3371 (ia64_iobase & ~PAGE_OFFSET) >> PAGE_SHIFT);
3372 up_write(&current->mm->mmap_sem);
3373
3374 if ((long) addr >= 0) {
3375 old = (old & ~0x3000) | (level << 12);
3376 asm volatile ("mov ar.eflag=%0;;" :: "r"(old));
3377 }
3378
3379 fput(file);
3380 sys_close(fd);
3381 return 0;
3382 }
3383
3384 asmlinkage long
3385 sys32_ioperm (unsigned int from, unsigned int num, int on)
3386 {
3387
3388 /*
3389 * Since IA64 doesn't have permission bits we'd have to go to
3390 * a lot of trouble to simulate them in software. There's
3391 * no point, only trusted programs can make this call so we'll
3392 * just turn it into an iopl call and let the process have
3393 * access to all I/O ports.
3394 *
3395 * XXX proper ioperm() support should be emulated by
3396 * manipulating the page protections...
3397 */
3398 return sys32_iopl(3);
3399 }
3400
3401 typedef struct {
3402 unsigned int ss_sp;
3403 unsigned int ss_flags;
3404 unsigned int ss_size;
3405 } ia32_stack_t;
3406
3407 asmlinkage long
3408 sys32_sigaltstack (ia32_stack_t *uss32, ia32_stack_t *uoss32,
3409 long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, long stack)
3410 {
3411 struct pt_regs *pt = (struct pt_regs *) &stack;
3412 stack_t uss, uoss;
3413 ia32_stack_t buf32;
3414 int ret;
3415 mm_segment_t old_fs = get_fs();
3416
3417 if (uss32) {
3418 if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
3419 return -EFAULT;
3420 uss.ss_sp = (void *) (long) buf32.ss_sp;
3421 uss.ss_flags = buf32.ss_flags;
3422 /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
3423 check and set it to the user requested value later */
3424 if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
3425 ret = -ENOMEM;
3426 goto out;
3427 }
3428 uss.ss_size = MINSIGSTKSZ;
}
3429 set_fs(KERNEL_DS);
3430 ret = do_sigaltstack(uss32 ? &uss : NULL, &uoss, pt->r12);
3431 if (uss32)
current->sas_ss_size = buf32.ss_size;
3432 set_fs(old_fs);
3433 out:
3434 if (ret < 0)
3435 return(ret);
3436 if (uoss32) {
3437 buf32.ss_sp = (long) uoss.ss_sp;
3438 buf32.ss_flags = uoss.ss_flags;
3439 buf32.ss_size = uoss.ss_size;
3440 if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
3441 return -EFAULT;
3442 }
3443 return ret;
3444 }
3445
3446 asmlinkage int
3447 sys32_pause (void)
3448 {
3449 current->state = TASK_INTERRUPTIBLE;
3450 schedule();
3451 return -ERESTARTNOHAND;
3452 }
3453
3454 asmlinkage long sys_msync (unsigned long start, size_t len, int flags);
3455
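/*
 * An ia32 caller may pass an address that is only 4KB aligned.  Round it
 * down to the start of the (larger) native page and grow the length by the
 * same amount so that sys_msync() still covers the requested range.
 */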
3456 asmlinkage int
3457 sys32_msync (unsigned int start, unsigned int len, int flags)
3458 {
3459 unsigned int addr;
3460
3461 if (OFFSET4K(start))
3462 return -EINVAL;
3463 addr = PAGE_START(start);
3464 return sys_msync(addr, len + (start - addr), flags);
3465 }
3466
3467 struct sysctl32 {
3468 unsigned int name;
3469 int nlen;
3470 unsigned int oldval;
3471 unsigned int oldlenp;
3472 unsigned int newval;
3473 unsigned int newlen;
3474 unsigned int __unused[4];
3475 };
3476
3477 extern asmlinkage long sys_sysctl(struct __sysctl_args *args);
3478
3479 asmlinkage long
3480 sys32_sysctl (struct sysctl32 *args)
3481 {
3482 #ifdef CONFIG_SYSCTL
3483 struct sysctl32 a32;
3484 mm_segment_t old_fs = get_fs ();
3485 void *oldvalp, *newvalp;
3486 size_t oldlen;
3487 int *namep;
3488 long ret;
3489
3490 if (copy_from_user(&a32, args, sizeof(a32)))
3491 return -EFAULT;
3492
3493 /*
3494 * We need to pre-validate these because we have to disable address checking
3495 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
3496 * user specifying bad addresses here. Well, since we're dealing with 32 bit
3497 * addresses, we KNOW that access_ok() will always succeed, so this is an
3498 * expensive NOP, but so what...
3499 */
3500 namep = (int *) A(a32.name);
3501 oldvalp = (void *) A(a32.oldval);
3502 newvalp = (void *) A(a32.newval);
3503
3504 if ((oldvalp && get_user(oldlen, (int *) A(a32.oldlenp)))
3505 || !access_ok(VERIFY_WRITE, namep, 0)
3506 || !access_ok(VERIFY_WRITE, oldvalp, 0)
3507 || !access_ok(VERIFY_WRITE, newvalp, 0))
3508 return -EFAULT;
3509
3510 set_fs(KERNEL_DS);
3511 lock_kernel();
3512 ret = do_sysctl(namep, a32.nlen, oldvalp, &oldlen, newvalp, (size_t) a32.newlen);
3513 unlock_kernel();
3514 set_fs(old_fs);
3515
3516 if (oldvalp && put_user (oldlen, (int *) A(a32.oldlenp)))
3517 return -EFAULT;
3518
3519 return ret;
3520 #else
3521 return -ENOSYS;
3522 #endif
3523 }
3524
3525 asmlinkage long
3526 sys32_newuname (struct new_utsname *name)
3527 {
3528 extern asmlinkage long sys_newuname(struct new_utsname * name);
3529 int ret = sys_newuname(name);
3530
3531 if (!ret)
3532 if (copy_to_user(name->machine, "i686\0\0\0", 8))
3533 ret = -EFAULT;
3534 return ret;
3535 }
3536
3537 extern asmlinkage long sys_getresuid (uid_t *ruid, uid_t *euid, uid_t *suid);
3538
3539 asmlinkage long
3540 sys32_getresuid16 (u16 *ruid, u16 *euid, u16 *suid)
3541 {
3542 uid_t a, b, c;
3543 int ret;
3544 mm_segment_t old_fs = get_fs();
3545
3546 set_fs(KERNEL_DS);
3547 ret = sys_getresuid(&a, &b, &c);
3548 set_fs(old_fs);
3549
3550 if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
3551 return -EFAULT;
3552 return ret;
3553 }
3554
3555 extern asmlinkage long sys_getresgid (gid_t *rgid, gid_t *egid, gid_t *sgid);
3556
3557 asmlinkage long
3558 sys32_getresgid16 (u16 *rgid, u16 *egid, u16 *sgid)
3559 {
3560 gid_t a, b, c;
3561 int ret;
3562 mm_segment_t old_fs = get_fs();
3563
3564 set_fs(KERNEL_DS);
3565 ret = sys_getresgid(&a, &b, &c);
3566 set_fs(old_fs);
3567
3568 if (ret)
3569 return ret;
3570
3571 return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
3572 }
3573
3574 asmlinkage long
3575 sys32_lseek (unsigned int fd, int offset, unsigned int whence)
3576 {
3577 extern off_t sys_lseek (unsigned int fd, off_t offset, unsigned int origin);
3578
3579 /* Sign-extension of "offset" is important here... */
3580 return sys_lseek(fd, offset, whence);
3581 }
3582
3583 extern asmlinkage long sys_getgroups (int gidsetsize, gid_t *grouplist);
3584
3585 asmlinkage long
3586 sys32_getgroups16 (int gidsetsize, short *grouplist)
3587 {
3588 mm_segment_t old_fs = get_fs();
3589 gid_t gl[NGROUPS];
3590 int ret, i;
3591
3592 set_fs(KERNEL_DS);
3593 ret = sys_getgroups(gidsetsize, gl);
3594 set_fs(old_fs);
3595
3596 if (gidsetsize && ret > 0 && ret <= NGROUPS)
3597 for (i = 0; i < ret; i++, grouplist++)
3598 if (put_user(gl[i], grouplist))
3599 return -EFAULT;
3600 return ret;
3601 }
3602
3603 extern asmlinkage long sys_setgroups (int gidsetsize, gid_t *grouplist);
3604
3605 asmlinkage long
3606 sys32_setgroups16 (int gidsetsize, short *grouplist)
3607 {
3608 mm_segment_t old_fs = get_fs();
3609 gid_t gl[NGROUPS];
3610 int ret, i;
3611
3612 if ((unsigned) gidsetsize > NGROUPS)
3613 return -EINVAL;
3614 for (i = 0; i < gidsetsize; i++, grouplist++)
3615 if (get_user(gl[i], grouplist))
3616 return -EFAULT;
3617 set_fs(KERNEL_DS);
3618 ret = sys_setgroups(gidsetsize, gl);
3619 set_fs(old_fs);
3620 return ret;
3621 }
3622
3623 /*
3624 * Unfortunately, the x86 compiler aligns variables of type "long long" to a 4 byte boundary
3625 * only, which means that the x86 version of "struct flock64" doesn't match the ia64 version
3626 * of struct flock.
3627 */
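/*
 * Concretely, the ia32 flock64 layout that ia32_put_flock()/ia32_get_flock()
 * below decode by hand uses these byte offsets:
 *
 *	offset  0: short     l_type
 *	offset  2: short     l_whence
 *	offset  4: long long l_start
 *	offset 12: long long l_len
 *	offset 20: int       l_pid
 */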
3628
3629 static inline long
3630 ia32_put_flock (struct flock *l, unsigned long addr)
3631 {
3632 return (put_user(l->l_type, (short *) addr)
3633 | put_user(l->l_whence, (short *) (addr + 2))
3634 | put_user(l->l_start, (long *) (addr + 4))
3635 | put_user(l->l_len, (long *) (addr + 12))
3636 | put_user(l->l_pid, (int *) (addr + 20)));
3637 }
3638
3639 static inline long
3640 ia32_get_flock (struct flock *l, unsigned long addr)
3641 {
3642 unsigned int start_lo, start_hi, len_lo, len_hi;
3643 int err = (get_user(l->l_type, (short *) addr)
3644 | get_user(l->l_whence, (short *) (addr + 2))
3645 | get_user(start_lo, (int *) (addr + 4))
3646 | get_user(start_hi, (int *) (addr + 8))
3647 | get_user(len_lo, (int *) (addr + 12))
3648 | get_user(len_hi, (int *) (addr + 16))
3649 | get_user(l->l_pid, (int *) (addr + 20)));
3650 l->l_start = ((unsigned long) start_hi << 32) | start_lo;
3651 l->l_len = ((unsigned long) len_hi << 32) | len_lo;
3652 return err;
3653 }
3654
3655 asmlinkage long
3656 sys32_fcntl64 (unsigned int fd, unsigned int cmd, unsigned int arg)
3657 {
3658 mm_segment_t old_fs;
3659 struct flock f;
3660 long ret;
3661
3662 switch (cmd) {
3663 case F_GETLK64:
3664 case F_SETLK64:
3665 case F_SETLKW64:
3666 if (ia32_get_flock(&f, arg))
3667 return -EFAULT;
3668 old_fs = get_fs();
3669 set_fs(KERNEL_DS);
3670 ret = sys_fcntl(fd, (cmd == F_GETLK64) ? F_GETLK :
3671 ((cmd == F_SETLK64) ? F_SETLK : F_SETLKW),
3672 (unsigned long) &f);
3673 set_fs(old_fs);
3674 if (cmd == F_GETLK64 && ia32_put_flock(&f, arg))
3675 return -EFAULT;
3676 break;
3677
3678 default:
3679 ret = sys32_fcntl(fd, cmd, arg);
3680 break;
3681 }
3682 return ret;
3683 }
3684
3685 asmlinkage long
3686 sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
3687 {
3688 extern asmlinkage long sys_truncate (const char *path, unsigned long length);
3689
3690 return sys_truncate((const char *) A(path), ((unsigned long) len_hi << 32) | len_lo);
3691 }
3692
3693 asmlinkage long
3694 sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
3695 {
3696 extern asmlinkage long sys_ftruncate (int fd, unsigned long length);
3697
3698 return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
3699 }
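/*
 * Illustrative arithmetic (not part of the call path): the 64-bit
 * length arrives from the 32-bit side as two words, low half first, so
 * len_lo == 0 with len_hi == 1 reassembles to
 * ((unsigned long) 1 << 32) | 0 == 0x100000000, i.e. 4 GiB.
 * sys32_pread()/sys32_pwrite() further down use the same lo/hi
 * convention for the file offset.
 */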
3700
3701 static int
3702 putstat64 (struct stat64 *ubuf, struct stat *kbuf)
3703 {
3704 int err;
3705
3706 if (clear_user(ubuf, sizeof(*ubuf)))
3707 return 1;
3708
3709 err = __put_user(kbuf->st_dev, &ubuf->st_dev);
3710 err |= __put_user(kbuf->st_ino, &ubuf->__st_ino);
3711 err |= __put_user(kbuf->st_ino, &ubuf->st_ino_lo);
3712 err |= __put_user(kbuf->st_ino >> 32, &ubuf->st_ino_hi);
3713 err |= __put_user(kbuf->st_mode, &ubuf->st_mode);
3714 err |= __put_user(kbuf->st_nlink, &ubuf->st_nlink);
3715 err |= __put_user(kbuf->st_uid, &ubuf->st_uid);
3716 err |= __put_user(kbuf->st_gid, &ubuf->st_gid);
3717 err |= __put_user(kbuf->st_rdev, &ubuf->st_rdev);
3718 err |= __put_user(kbuf->st_size, &ubuf->st_size_lo);
3719 err |= __put_user((kbuf->st_size >> 32), &ubuf->st_size_hi);
3720 err |= __put_user(kbuf->st_atime, &ubuf->st_atime);
3721 err |= __put_user(kbuf->st_mtime, &ubuf->st_mtime);
3722 err |= __put_user(kbuf->st_ctime, &ubuf->st_ctime);
3723 err |= __put_user(kbuf->st_blksize, &ubuf->st_blksize);
3724 err |= __put_user(kbuf->st_blocks, &ubuf->st_blocks);
3725 return err;
3726 }
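/*
 * putstat64() goes the opposite way and splits 64-bit kernel values
 * into lo/hi halves of the ia32 struct stat64.  With a made-up inode
 * number of 0x100000002a, for instance, st_ino_lo receives 0x2a,
 * st_ino_hi receives 0x1, and the legacy __st_ino field keeps only the
 * truncated low 32 bits.
 */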
3727
3728 asmlinkage long
3729 sys32_stat64 (char *filename, struct stat64 *statbuf)
3730 {
3731 char *name;
3732 mm_segment_t old_fs = get_fs();
3733 struct stat s;
3734 long ret;
3735
3736 name = getname(filename);
3737 if (IS_ERR(name))
3738 return PTR_ERR(name);
3739 set_fs(KERNEL_DS);
3740 ret = sys_newstat(name, &s);
3741 set_fs(old_fs);
3742 putname(name);
3743 if (putstat64(statbuf, &s))
3744 return -EFAULT;
3745 return ret;
3746 }
3747
3748 asmlinkage long
3749 sys32_lstat64 (char *filename, struct stat64 *statbuf)
3750 {
3751 char *name;
3752 mm_segment_t old_fs = get_fs();
3753 struct stat s;
3754 long ret;
3755
3756 name = getname(filename);
3757 if (IS_ERR(name))
3758 return PTR_ERR(name);
3759 set_fs(KERNEL_DS);
3760 ret = sys_newlstat(name, &s);
3761 set_fs(old_fs);
3762 putname(name);
3763 if (putstat64(statbuf, &s))
3764 return -EFAULT;
3765 return ret;
3766 }
3767
3768 asmlinkage long
3769 sys32_fstat64 (unsigned int fd, struct stat64 *statbuf)
3770 {
3771 mm_segment_t old_fs = get_fs();
3772 struct stat s;
3773 long ret;
3774
3775 set_fs(KERNEL_DS);
3776 ret = sys_newfstat(fd, &s);
3777 set_fs(old_fs);
3778 if (putstat64(statbuf, &s))
3779 return -EFAULT;
3780 return ret;
3781 }
3782
3783 asmlinkage long
3784 sys32_sigpending (unsigned int *set)
3785 {
3786 return do_sigpending(set, sizeof(*set));
3787 }
3788
3789 struct sysinfo32 {
3790 s32 uptime;
3791 u32 loads[3];
3792 u32 totalram;
3793 u32 freeram;
3794 u32 sharedram;
3795 u32 bufferram;
3796 u32 totalswap;
3797 u32 freeswap;
3798 u16 procs;
3799 u16 pad;
3800 u32 totalhigh;
3801 u32 freehigh;
3802 u32 mem_unit;
3803 char _f[8];
3804 };
3805
3806 asmlinkage long
3807 sys32_sysinfo (struct sysinfo32 *info)
3808 {
3809 extern asmlinkage long sys_sysinfo (struct sysinfo *);
3810 struct sysinfo s;
3811 long ret, err;
3812 int bitcount = 0;
3813 mm_segment_t old_fs = get_fs();
3814
3815 set_fs(KERNEL_DS);
3816 ret = sys_sysinfo(&s);
3817 set_fs(old_fs);
3818 /* Check to see if any memory value is too large for 32-bit and
3819 * scale down if needed.
3820 */
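	/*
	 * Worked example with illustrative numbers: 16 GiB of RAM with
	 * mem_unit == 1 gives s.totalram == 2^34, which does not fit in a
	 * u32.  Doubling mem_unit up to a 16 KiB PAGE_SIZE takes
	 * bitcount == 14 steps, after which totalram >>= 14 leaves 2^20
	 * units, well within 32 bits.
	 */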
3821 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
3822 while (s.mem_unit < PAGE_SIZE) {
3823 s.mem_unit <<= 1;
3824 bitcount++;
3825 }
3826 s.totalram >>= bitcount;
3827 s.freeram >>= bitcount;
3828 s.sharedram >>= bitcount;
3829 s.bufferram >>= bitcount;
3830 s.totalswap >>= bitcount;
3831 s.freeswap >>= bitcount;
3832 s.totalhigh >>= bitcount;
3833 s.freehigh >>= bitcount;
3834 }
3835
3836 if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
3837 return -EFAULT;
3838
3839 err = __put_user(s.uptime, &info->uptime);
3840 err |= __put_user(s.loads[0], &info->loads[0]);
3841 err |= __put_user(s.loads[1], &info->loads[1]);
3842 err |= __put_user(s.loads[2], &info->loads[2]);
3843 err |= __put_user(s.totalram, &info->totalram);
3844 err |= __put_user(s.freeram, &info->freeram);
3845 err |= __put_user(s.sharedram, &info->sharedram);
3846 err |= __put_user(s.bufferram, &info->bufferram);
3847 err |= __put_user(s.totalswap, &info->totalswap);
3848 err |= __put_user(s.freeswap, &info->freeswap);
3849 err |= __put_user(s.procs, &info->procs);
3850 err |= __put_user(s.totalhigh, &info->totalhigh);
3851 err |= __put_user(s.freehigh, &info->freehigh);
3852 err |= __put_user(s.mem_unit, &info->mem_unit);
3853 if (err)
3854 return -EFAULT;
3855 return ret;
3856 }
3857
3858 /* In order to reduce some races, while at the same time doing additional
3859 * checking and hopefully speeding things up, we copy filenames to the
3860  * kernel data space before using them.
3861 *
3862 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
3863 */
3864 static inline int
3865 do_getname32 (const char *filename, char *page)
3866 {
3867 int retval;
3868
3869 	/* 32-bit pointers will always be far below TASK_SIZE :)) */
3870 retval = strncpy_from_user((char *)page, (char *)filename, PAGE_SIZE);
3871 if (retval > 0) {
3872 if (retval < PAGE_SIZE)
3873 return 0;
3874 return -ENAMETOOLONG;
3875 } else if (!retval)
3876 retval = -ENOENT;
3877 return retval;
3878 }
3879
3880 static char *
3881 getname32 (const char *filename)
3882 {
3883 char *tmp, *result;
3884
3885 result = ERR_PTR(-ENOMEM);
3886 tmp = (char *)__get_free_page(GFP_KERNEL);
3887 if (tmp) {
3888 int retval = do_getname32(filename, tmp);
3889
3890 result = tmp;
3891 if (retval < 0) {
3892 putname(tmp);
3893 result = ERR_PTR(retval);
3894 }
3895 }
3896 return result;
3897 }
3898
3899 struct user_dqblk32 {
3900 __u32 dqb_bhardlimit;
3901 __u32 dqb_bsoftlimit;
3902 __u32 dqb_curblocks;
3903 __u32 dqb_ihardlimit;
3904 __u32 dqb_isoftlimit;
3905 __u32 dqb_curinodes;
3906 __kernel_time_t32 dqb_btime;
3907 __kernel_time_t32 dqb_itime;
3908 };
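/*
 * The in-place conversion in sys32_quotactl() below assumes that only
 * dqb_btime and dqb_itime change width relative to the native
 * v1c_mem_dqblk: the structure is copied wholesale and just those two
 * time fields are widened on the way in and narrowed on the way out.
 */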
3909
3910 asmlinkage long
3911 sys32_quotactl(int cmd, unsigned int special, int id, caddr_t addr)
3912 {
3913 extern asmlinkage long sys_quotactl (int, const char *, int, caddr_t);
3914 int cmds = cmd >> SUBCMDSHIFT;
3915 mm_segment_t old_fs;
3916 struct v1c_mem_dqblk d;
3917 char *spec;
3918 long err;
3919
3920 switch (cmds) {
3921 case Q_V1_GETQUOTA:
3922 break;
3923 case Q_V1_SETQUOTA:
3924 case Q_V1_SETUSE:
3925 case Q_V1_SETQLIM:
3926 if (copy_from_user(&d, addr, sizeof(struct user_dqblk32)))
3927 return -EFAULT;
3928 d.dqb_itime = ((struct user_dqblk32 *)&d)->dqb_itime;
3929 d.dqb_btime = ((struct user_dqblk32 *)&d)->dqb_btime;
3930 break;
3931 default:
3932 return sys_quotactl(cmd, (void *)A(special), id, addr);
3933 }
3934 spec = getname32((void *) A(special));
3935 err = PTR_ERR(spec);
3936 if (IS_ERR(spec))
3937 return err;
3938 old_fs = get_fs();
3939 set_fs(KERNEL_DS);
3940 err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d);
3941 set_fs(old_fs);
3942 putname(spec);
3943 if (cmds == Q_V1_GETQUOTA) {
3944 __kernel_time_t b = d.dqb_btime, i = d.dqb_itime;
3945 ((struct user_dqblk32 *)&d)->dqb_itime = i;
3946 ((struct user_dqblk32 *)&d)->dqb_btime = b;
3947 if (copy_to_user(addr, &d, sizeof(struct user_dqblk32)))
3948 return -EFAULT;
3949 }
3950 return err;
3951 }
3952
3953 asmlinkage long
3954 sys32_sched_rr_get_interval (pid_t pid, struct timespec32 *interval)
3955 {
3956 extern asmlinkage long sys_sched_rr_get_interval (pid_t, struct timespec *);
3957 mm_segment_t old_fs = get_fs();
3958 struct timespec t;
3959 long ret;
3960
3961 set_fs(KERNEL_DS);
3962 ret = sys_sched_rr_get_interval(pid, &t);
3963 set_fs(old_fs);
3964 if (put_user (t.tv_sec, &interval->tv_sec) || put_user (t.tv_nsec, &interval->tv_nsec))
3965 return -EFAULT;
3966 return ret;
3967 }
3968
3969 asmlinkage long
3970 sys32_pread (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
3971 {
3972 extern asmlinkage long sys_pread (unsigned int, char *, size_t, loff_t);
3973 return sys_pread(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
3974 }
3975
3976 asmlinkage long
3977 sys32_pwrite (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
3978 {
3979 extern asmlinkage long sys_pwrite (unsigned int, const char *, size_t, loff_t);
3980 return sys_pwrite(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
3981 }
3982
3983 asmlinkage long
3984 sys32_sendfile (int out_fd, int in_fd, int *offset, unsigned int count)
3985 {
3986 extern asmlinkage long sys_sendfile (int, int, off_t *, size_t);
3987 mm_segment_t old_fs = get_fs();
3988 long ret;
3989 off_t of;
3990
3991 if (offset && get_user(of, offset))
3992 return -EFAULT;
3993
3994 set_fs(KERNEL_DS);
3995 ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
3996 set_fs(old_fs);
3997
3998 if (!ret && offset && put_user(of, offset))
3999 return -EFAULT;
4000
4001 return ret;
4002 }
4003
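/*
 * PER_LINUX32 handling: an ia32 task asking for PER_LINUX is quietly
 * kept at PER_LINUX32, and a previous personality of PER_LINUX32 is
 * reported back as plain PER_LINUX, so 32-bit binaries never see the
 * 64-bit-only value.  E.g. personality(PER_LINUX) from such a task
 * returns PER_LINUX even though the task keeps running as PER_LINUX32.
 */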
4004 asmlinkage long
4005 sys32_personality (unsigned int personality)
4006 {
4007 extern asmlinkage long sys_personality (unsigned long);
4008 long ret;
4009
4010 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
4011 personality = PER_LINUX32;
4012 ret = sys_personality(personality);
4013 if (ret == PER_LINUX32)
4014 ret = PER_LINUX;
4015 return ret;
4016 }
4017
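/*
 * When the break shrinks, the rest of the (ia64-sized) page that now
 * lies above the new break is cleared, presumably so that stale data
 * does not reappear if the ia32 process later moves the break back up
 * over the same page.
 */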
4018 asmlinkage unsigned long
4019 sys32_brk (unsigned int brk)
4020 {
4021 unsigned long ret, obrk;
4022 struct mm_struct *mm = current->mm;
4023
4024 obrk = mm->brk;
4025 ret = sys_brk(brk);
4026 if (ret < obrk)
4027 clear_user((void *) ret, PAGE_ALIGN(ret) - ret);
4028 return ret;
4029 }
4030
4031 /*
4032 * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
4033 */
4034 asmlinkage long
4035 sys32_open (const char * filename, int flags, int mode)
4036 {
4037 char * tmp;
4038 int fd, error;
4039
4040 tmp = getname(filename);
4041 fd = PTR_ERR(tmp);
4042 if (!IS_ERR(tmp)) {
4043 fd = get_unused_fd();
4044 if (fd >= 0) {
4045 struct file *f = filp_open(tmp, flags, mode);
4046 error = PTR_ERR(f);
4047 if (IS_ERR(f))
4048 goto out_error;
4049 fd_install(fd, f);
4050 }
4051 out:
4052 putname(tmp);
4053 }
4054 return fd;
4055
4056 out_error:
4057 put_unused_fd(fd);
4058 fd = error;
4059 goto out;
4060 }
4061
4062 #ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
4063
4064 struct ncp_mount_data32 {
4065 int version;
4066 unsigned int ncp_fd;
4067 __kernel_uid_t32 mounted_uid;
4068 int wdog_pid;
4069 unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
4070 unsigned int time_out;
4071 unsigned int retry_count;
4072 unsigned int flags;
4073 __kernel_uid_t32 uid;
4074 __kernel_gid_t32 gid;
4075 __kernel_mode_t32 file_mode;
4076 __kernel_mode_t32 dir_mode;
4077 };
4078
4079 static void *
4080 do_ncp_super_data_conv(void *raw_data)
4081 {
4082 struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data;
4083 struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data;
4084
4085 n->dir_mode = n32->dir_mode;
4086 n->file_mode = n32->file_mode;
4087 n->gid = n32->gid;
4088 n->uid = n32->uid;
4089 memmove (n->mounted_vol, n32->mounted_vol,
4090 (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int)));
4091 n->wdog_pid = n32->wdog_pid;
4092 n->mounted_uid = n32->mounted_uid;
4093 return raw_data;
4094 }
4095
4096 struct smb_mount_data32 {
4097 int version;
4098 __kernel_uid_t32 mounted_uid;
4099 __kernel_uid_t32 uid;
4100 __kernel_gid_t32 gid;
4101 __kernel_mode_t32 file_mode;
4102 __kernel_mode_t32 dir_mode;
4103 };
4104
4105 static void *
4106 do_smb_super_data_conv(void *raw_data)
4107 {
4108 struct smb_mount_data *s = (struct smb_mount_data *)raw_data;
4109 struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data;
4110
4111 s->version = s32->version;
4112 s->mounted_uid = s32->mounted_uid;
4113 s->uid = s32->uid;
4114 s->gid = s32->gid;
4115 s->file_mode = s32->file_mode;
4116 s->dir_mode = s32->dir_mode;
4117 return raw_data;
4118 }
4119
4120 static int
4121 copy_mount_stuff_to_kernel(const void *user, unsigned long *kernel)
4122 {
4123 int i;
4124 unsigned long page;
4125 struct vm_area_struct *vma;
4126
4127 *kernel = 0;
4128 if(!user)
4129 return 0;
4130 vma = find_vma(current->mm, (unsigned long)user);
4131 if(!vma || (unsigned long)user < vma->vm_start)
4132 return -EFAULT;
4133 if(!(vma->vm_flags & VM_READ))
4134 return -EFAULT;
4135 i = vma->vm_end - (unsigned long) user;
4136 if(PAGE_SIZE <= (unsigned long) i)
4137 i = PAGE_SIZE - 1;
4138 if(!(page = __get_free_page(GFP_KERNEL)))
4139 return -ENOMEM;
4140 if(copy_from_user((void *) page, user, i)) {
4141 free_page(page);
4142 return -EFAULT;
4143 }
4144 *kernel = page;
4145 return 0;
4146 }
4147
4148 extern asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
4149 unsigned long new_flags, void *data);
4150
4151 #define SMBFS_NAME "smbfs"
4152 #define NCPFS_NAME "ncpfs"
4153
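/*
 * The mount data for smbfs and ncpfs is interpreted inside the kernel
 * and contains uid/gid/mode fields whose widths differ between the
 * ia32 and native ABIs, so those two filesystem types get their data
 * block converted in place below; every other type is passed straight
 * through to sys_mount() untouched.
 */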
4154 asmlinkage long
4155 sys32_mount(char *dev_name, char *dir_name, char *type,
4156 unsigned long new_flags, u32 data)
4157 {
4158 unsigned long type_page;
4159 int err, is_smb, is_ncp;
4160
4161 if(!capable(CAP_SYS_ADMIN))
4162 return -EPERM;
4163 is_smb = is_ncp = 0;
4164 err = copy_mount_stuff_to_kernel((const void *)type, &type_page);
4165 if(err)
4166 return err;
4167 if(type_page) {
4168 is_smb = !strcmp((char *)type_page, SMBFS_NAME);
4169 is_ncp = !strcmp((char *)type_page, NCPFS_NAME);
4170 }
4171 if(!is_smb && !is_ncp) {
4172 if(type_page)
4173 free_page(type_page);
4174 return sys_mount(dev_name, dir_name, type, new_flags,
4175 (void *)AA(data));
4176 } else {
4177 unsigned long dev_page, dir_page, data_page;
4178
4179 err = copy_mount_stuff_to_kernel((const void *)dev_name,
4180 &dev_page);
4181 if(err)
4182 goto out;
4183 err = copy_mount_stuff_to_kernel((const void *)dir_name,
4184 &dir_page);
4185 if(err)
4186 goto dev_out;
4187 err = copy_mount_stuff_to_kernel((const void *)AA(data),
4188 &data_page);
4189 if(err)
4190 goto dir_out;
4191 if(is_ncp)
4192 do_ncp_super_data_conv((void *)data_page);
4193 else if(is_smb)
4194 do_smb_super_data_conv((void *)data_page);
4195 else
4196 panic("The problem is here...");
4197 err = do_mount((char *)dev_page, (char *)dir_page,
4198 (char *)type_page, new_flags,
4199 (void *)data_page);
4200 if(data_page)
4201 free_page(data_page);
4202 dir_out:
4203 if(dir_page)
4204 free_page(dir_page);
4205 dev_out:
4206 if(dev_page)
4207 free_page(dev_page);
4208 out:
4209 if(type_page)
4210 free_page(type_page);
4211 return err;
4212 }
4213 }
4214
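/*
 * The set*uid/set*gid wrappers below widen the ia32 IDs to the native
 * types while preserving the "leave unchanged" sentinel: an argument of
 * (__kernel_uid_t32) -1 must become (uid_t) -1 rather than being
 * zero-extended into a large positive uid.
 */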
4215 extern asmlinkage long sys_setreuid(uid_t ruid, uid_t euid);
4216
4217 asmlinkage long sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid)
4218 {
4219 uid_t sruid, seuid;
4220
4221 sruid = (ruid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)ruid);
4222 seuid = (euid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)euid);
4223 return sys_setreuid(sruid, seuid);
4224 }
4225
4226 extern asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
4227
4228 asmlinkage long
4229 sys32_setresuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid,
4230 __kernel_uid_t32 suid)
4231 {
4232 uid_t sruid, seuid, ssuid;
4233
4234 sruid = (ruid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)ruid);
4235 seuid = (euid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)euid);
4236 ssuid = (suid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)suid);
4237 return sys_setresuid(sruid, seuid, ssuid);
4238 }
4239
4240 extern asmlinkage long sys_setregid(gid_t rgid, gid_t egid);
4241
4242 asmlinkage long
4243 sys32_setregid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid)
4244 {
4245 gid_t srgid, segid;
4246
4247 srgid = (rgid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)rgid);
4248 segid = (egid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)egid);
4249 return sys_setregid(srgid, segid);
4250 }
4251
4252 extern asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);
4253
4254 asmlinkage long
4255 sys32_setresgid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid,
4256 __kernel_gid_t32 sgid)
4257 {
4258 gid_t srgid, segid, ssgid;
4259
4260 srgid = (rgid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)rgid);
4261 segid = (egid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)egid);
4262 ssgid = (sgid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)sgid);
4263 return sys_setresgid(srgid, segid, ssgid);
4264 }
4265
4266 /* Stuff for NFS server syscalls... */
4267 struct nfsctl_svc32 {
4268 u16 svc32_port;
4269 s32 svc32_nthreads;
4270 };
4271
4272 struct nfsctl_client32 {
4273 s8 cl32_ident[NFSCLNT_IDMAX+1];
4274 s32 cl32_naddr;
4275 struct in_addr cl32_addrlist[NFSCLNT_ADDRMAX];
4276 s32 cl32_fhkeytype;
4277 s32 cl32_fhkeylen;
4278 u8 cl32_fhkey[NFSCLNT_KEYMAX];
4279 };
4280
4281 struct nfsctl_export32 {
4282 s8 ex32_client[NFSCLNT_IDMAX+1];
4283 s8 ex32_path[NFS_MAXPATHLEN+1];
4284 __kernel_dev_t32 ex32_dev;
4285 __kernel_ino_t32 ex32_ino;
4286 s32 ex32_flags;
4287 __kernel_uid_t32 ex32_anon_uid;
4288 __kernel_gid_t32 ex32_anon_gid;
4289 };
4290
4291 struct nfsctl_uidmap32 {
4292 u32 ug32_ident; /* char * */
4293 __kernel_uid_t32 ug32_uidbase;
4294 s32 ug32_uidlen;
4295 u32 ug32_udimap; /* uid_t * */
4296 __kernel_uid_t32 ug32_gidbase;
4297 s32 ug32_gidlen;
4298 u32 ug32_gdimap; /* gid_t * */
4299 };
4300
4301 struct nfsctl_fhparm32 {
4302 struct sockaddr gf32_addr;
4303 __kernel_dev_t32 gf32_dev;
4304 __kernel_ino_t32 gf32_ino;
4305 s32 gf32_version;
4306 };
4307
4308 struct nfsctl_arg32 {
4309 s32 ca32_version; /* safeguard */
4310 union {
4311 struct nfsctl_svc32 u32_svc;
4312 struct nfsctl_client32 u32_client;
4313 struct nfsctl_export32 u32_export;
4314 struct nfsctl_uidmap32 u32_umap;
4315 struct nfsctl_fhparm32 u32_getfh;
4316 u32 u32_debug;
4317 } u;
4318 #define ca32_svc u.u32_svc
4319 #define ca32_client u.u32_client
4320 #define ca32_export u.u32_export
4321 #define ca32_umap u.u32_umap
4322 #define ca32_getfh u.u32_getfh
4323 #define ca32_authd u.u32_authd
4324 #define ca32_debug u.u32_debug
4325 };
4326
4327 union nfsctl_res32 {
4328 struct knfs_fh cr32_getfh;
4329 u32 cr32_debug;
4330 };
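/*
 * The sys32_nfsservctl() translation follows the usual pattern: one
 * helper per NFSCTL_* command widens the 32-bit nfsctl_arg32 into a
 * native nfsctl_arg, the real sys_nfsservctl() runs under KERNEL_DS,
 * and only NFSCTL_GETFH produces a result that has to be converted
 * back into the 32-bit layout.
 */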
4331
4332 static int
4333 nfs_svc32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
4334 {
4335 int err;
4336
4337 err = __get_user(karg->ca_version, &arg32->ca32_version);
4338 err |= __get_user(karg->ca_svc.svc_port, &arg32->ca32_svc.svc32_port);
4339 err |= __get_user(karg->ca_svc.svc_nthreads,
4340 &arg32->ca32_svc.svc32_nthreads);
4341 return err;
4342 }
4343
4344 static int
4345 nfs_clnt32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
4346 {
4347 int err;
4348
4349 err = __get_user(karg->ca_version, &arg32->ca32_version);
4350 err |= copy_from_user(&karg->ca_client.cl_ident[0],
4351 &arg32->ca32_client.cl32_ident[0],
4352 NFSCLNT_IDMAX);
4353 err |= __get_user(karg->ca_client.cl_naddr,
4354 &arg32->ca32_client.cl32_naddr);
4355 err |= copy_from_user(&karg->ca_client.cl_addrlist[0],
4356 &arg32->ca32_client.cl32_addrlist[0],
4357 (sizeof(struct in_addr) * NFSCLNT_ADDRMAX));
4358 err |= __get_user(karg->ca_client.cl_fhkeytype,
4359 &arg32->ca32_client.cl32_fhkeytype);
4360 err |= __get_user(karg->ca_client.cl_fhkeylen,
4361 &arg32->ca32_client.cl32_fhkeylen);
4362 err |= copy_from_user(&karg->ca_client.cl_fhkey[0],
4363 &arg32->ca32_client.cl32_fhkey[0],
4364 NFSCLNT_KEYMAX);
4365 return err;
4366 }
4367
4368 static int
4369 nfs_exp32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
4370 {
4371 int err;
4372
4373 err = __get_user(karg->ca_version, &arg32->ca32_version);
4374 err |= copy_from_user(&karg->ca_export.ex_client[0],
4375 &arg32->ca32_export.ex32_client[0],
4376 NFSCLNT_IDMAX);
4377 err |= copy_from_user(&karg->ca_export.ex_path[0],
4378 &arg32->ca32_export.ex32_path[0],
4379 NFS_MAXPATHLEN);
4380 err |= __get_user(karg->ca_export.ex_dev,
4381 &arg32->ca32_export.ex32_dev);
4382 err |= __get_user(karg->ca_export.ex_ino,
4383 &arg32->ca32_export.ex32_ino);
4384 err |= __get_user(karg->ca_export.ex_flags,
4385 &arg32->ca32_export.ex32_flags);
4386 err |= __get_user(karg->ca_export.ex_anon_uid,
4387 &arg32->ca32_export.ex32_anon_uid);
4388 err |= __get_user(karg->ca_export.ex_anon_gid,
4389 &arg32->ca32_export.ex32_anon_gid);
4390 return err;
4391 }
4392
4393 static int
4394 nfs_uud32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
4395 {
4396 u32 uaddr;
4397 int i;
4398 int err;
4399
4400 memset(karg, 0, sizeof(*karg));
4401 if(__get_user(karg->ca_version, &arg32->ca32_version))
4402 return -EFAULT;
4403 	karg->ca_umap.ug_ident = (char *) kmalloc(PAGE_SIZE, GFP_USER);	/* freed with kfree() by the caller */
4404 if(!karg->ca_umap.ug_ident)
4405 return -ENOMEM;
4406 err = __get_user(uaddr, &arg32->ca32_umap.ug32_ident);
4407 if(strncpy_from_user(karg->ca_umap.ug_ident,
4408 (char *)A(uaddr), PAGE_SIZE) <= 0)
4409 return -EFAULT;
4410 err |= __get_user(karg->ca_umap.ug_uidbase,
4411 &arg32->ca32_umap.ug32_uidbase);
4412 err |= __get_user(karg->ca_umap.ug_uidlen,
4413 &arg32->ca32_umap.ug32_uidlen);
4414 err |= __get_user(uaddr, &arg32->ca32_umap.ug32_udimap);
4415 if (err)
4416 return -EFAULT;
4417 karg->ca_umap.ug_udimap = kmalloc((sizeof(uid_t) *
4418 karg->ca_umap.ug_uidlen),
4419 GFP_USER);
4420 if(!karg->ca_umap.ug_udimap)
4421 return -ENOMEM;
4422 for(i = 0; i < karg->ca_umap.ug_uidlen; i++)
4423 err |= __get_user(karg->ca_umap.ug_udimap[i],
4424 &(((__kernel_uid_t32 *)A(uaddr))[i]));
4425 err |= __get_user(karg->ca_umap.ug_gidbase,
4426 &arg32->ca32_umap.ug32_gidbase);
4427 	err |= __get_user(karg->ca_umap.ug_gidlen,
4428 			  &arg32->ca32_umap.ug32_gidlen);
4429 err |= __get_user(uaddr, &arg32->ca32_umap.ug32_gdimap);
4430 if (err)
4431 return -EFAULT;
4432 karg->ca_umap.ug_gdimap = kmalloc((sizeof(gid_t) *
4433 					   karg->ca_umap.ug_gidlen),
4434 GFP_USER);
4435 if(!karg->ca_umap.ug_gdimap)
4436 return -ENOMEM;
4437 for(i = 0; i < karg->ca_umap.ug_gidlen; i++)
4438 err |= __get_user(karg->ca_umap.ug_gdimap[i],
4439 &(((__kernel_gid_t32 *)A(uaddr))[i]));
4440
4441 return err;
4442 }
4443
4444 static int
4445 nfs_getfh32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
4446 {
4447 int err;
4448
4449 err = __get_user(karg->ca_version, &arg32->ca32_version);
4450 err |= copy_from_user(&karg->ca_getfh.gf_addr,
4451 &arg32->ca32_getfh.gf32_addr,
4452 (sizeof(struct sockaddr)));
4453 err |= __get_user(karg->ca_getfh.gf_dev,
4454 &arg32->ca32_getfh.gf32_dev);
4455 err |= __get_user(karg->ca_getfh.gf_ino,
4456 &arg32->ca32_getfh.gf32_ino);
4457 err |= __get_user(karg->ca_getfh.gf_version,
4458 &arg32->ca32_getfh.gf32_version);
4459 return err;
4460 }
4461
4462 static int
4463 nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32)
4464 {
4465 int err;
4466
4467 err = copy_to_user(&res32->cr32_getfh,
4468 &kres->cr_getfh,
4469 sizeof(res32->cr32_getfh));
4470 err |= __put_user(kres->cr_debug, &res32->cr32_debug);
4471 return err;
4472 }
4473
4474 extern asmlinkage long sys_nfsservctl(int cmd, void *arg, void *resp);
4475
4476 int asmlinkage
4477 sys32_nfsservctl(int cmd, struct nfsctl_arg32 *arg32, union nfsctl_res32 *res32)
4478 {
4479 struct nfsctl_arg *karg = NULL;
4480 union nfsctl_res *kres = NULL;
4481 mm_segment_t oldfs;
4482 int err;
4483
4484 karg = kmalloc(sizeof(*karg), GFP_USER);
4485 if(!karg)
4486 return -ENOMEM;
4487 if(res32) {
4488 kres = kmalloc(sizeof(*kres), GFP_USER);
4489 if(!kres) {
4490 kfree(karg);
4491 return -ENOMEM;
4492 }
4493 }
4494 switch(cmd) {
4495 case NFSCTL_SVC:
4496 err = nfs_svc32_trans(karg, arg32);
4497 break;
4498 case NFSCTL_ADDCLIENT:
4499 err = nfs_clnt32_trans(karg, arg32);
4500 break;
4501 case NFSCTL_DELCLIENT:
4502 err = nfs_clnt32_trans(karg, arg32);
4503 break;
4504 case NFSCTL_EXPORT:
4505 err = nfs_exp32_trans(karg, arg32);
4506 break;
4507 	/* This one is unimplemented, but we're ready for it. */
4508 case NFSCTL_UGIDUPDATE:
4509 err = nfs_uud32_trans(karg, arg32);
4510 break;
4511 case NFSCTL_GETFH:
4512 err = nfs_getfh32_trans(karg, arg32);
4513 break;
4514 default:
4515 err = -EINVAL;
4516 break;
4517 }
4518 if(err)
4519 goto done;
4520 oldfs = get_fs();
4521 set_fs(KERNEL_DS);
4522 err = sys_nfsservctl(cmd, karg, kres);
4523 set_fs(oldfs);
4524
4525 if(!err && cmd == NFSCTL_GETFH)
4526 err = nfs_getfh32_res_trans(kres, res32);
4527
4528 done:
4529 if(karg) {
4530 if(cmd == NFSCTL_UGIDUPDATE) {
4531 if(karg->ca_umap.ug_ident)
4532 kfree(karg->ca_umap.ug_ident);
4533 if(karg->ca_umap.ug_udimap)
4534 kfree(karg->ca_umap.ug_udimap);
4535 if(karg->ca_umap.ug_gdimap)
4536 kfree(karg->ca_umap.ug_gdimap);
4537 }
4538 kfree(karg);
4539 }
4540 if(kres)
4541 kfree(kres);
4542 return err;
4543 }
4544
4545 /* Handle adjtimex compatibility. */
4546
4547 struct timex32 {
4548 u32 modes;
4549 s32 offset, freq, maxerror, esterror;
4550 s32 status, constant, precision, tolerance;
4551 struct timeval32 time;
4552 s32 tick;
4553 s32 ppsfreq, jitter, shift, stabil;
4554 s32 jitcnt, calcnt, errcnt, stbcnt;
4555 s32 :32; s32 :32; s32 :32; s32 :32;
4556 s32 :32; s32 :32; s32 :32; s32 :32;
4557 s32 :32; s32 :32; s32 :32; s32 :32;
4558 };
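/*
 * The anonymous "s32 :32" members above are padding mirroring the
 * reserved trailing ints of the user-visible struct timex, keeping
 * sizeof(struct timex32) in step with what an ia32 adjtimex() caller
 * hands in; only the named fields are actually converted below.
 */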
4559
4560 extern int do_adjtimex(struct timex *);
4561
4562 asmlinkage long
4563 sys32_adjtimex(struct timex32 *utp)
4564 {
4565 struct timex txc;
4566 int ret;
4567
4568 memset(&txc, 0, sizeof(struct timex));
4569
4570 if(get_user(txc.modes, &utp->modes) ||
4571 __get_user(txc.offset, &utp->offset) ||
4572 __get_user(txc.freq, &utp->freq) ||
4573 __get_user(txc.maxerror, &utp->maxerror) ||
4574 __get_user(txc.esterror, &utp->esterror) ||
4575 __get_user(txc.status, &utp->status) ||
4576 __get_user(txc.constant, &utp->constant) ||
4577 __get_user(txc.precision, &utp->precision) ||
4578 __get_user(txc.tolerance, &utp->tolerance) ||
4579 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
4580 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
4581 __get_user(txc.tick, &utp->tick) ||
4582 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
4583 __get_user(txc.jitter, &utp->jitter) ||
4584 __get_user(txc.shift, &utp->shift) ||
4585 __get_user(txc.stabil, &utp->stabil) ||
4586 __get_user(txc.jitcnt, &utp->jitcnt) ||
4587 __get_user(txc.calcnt, &utp->calcnt) ||
4588 __get_user(txc.errcnt, &utp->errcnt) ||
4589 __get_user(txc.stbcnt, &utp->stbcnt))
4590 return -EFAULT;
4591
4592 ret = do_adjtimex(&txc);
4593
4594 if(put_user(txc.modes, &utp->modes) ||
4595 __put_user(txc.offset, &utp->offset) ||
4596 __put_user(txc.freq, &utp->freq) ||
4597 __put_user(txc.maxerror, &utp->maxerror) ||
4598 __put_user(txc.esterror, &utp->esterror) ||
4599 __put_user(txc.status, &utp->status) ||
4600 __put_user(txc.constant, &utp->constant) ||
4601 __put_user(txc.precision, &utp->precision) ||
4602 __put_user(txc.tolerance, &utp->tolerance) ||
4603 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
4604 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
4605 __put_user(txc.tick, &utp->tick) ||
4606 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
4607 __put_user(txc.jitter, &utp->jitter) ||
4608 __put_user(txc.shift, &utp->shift) ||
4609 __put_user(txc.stabil, &utp->stabil) ||
4610 __put_user(txc.jitcnt, &utp->jitcnt) ||
4611 __put_user(txc.calcnt, &utp->calcnt) ||
4612 __put_user(txc.errcnt, &utp->errcnt) ||
4613 __put_user(txc.stbcnt, &utp->stbcnt))
4614 ret = -EFAULT;
4615
4616 return ret;
4617 }
4618 #endif /* NOTYET */
4619