/*
 * This file contains various system calls that have different calling
 * conventions on different platforms.
 *
 * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/file.h>		/* doh, must come after sched.h... */
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/hugetlb.h>

#include <asm/shmparam.h>
#include <asm/uaccess.h>

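/*
 * Pick a free chunk of address space for a new mapping.  On ia64 the
 * address space is divided into regions, and a mapping must not cross
 * a region boundary; that is why the loop below checks rgn_offset()
 * against RGN_MAP_LIMIT in addition to the usual TASK_SIZE check.
 */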
unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	long map_shared = (flags & MAP_SHARED);
	unsigned long align_mask = PAGE_SIZE - 1;
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
#ifdef CONFIG_HUGETLB_PAGE
	if (rgn_index(addr) == REGION_HPAGE)
		addr = 0;
#endif
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (map_shared && (TASK_SIZE > 0xfffffffful))
		/*
		 * For 64-bit tasks, align shared segments to 1MB to avoid potential
		 * performance penalty due to virtual aliasing (see ASDM).  For 32-bit
		 * tasks, we prefer to avoid exhausting the address space too quickly by
		 * limiting alignment to a single page.
		 */
		align_mask = SHMLBA - 1;

	addr = (addr + align_mask) & ~align_mask;

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (rgn_offset(addr) + len > RGN_MAP_LIMIT)	/* no risk of overflow here... */
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = (vmm->vm_end + align_mask) & ~align_mask;
	}
}

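/*
 * The generic sys_getpriority() returns the nice value biased by 20 so
 * that it never looks like an error.  We undo the bias here and instead
 * use force_successful_syscall_return() so that a negative nice value
 * is not mistaken for an error return.
 */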
asmlinkage long
ia64_getpriority (int which, int who)
{
	extern long sys_getpriority (int, int);
	long prio;

	prio = sys_getpriority(which, who);
	if (prio >= 0) {
		force_successful_syscall_return();
		prio = 20 - prio;
	}
	return prio;
}

/* XXX obsolete, but leave it here until the old libc is gone... */
asmlinkage unsigned long
sys_getpagesize (void)
{
	return PAGE_SIZE;
}

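/*
 * The generic sys_shmat() passes the attach address back through a
 * pointer argument; on ia64 we return it directly in r8.  Since a valid
 * address can have the "sign" bit set, force_successful_syscall_return()
 * is needed to keep it from looking like an error.
 */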
asmlinkage unsigned long
ia64_shmat (int shmid, void *shmaddr, int shmflg)
{
	extern int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr);
	unsigned long raddr;
	int retval;

	retval = sys_shmat(shmid, shmaddr, shmflg, &raddr);
	if (retval < 0)
		return retval;

	force_successful_syscall_return();
	return raddr;
}

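/*
 * Note the return convention: on both success and failure the current
 * break (mm->brk) is returned, and the caller compares it against the
 * value it requested.
 */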
asmlinkage unsigned long
ia64_brk (unsigned long brk)
{
	extern int vm_enough_memory (long pages);
	unsigned long rlim, retval, newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	/*
	 * Most of this replicates the code in sys_brk() except for an additional safety
	 * check and the clearing of r8.  However, we can't call sys_brk() because we need
	 * to acquire the mmap_sem before we can do the test...
	 */
	down_write(&mm->mmap_sem);

	if (brk < mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against unimplemented/unmapped addresses: */
	if ((newbrk - oldbrk) > RGN_MAP_LIMIT || rgn_offset(newbrk) > RGN_MAP_LIMIT)
		goto out;

	/* Check against rlimit.. */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
		goto out;

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Check if we have enough memory.. */
	if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	force_successful_syscall_return();
	return retval;
}

/*
 * On IA-64, we return the two file descriptors in ret0 and ret1 (r8
 * and r9) as this is faster than doing a copy_to_user().
 */
asmlinkage long
sys_pipe (long arg0, long arg1, long arg2, long arg3,
	  long arg4, long arg5, long arg6, long arg7, long stack)
{
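	/*
	 * The entry-path stub arranges the stacked arguments so that the
	 * address of the last one ("stack") coincides with the pt_regs
	 * frame on the kernel stack; hence the cast below.
	 */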
	struct pt_regs *regs = (struct pt_regs *) &stack;
	int fd[2];
	int retval;

	retval = do_pipe(fd);
	if (retval)
		goto out;
	retval = fd[0];
	regs->r9 = fd[1];
out:
	return retval;
}

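/*
 * Common backend for sys_mmap() and sys_mmap2(): the offset (pgoff) is
 * already expressed in pages, not bytes.
 */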
static inline unsigned long
do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
{
	unsigned long roff;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			return -EBADF;

		if (!file->f_op || !file->f_op->mmap) {
			addr = -ENODEV;
			goto out;
		}
	}

	/*
	 * A zero-length mmap always succeeds in Linux, independent of whether or not the
	 * remaining arguments are valid.
	 */
	len = PAGE_ALIGN(len);
	if (len == 0)
		goto out;

	/*
	 * Don't permit mappings into unmapped space, the virtual page table of a region,
	 * or across a region boundary.  Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
	 * (for some integer n <= 61) and len > 0.
	 */
	roff = rgn_offset(addr);
	if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
		addr = -EINVAL;
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

out:
	if (file)
		fput(file);
	return addr;
}

/*
 * mmap2() is like mmap() except that the offset is expressed in units
 * of PAGE_SIZE (instead of bytes).  This allows mmap2() to map (pieces
 * of) files that are larger than the address space of the CPU.
 */
asmlinkage unsigned long
sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
{
	addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
	if (!IS_ERR((void *) addr))
		force_successful_syscall_return();
	return addr;
}

asmlinkage unsigned long
sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
{
	if ((off & ~PAGE_MASK) != 0)
		return -EINVAL;

	addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
	if (!IS_ERR((void *) addr))
		force_successful_syscall_return();
	return addr;
}

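/*
 * As with mmap(), a successfully remapped address may look negative, so
 * clear r8 via force_successful_syscall_return() on the success path.
 */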
asmlinkage unsigned long
ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
	     unsigned long new_addr)
{
	extern unsigned long do_mremap (unsigned long addr,
					unsigned long old_len,
					unsigned long new_len,
					unsigned long flags,
					unsigned long new_addr);

	down_write(&current->mm->mmap_sem);
	addr = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) addr))
		return addr;

	force_successful_syscall_return();
	return addr;
}

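/* vm86() is an x86-ism; on ia64 it is a stub that logs the attempt and fails. */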
asmlinkage long
sys_vm86 (long arg0, long arg1, long arg2, long arg3)
{
	printk(KERN_ERR "sys_vm86(%lx, %lx, %lx, %lx)!\n", arg0, arg1, arg2, arg3);
	return -ENOSYS;
}

asmlinkage unsigned long
ia64_create_module (const char *name_user, size_t size, long arg2, long arg3,
		    long arg4, long arg5, long arg6, long arg7, long stack)
{
	extern unsigned long sys_create_module (const char *, size_t);
	struct pt_regs *regs = (struct pt_regs *) &stack;
	unsigned long   addr;

	addr = sys_create_module (name_user, size);
	if (!IS_ERR((void *) addr))
		regs->r8 = 0;	/* ensure large addresses are not mistaken as failures... */
	return addr;
}

#if 1
/*
 * This is here for a while to keep compatibility with the old stat()
 * call - it will be removed later once everybody migrates to the new
 * kernel stat structure that matches the glibc one - Jes
 */
static __inline__ int
do_revalidate (struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	if (inode->i_op && inode->i_op->revalidate)
		return inode->i_op->revalidate(dentry);
	return 0;
}

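/*
 * Convert the in-kernel inode fields to the legacy ia64_oldstat layout
 * and copy the result out to user space.
 */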
static int
cp_ia64_old_stat (struct inode *inode, struct ia64_oldstat *statbuf)
{
	struct ia64_oldstat tmp;
	unsigned int blocks, indirect;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = kdev_t_to_nr(inode->i_dev);
	tmp.st_ino = inode->i_ino;
	tmp.st_mode = inode->i_mode;
	tmp.st_nlink = inode->i_nlink;
	SET_STAT_UID(tmp, inode->i_uid);
	SET_STAT_GID(tmp, inode->i_gid);
	tmp.st_rdev = kdev_t_to_nr(inode->i_rdev);
	tmp.st_size = inode->i_size;
	tmp.st_atime = inode->i_atime;
	tmp.st_mtime = inode->i_mtime;
	tmp.st_ctime = inode->i_ctime;
/*
 * st_blocks and st_blksize are approximated with a simple algorithm if
 * they aren't supported directly by the filesystem.  The minix and msdos
 * filesystems don't keep track of blocks, so they would either have to
 * be counted explicitly (by delving into the file itself), or estimated
 * using this simple algorithm to get a reasonable (although not 100%
 * accurate) value.
 */

/*
 * Use minix fs values for the number of direct and indirect blocks.  The
 * count is now exact for the minix fs except that it counts zero blocks.
 * Everything is in units of BLOCK_SIZE until the assignment to
 * tmp.st_blksize.
 */
#define D_B   7
#define I_B   (BLOCK_SIZE / sizeof(unsigned short))
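
/*
 * Worked example, assuming the usual BLOCK_SIZE of 1024 (so I_B == 512):
 * a 100KB file needs 100 data blocks; since 100 > D_B, one indirect
 * block is added ((100 - 7 + 511)/512 == 1), giving 101 blocks, and
 * st_blocks becomes (1024/512) * 101 == 202 512-byte sectors.
 */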

	if (!inode->i_blksize) {
		blocks = (tmp.st_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
		if (blocks > D_B) {
			indirect = (blocks - D_B + I_B - 1) / I_B;
			blocks += indirect;
			if (indirect > 1) {
				indirect = (indirect - 1 + I_B - 1) / I_B;
				blocks += indirect;
				if (indirect > 1)
					blocks++;
			}
		}
		tmp.st_blocks = (BLOCK_SIZE / 512) * blocks;
		tmp.st_blksize = BLOCK_SIZE;
	} else {
		tmp.st_blocks = inode->i_blocks;
		tmp.st_blksize = inode->i_blksize;
	}
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

asmlinkage long
ia64_oldstat (char *filename, struct ia64_oldstat *statbuf)
{
	struct nameidata nd;
	int error;

	error = user_path_walk(filename, &nd);
	if (!error) {
		error = do_revalidate(nd.dentry);
		if (!error)
			error = cp_ia64_old_stat(nd.dentry->d_inode, statbuf);
		path_release(&nd);
	}
	return error;
}

asmlinkage long
ia64_oldlstat (char *filename, struct ia64_oldstat *statbuf)
{
	struct nameidata nd;
	int error;

	error = user_path_walk_link(filename, &nd);
	if (!error) {
		error = do_revalidate(nd.dentry);
		if (!error)
			error = cp_ia64_old_stat(nd.dentry->d_inode, statbuf);
		path_release(&nd);
	}
	return error;
}

asmlinkage long
ia64_oldfstat (unsigned int fd, struct ia64_oldstat *statbuf)
{
	struct file *f;
	int err = -EBADF;

	f = fget(fd);
	if (f) {
		struct dentry *dentry = f->f_dentry;

		err = do_revalidate(dentry);
		if (!err)
			err = cp_ia64_old_stat(dentry->d_inode, statbuf);
		fput(f);
	}
	return err;
}

#endif

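/*
 * Without PCI support, provide stubs so that the pciconfig syscall
 * table entries remain well-defined and simply fail with -ENOSYS.
 */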
#ifndef CONFIG_PCI

asmlinkage long
sys_pciconfig_read (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len,
		    void *buf)
{
	return -ENOSYS;
}

asmlinkage long
sys_pciconfig_write (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len,
		     void *buf)
{
	return -ENOSYS;
}

#endif /* !CONFIG_PCI */