1 /* linux/arch/sparc/kernel/sys_sparc.c
2 *
3 * This file contains various random system calls that
4 * have a non-standard calling sequence on the Linux/sparc
5 * platform.
6 */
7
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/sched.h>
11 #include <linux/mm.h>
12 #include <linux/fs.h>
13 #include <linux/file.h>
14 #include <linux/sem.h>
15 #include <linux/msg.h>
16 #include <linux/shm.h>
17 #include <linux/stat.h>
18 #include <linux/syscalls.h>
19 #include <linux/mman.h>
20 #include <linux/utsname.h>
21 #include <linux/smp.h>
22 #include <linux/ipc.h>
23
24 #include <asm/uaccess.h>
25 #include <asm/unistd.h>
26
27 /* #define DEBUG_UNIMP_SYSCALL */
28
29 /* XXX Make this per-binary type, this way we can detect the type of
30 * XXX a binary. Every Sparc executable calls this very early on.
31 */
/* Return the kernel's MMU page size to userspace.  Every SPARC
 * executable calls this very early on (see XXX note above about
 * using it for binary-type detection).
 */
asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}
36
37 #define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
38
/* Choose an unmapped user virtual address range of 'len' bytes.
 *
 * MAP_FIXED requests are returned as-is, except that a shared fixed
 * mapping whose (addr - file offset) is not SHMLBA-aligned is rejected
 * because it would violate cache aliasing constraints.  Otherwise we
 * walk the VMA list upward from the hint (or TASK_UNMAPPED_BASE) until
 * a hole large enough is found, keeping shared mappings colour-aligned
 * as we go.
 *
 * Returns the chosen address, or -ENOMEM / -EINVAL on failure.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct * vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	/* See asm-sparc/uaccess.h */
	if (len > TASK_SIZE - PAGE_SIZE)
		return -ENOMEM;
	/* sun4c: user mappings must fit below 0x20000000
	 * (same limit enforced in sparc_mmap_check()).
	 */
	if (ARCH_SUN4C && len > 0x20000000)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (ARCH_SUN4C && addr < 0xe0000000 && 0x20000000 - len < addr) {
			/* sun4c: range would cross the forbidden region;
			 * restart the search at PAGE_OFFSET.
			 */
			addr = PAGE_OFFSET;
			vmm = find_vma(current->mm, PAGE_OFFSET);
		}
		if (TASK_SIZE - PAGE_SIZE - len < addr)
			return -ENOMEM;
		/* Hole before this VMA (or past the last one) is big enough. */
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr);
	}
}
81
82 /*
83 * sys_pipe() is the normal C calling standard for creating
84 * a pipe. It's not the way unix traditionally does this, though.
85 */
sparc_pipe(struct pt_regs * regs)86 asmlinkage int sparc_pipe(struct pt_regs *regs)
87 {
88 int fd[2];
89 int error;
90
91 error = do_pipe_flags(fd, 0);
92 if (error)
93 goto out;
94 regs->u_regs[UREG_I1] = fd[1];
95 error = fd[0];
96 out:
97 return error;
98 }
99
sparc_mmap_check(unsigned long addr,unsigned long len)100 int sparc_mmap_check(unsigned long addr, unsigned long len)
101 {
102 if (ARCH_SUN4C &&
103 (len > 0x20000000 ||
104 (addr < 0xe0000000 && addr + len > 0x20000000)))
105 return -EINVAL;
106
107 /* See asm-sparc/uaccess.h */
108 if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
109 return -EINVAL;
110
111 return 0;
112 }
113
114 /* Linux version of mmap */
115
sys_mmap2(unsigned long addr,unsigned long len,unsigned long prot,unsigned long flags,unsigned long fd,unsigned long pgoff)116 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
117 unsigned long prot, unsigned long flags, unsigned long fd,
118 unsigned long pgoff)
119 {
120 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
121 we have. */
122 return sys_mmap_pgoff(addr, len, prot, flags, fd,
123 pgoff >> (PAGE_SHIFT - 12));
124 }
125
sys_mmap(unsigned long addr,unsigned long len,unsigned long prot,unsigned long flags,unsigned long fd,unsigned long off)126 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
127 unsigned long prot, unsigned long flags, unsigned long fd,
128 unsigned long off)
129 {
130 /* no alignment check? */
131 return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
132 }
133
sparc_remap_file_pages(unsigned long start,unsigned long size,unsigned long prot,unsigned long pgoff,unsigned long flags)134 long sparc_remap_file_pages(unsigned long start, unsigned long size,
135 unsigned long prot, unsigned long pgoff,
136 unsigned long flags)
137 {
138 /* This works on an existing mmap so we don't need to validate
139 * the range as that was done at the original mmap call.
140 */
141 return sys_remap_file_pages(start, size, prot,
142 (pgoff >> (PAGE_SHIFT - 12)), flags);
143 }
144
145 /* we come to here via sys_nis_syscall so it can setup the regs argument */
146 asmlinkage unsigned long
c_sys_nis_syscall(struct pt_regs * regs)147 c_sys_nis_syscall (struct pt_regs *regs)
148 {
149 static int count = 0;
150
151 if (count++ > 5)
152 return -ENOSYS;
153 printk ("%s[%d]: Unimplemented SPARC system call %d\n",
154 current->comm, task_pid_nr(current), (int)regs->u_regs[1]);
155 #ifdef DEBUG_UNIMP_SYSCALL
156 show_regs (regs);
157 #endif
158 return -ENOSYS;
159 }
160
161 /* #define DEBUG_SPARC_BREAKPOINT */
162
/* Breakpoint trap handler: deliver SIGTRAP/TRAP_BRKPT to the current
 * task, with the trapping PC as si_addr.
 */
asmlinkage void
sparc_breakpoint (struct pt_regs *regs)
{
	siginfo_t info;

#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
	/* Fill only the fields the handler consumes; the rest of the
	 * siginfo is deliberately left as the original code had it
	 * (no memset).
	 */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->pc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);

#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
}
182
/* Old-style sigaction for SPARC.
 *
 * NOTE(review): callers evidently pass the signal number negated (the
 * WARN fires if sig >= 0) and we flip the sign back here — confirm
 * against the syscall entry path.
 *
 * Copies the user's old_sigaction into a k_sigaction, lets
 * do_sigaction() do the real work, then copies the previous state back
 * out when 'oact' is non-NULL.  Returns 0 or a negative errno.
 */
asmlinkage int
sparc_sigaction (int sig, const struct old_sigaction __user *act,
		 struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	WARN_ON_ONCE(sig >= 0);
	sig = -sig;

	if (act) {
		unsigned long mask;

		/* Only the first two fetches are error-checked; the
		 * sa_flags/sa_mask __get_user() results are ignored.
		 * NOTE(review): this matches the original code — a
		 * fault there would leave new_ka partially filled.
		 */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
		new_ka.ka_restorer = NULL;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* In the clone() case we could copy half consistent
		 * state to the user, however this could sleep and
		 * deadlock us if we held the signal lock on SMP. So for
		 * now I take the easy way out and do no locking.
		 */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
224
225 asmlinkage long
sys_rt_sigaction(int sig,const struct sigaction __user * act,struct sigaction __user * oact,void __user * restorer,size_t sigsetsize)226 sys_rt_sigaction(int sig,
227 const struct sigaction __user *act,
228 struct sigaction __user *oact,
229 void __user *restorer,
230 size_t sigsetsize)
231 {
232 struct k_sigaction new_ka, old_ka;
233 int ret;
234
235 /* XXX: Don't preclude handling different sized sigset_t's. */
236 if (sigsetsize != sizeof(sigset_t))
237 return -EINVAL;
238
239 if (act) {
240 new_ka.ka_restorer = restorer;
241 if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
242 return -EFAULT;
243 }
244
245 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
246
247 if (!ret && oact) {
248 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
249 return -EFAULT;
250 }
251
252 return ret;
253 }
254
sys_getdomainname(char __user * name,int len)255 asmlinkage int sys_getdomainname(char __user *name, int len)
256 {
257 int nlen, err;
258
259 if (len < 0)
260 return -EINVAL;
261
262 down_read(&uts_sem);
263
264 nlen = strlen(utsname()->domainname) + 1;
265 err = -EINVAL;
266 if (nlen > len)
267 goto out;
268
269 err = -EFAULT;
270 if (!copy_to_user(name, utsname()->domainname, nlen))
271 err = 0;
272
273 out:
274 up_read(&uts_sem);
275 return err;
276 }
277
278 /*
279 * Do a system call from kernel instead of calling sys_execve so we
280 * end up with proper pt_regs.
281 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	long __res;
	/* Bind the syscall number and arguments to the registers the
	 * SPARC syscall ABI expects: %g1 = number, %o0-%o2 = args.
	 */
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x10\n\t"		/* trap into the syscall handler */
		      "bcc 1f\n\t"		/* carry clear => success; branch past negate */
		      "mov %%o0, %0\n\t"	/* delay slot: result = %o0 (always runs) */
		      "sub %%g0, %%o0, %0\n\t"	/* carry set => errno in %o0: negate it */
		      "1:\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}
301