/* $Id: unaligned.c,v 1.23 2001/04/09 04:29:03 davem Exp $
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/fpumacro.h>
#include <asm/bitops.h>

/* #define DEBUG_MNA */

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

#ifdef DEBUG_MNA
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif

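/* SPARC v9 memory instructions are format 3: op in bits 31:30, rd in
 * 29:25, op3 in 24:19, rs1 in 18:14, the i bit in 13, and either
 * simm13 (12:0) or rs2 (4:0).  The helpers below pick these fields
 * apart; bit 21 (op3 bit 2) separates loads from stores/atomics.
 */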
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn >> 19) & 0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
	unsigned int tmp;

	tmp = (insn >> 19) & 0xf;
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
		return 0; /* not reached; die_if_kernel() does not return here */
	}
}

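/* For alternate-space instructions (op3 bit 4, insn bit 23) with the i
 * bit set there is no room for an immediate ASI, so the %asi register
 * (kept in bits 31:24 of TSTATE) is used; with i clear the ASI comes
 * from insn bits 12:5.  Ordinary accesses use the primary space.
 */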
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);		/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

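/* simm13 sits in the low 13 bits of the instruction.  Shifting a
 * 64-bit long left then arithmetically right by 51 replicates bit 12
 * into the upper bits, e.g. sign_extend_imm13(0x1fff) == -1.
 */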
static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}

static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (current->thread.flags & SPARC_FLAG_32BIT) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (current->thread.flags & SPARC_FLAG_32BIT) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}
}

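/* For illustration: "ldx [%g1 + 0x10], %g2" assembles to 0xc4586010,
 * where rs1 is %g1 and the i bit is set, so the code above returns
 * fetch_reg(1, regs) + 0x10.
 */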
/* This is just to make gcc think die_if_kernel does return... */
static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

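/* The unaligned access is emulated byte by byte with lduba and glued
 * together with shifts and ors.  Every load is listed in __ex_table so
 * that a fault during the emulation branches to errh instead of
 * re-entering the unaligned trap handler.
 */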
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({	\
__asm__ __volatile__ (							\
	"wr	%4, 0, %%asi\n\t"					\
	"cmp	%1, 8\n\t"						\
	"bge,pn	%%icc, 9f\n\t"						\
	" cmp	%1, 4\n\t"						\
	"be,pt	%%icc, 6f\n"						\
"4:\t"	" lduba	[%2] %%asi, %%l1\n"					\
"5:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"				\
	"sll	%%l1, 8, %%l1\n\t"					\
	"brz,pt	%3, 3f\n\t"						\
	" add	%%l1, %%l2, %%l1\n\t"					\
	"sllx	%%l1, 48, %%l1\n\t"					\
	"srax	%%l1, 48, %%l1\n"					\
"3:\t"	"ba,pt	%%xcc, 0f\n\t"						\
	" stx	%%l1, [%0]\n"						\
"6:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"				\
	"sll	%%l1, 24, %%l1\n"					\
"7:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t"				\
	"sll	%%l2, 16, %%l2\n"					\
"8:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t"				\
	"sll	%%g7, 8, %%g7\n\t"					\
	"or	%%l1, %%l2, %%l1\n\t"					\
	"or	%%g7, %%g1, %%g7\n\t"					\
	"or	%%l1, %%g7, %%l1\n\t"					\
	"brnz,a,pt %3, 3f\n\t"						\
	" sra	%%l1, 0, %%l1\n"					\
"3:\t"	"ba,pt	%%xcc, 0f\n\t"						\
	" stx	%%l1, [%0]\n"						\
"9:\t"	"lduba	[%2] %%asi, %%l1\n"					\
"10:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"				\
	"sllx	%%l1, 56, %%l1\n"					\
"11:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t"				\
	"sllx	%%l2, 48, %%l2\n"					\
"12:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t"				\
	"sllx	%%g7, 40, %%g7\n\t"					\
	"sllx	%%g1, 32, %%g1\n\t"					\
	"or	%%l1, %%l2, %%l1\n\t"					\
	"or	%%g7, %%g1, %%g7\n"					\
"13:\t"	"lduba	[%2 + 4] %%asi, %%l2\n\t"				\
	"or	%%l1, %%g7, %%g7\n"					\
"14:\t"	"lduba	[%2 + 5] %%asi, %%g1\n\t"				\
	"sllx	%%l2, 24, %%l2\n"					\
"15:\t"	"lduba	[%2 + 6] %%asi, %%l1\n\t"				\
	"sllx	%%g1, 16, %%g1\n\t"					\
	"or	%%g7, %%l2, %%g7\n"					\
"16:\t"	"lduba	[%2 + 7] %%asi, %%l2\n\t"				\
	"sllx	%%l1, 8, %%l1\n\t"					\
	"or	%%g7, %%g1, %%g7\n\t"					\
	"or	%%l1, %%l2, %%l1\n\t"					\
	"or	%%g7, %%l1, %%g7\n\t"					\
	"cmp	%1, 8\n\t"						\
	"be,a,pt %%icc, 0f\n\t"						\
	" stx	%%g7, [%0]\n\t"						\
	"srlx	%%g7, 32, %%l1\n\t"					\
	"sra	%%g7, 0, %%g7\n\t"					\
	"stx	%%l1, [%0]\n\t"						\
	"stx	%%g7, [%0 + 8]\n"					\
"0:\n\t"								\
	"wr	%%g0, %5, %%asi\n\n\t"					\
	".section __ex_table\n\t"					\
	".word	4b, " #errh "\n\t"					\
	".word	5b, " #errh "\n\t"					\
	".word	6b, " #errh "\n\t"					\
	".word	7b, " #errh "\n\t"					\
	".word	8b, " #errh "\n\t"					\
	".word	9b, " #errh "\n\t"					\
	".word	10b, " #errh "\n\t"					\
	".word	11b, " #errh "\n\t"					\
	".word	12b, " #errh "\n\t"					\
	".word	13b, " #errh "\n\t"					\
	".word	14b, " #errh "\n\t"					\
	".word	15b, " #errh "\n\t"					\
	".word	16b, " #errh "\n\n\t"					\
	".previous\n\t"							\
	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed),	\
	    "r" (asi), "i" (ASI_AIUS)					\
	: "l1", "l2", "g7", "g1", "cc");				\
})

#define store_common(dst_addr, size, src_val, asi, errh) ({		\
__asm__ __volatile__ (							\
	"wr	%3, 0, %%asi\n\t"					\
	"ldx	[%2], %%l1\n"						\
	"cmp	%1, 2\n\t"						\
	"be,pn	%%icc, 2f\n\t"						\
	" cmp	%1, 4\n\t"						\
	"be,pt	%%icc, 1f\n\t"						\
	" srlx	%%l1, 24, %%l2\n\t"					\
	"srlx	%%l1, 56, %%g1\n\t"					\
	"srlx	%%l1, 48, %%g7\n"					\
"4:\t"	"stba	%%g1, [%0] %%asi\n\t"					\
	"srlx	%%l1, 40, %%g1\n"					\
"5:\t"	"stba	%%g7, [%0 + 1] %%asi\n\t"				\
	"srlx	%%l1, 32, %%g7\n"					\
"6:\t"	"stba	%%g1, [%0 + 2] %%asi\n"					\
"7:\t"	"stba	%%g7, [%0 + 3] %%asi\n\t"				\
	"srlx	%%l1, 16, %%g1\n"					\
"8:\t"	"stba	%%l2, [%0 + 4] %%asi\n\t"				\
	"srlx	%%l1, 8, %%g7\n"					\
"9:\t"	"stba	%%g1, [%0 + 5] %%asi\n"					\
"10:\t"	"stba	%%g7, [%0 + 6] %%asi\n\t"				\
	"ba,pt	%%xcc, 0f\n"						\
"11:\t"	" stba	%%l1, [%0 + 7] %%asi\n"					\
"1:\t"	"srl	%%l1, 16, %%g7\n"					\
"12:\t"	"stba	%%l2, [%0] %%asi\n\t"					\
	"srl	%%l1, 8, %%l2\n"					\
"13:\t"	"stba	%%g7, [%0 + 1] %%asi\n"					\
"14:\t"	"stba	%%l2, [%0 + 2] %%asi\n\t"				\
	"ba,pt	%%xcc, 0f\n"						\
"15:\t"	" stba	%%l1, [%0 + 3] %%asi\n"					\
"2:\t"	"srl	%%l1, 8, %%l2\n"					\
"16:\t"	"stba	%%l2, [%0] %%asi\n"					\
"17:\t"	"stba	%%l1, [%0 + 1] %%asi\n"					\
"0:\n\t"								\
	"wr	%%g0, %4, %%asi\n\n\t"					\
	".section __ex_table\n\t"					\
	".word	4b, " #errh "\n\t"					\
	".word	5b, " #errh "\n\t"					\
	".word	6b, " #errh "\n\t"					\
	".word	7b, " #errh "\n\t"					\
	".word	8b, " #errh "\n\t"					\
	".word	9b, " #errh "\n\t"					\
	".word	10b, " #errh "\n\t"					\
	".word	11b, " #errh "\n\t"					\
	".word	12b, " #errh "\n\t"					\
	".word	13b, " #errh "\n\t"					\
	".word	14b, " #errh "\n\t"					\
	".word	15b, " #errh "\n\t"					\
	".word	16b, " #errh "\n\t"					\
	".word	17b, " #errh "\n\n\t"					\
	".previous\n\t"							\
	: : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
	: "l1", "l2", "g7", "g1", "cc");				\
})

#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({	\
	unsigned long zero = 0;						\
	unsigned long *src_val = &zero;					\
									\
	if (size == 16) {						\
		size = 8;						\
		zero = (((long)(reg_num ?				\
		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
			(unsigned)fetch_reg(reg_num + 1, regs);		\
	} else if (reg_num)						\
		src_val = fetch_reg_addr(reg_num, regs);		\
	store_common(dst_addr, size, src_val, asi, errh);		\
})

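/* Atomic emulation: with interrupts disabled, save the old register
 * value, load the memory word into the register, then store the saved
 * value back.  As the XXX below says, this is not SMP-safe, and the
 * only caller is currently compiled out.
 */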
/* XXX Need to capture/release other cpu's for SMP around this. */
#define do_atomic(srcdest_reg, mem, asi, errh) ({			\
	unsigned long flags, tmp;					\
									\
	save_and_cli(flags);						\
	tmp = *srcdest_reg;						\
	do_integer_load(srcdest_reg, 4, mem, 0, asi, errh);		\
	store_common(mem, 4, &tmp, asi, errh);				\
	restore_flags(flags);						\
})

static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");

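/* Called via inline asm from kernel_unaligned_trap() when the
 * emulation itself faults.  If an exception table entry covers
 * regs->tpc we jump to its fixup and reset %asi to the default;
 * otherwise this is a real bad kernel access, so oops.
 */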
void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
	unsigned long g2 = regs->u_regs[UREG_G2];
	unsigned long fixup = search_exception_table(regs->tpc, &g2);

	if (!fixup) {
		unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
		       (current->mm ? current->mm->context :
			current->active_mm->context));
		printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
		       (current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = fixup;
	regs->tnpc = regs->tpc + 4;
	regs->u_regs[UREG_G2] = g2;

	regs->tstate &= ~TSTATE_ASI;
	regs->tstate |= (ASI_AIUS << 24UL);
}

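/* Main entry point for kernel-mode unaligned traps: decode the
 * direction and access size, refuse FP and atomic instructions, do the
 * access by hand, then step past the offending instruction.
 */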
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
		       regs->tpc);
		unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);

		__asm__ __volatile__ ("\n"
			"kernel_unaligned_trap_fault:\n\t"
			"mov	%0, %%o0\n\t"
			"call	kernel_mna_trap_fault\n\t"
			" mov	%1, %%o1\n\t"
			:
			: "r" (regs), "r" (insn)
			: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
			  "g1", "g2", "g3", "g4", "g5", "g7", "cc");
	} else {
		unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));

#ifdef DEBUG_MNA
		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
		       regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
		switch (dir) {
		case load:
			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
					size, (unsigned long *) addr,
					decode_signedness(insn), decode_asi(insn, regs),
					kernel_unaligned_trap_fault);
			break;

		case store:
			do_integer_store(((insn>>25)&0x1f), size,
					 (unsigned long *) addr, regs,
					 decode_asi(insn, regs),
					 kernel_unaligned_trap_fault);
			break;
#if 0 /* unsupported */
		case both:
			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
				  (unsigned long *) addr,
				  decode_asi(insn, regs),
				  kernel_unaligned_trap_fault);
			break;
#endif
		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		advance(regs);
	}
}

static char popc_helper[] = {
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
};

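/* popc is not implemented in hardware on the CPUs this file targets,
 * so it traps to here; the bit count is accumulated one nibble at a
 * time from the lookup table above.
 */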
int handle_popc(u32 insn, struct pt_regs *regs)
{
	u64 value;
	int ret, i, rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		if (current->thread.flags & SPARC_FLAG_32BIT) {
			struct reg_window32 *win32;
			win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window *win;
			win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void spitfire_data_access_exception(struct pt_regs *regs,
					   unsigned long sfsr,
					   unsigned long sfar);

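/* Emulates FP loads/stores the hardware refuses: quad accesses and
 * non-standard ASIs.  The 6-bit FP register number is rebuilt from its
 * split V9 encoding, where rd bit 0 (insn bit 25) supplies bit 5 of
 * the register number.
 */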
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	save_and_clear_fpu();
	current->thread.xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current->thread.fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
		{
			/* Need to convert endians */
			u64 tmp = __swab64p(&first);

			first = __swab64p(&second);
			second = tmp;
			break;
		}
		default:
			spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user(first >> 32, (u32 *)addr) ||
		    __put_user((u32)first, (u32 *)(addr + 4)) ||
		    __put_user(second >> 32, (u32 *)(addr + 8)) ||
		    __put_user((u32)second, (u32 *)(addr + 12))) {
			spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user(data[0], (u32 *)addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user(data[i], (u32 *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			default: *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			}
		}
		if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
			current->thread.fpsaved[0] = FPRS_FEF;
			current->thread.gsr[0] = 0;
		}
		if (!(current->thread.fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current->thread.fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (current->thread.flags & SPARC_FLAG_32BIT) {
		put_user(0, (int *)reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int *)reg) + 1);
	} else {
		put_user(0, reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, reg + 1);
	}
	advance(regs);
}

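/* Misaligned lddf/stdf traps from user space land here; the faulting
 * address and ASI arrive in SFAR/SFSR, so the access can be replayed
 * as two aligned 32-bit user accesses and the result moved to or from
 * the saved FP state.
 */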
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u32 first, second;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	if (current->thread.flags & SPARC_FLAG_32BIT)
		pc = (u32)pc;
	if (get_user(insn, (u32 *)pc) != -EFAULT) {
		asi = sfsr >> 16;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		if (get_user(first, (u32 *)sfar) ||
		    get_user(second, (u32 *)(sfar + 4))) {
			if (asi & 0x2) /* NF */ {
				first = 0; second = 0;
			} else
				goto daex;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
			current->thread.fpsaved[0] = FPRS_FEF;
			current->thread.gsr[0] = 0;
		}
		if (!(current->thread.fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current->thread.fpsaved[0] |= flag;
	} else {
daex:
		spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}

void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	if (current->thread.flags & SPARC_FLAG_32BIT)
		pc = (u32)pc;
	if (get_user(insn, (u32 *)pc) != -EFAULT) {
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		asi = sfsr >> 16;
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current->thread.fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user(value >> 32, (u32 *)sfar) ||
		    __put_user((u32)value, (u32 *)(sfar + 4)))
			goto daex;
	} else {
daex:
		spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}