/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SWITCH_TO_H
#define __ASM_SWITCH_TO_H

#include <linux/thread_info.h>

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

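/*
 * Save the floating point registers of the current task into *fpregs.
 * Registers 0, 2, 4 and 6 are stored unconditionally; the floating
 * point control register and the remaining twelve registers are only
 * present on machines with the IEEE facility, hence the
 * MACHINE_HAS_IEEE check.
 */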
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		" std 0,%O0+8(%R0)\n"
		" std 2,%O0+24(%R0)\n"
		" std 4,%O0+40(%R0)\n"
		" std 6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		" stfpc %0\n"
		" std 1,%O0+16(%R0)\n"
		" std 3,%O0+32(%R0)\n"
		" std 5,%O0+48(%R0)\n"
		" std 7,%O0+64(%R0)\n"
		" std 8,%O0+72(%R0)\n"
		" std 9,%O0+80(%R0)\n"
		" std 10,%O0+88(%R0)\n"
		" std 11,%O0+96(%R0)\n"
		" std 12,%O0+104(%R0)\n"
		" std 13,%O0+112(%R0)\n"
		" std 14,%O0+120(%R0)\n"
		" std 15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

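/*
 * Reload the floating point registers from *fpregs, the inverse of
 * save_fp_regs(). The floating point control register and the extended
 * register set are again restored only if MACHINE_HAS_IEEE.
 */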
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		" ld 0,%O0+8(%R0)\n"
		" ld 2,%O0+24(%R0)\n"
		" ld 4,%O0+40(%R0)\n"
		" ld 6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		" lfpc %0\n"
		" ld 1,%O0+16(%R0)\n"
		" ld 3,%O0+32(%R0)\n"
		" ld 5,%O0+48(%R0)\n"
		" ld 7,%O0+64(%R0)\n"
		" ld 8,%O0+72(%R0)\n"
		" ld 9,%O0+80(%R0)\n"
		" ld 10,%O0+88(%R0)\n"
		" ld 11,%O0+96(%R0)\n"
		" ld 12,%O0+104(%R0)\n"
		" ld 13,%O0+112(%R0)\n"
		" ld 14,%O0+120(%R0)\n"
		" ld 15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

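/* Store all 16 access registers into the array at *acrs. */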
static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

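/* Load all 16 access registers from the array at *acrs. */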
static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

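/*
 * Context switch: save the floating point and access registers of the
 * outgoing task into its thread structure, load the register state of
 * the incoming task and refresh its PER (debug) control registers, then
 * let __switch_to() swap the kernel stacks. Tasks without an mm
 * (kernel threads) skip the user register handling.
 */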
#define switch_to(prev,next,last) do {				\
	if (prev->mm) {						\
		save_fp_regs(&prev->thread.fp_regs);		\
		save_access_regs(&prev->thread.acrs[0]);	\
	}							\
	if (next->mm) {						\
		restore_fp_regs(&next->thread.fp_regs);		\
		restore_access_regs(&next->thread.acrs[0]);	\
		update_per_regs(next);				\
	}							\
	prev = __switch_to(prev,next);				\
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

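/*
 * Run after every context switch: restore the address space limit
 * (mm_segment) of the new current task and update the virtual CPU
 * time accounting for the outgoing and incoming tasks.
 */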
#define finish_arch_switch(prev) do {				\
	set_fs(current->thread.mm_segment);			\
	account_vtime(prev, current);				\
} while (0)

#endif /* __ASM_SWITCH_TO_H */