/*
 * include/asm-s390/system.h
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/system.h"
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/types.h>
#ifdef __KERNEL__
#include <asm/lowcore.h>
#endif
#include <linux/kernel.h>

#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
        if (prev == next) \
                break; \
        save_fp_regs1(&prev->thread.fp_regs); \
        restore_fp_regs1(&next->thread.fp_regs); \
        last = resume(prev,next); \
} while (0)
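
/*
 * Usage sketch (illustrative, not part of the original header): the
 * scheduler invokes switch_to() with the outgoing and incoming tasks;
 * "last" receives the task actually switched away from, as returned
 * by resume(). pick_next_task() is a hypothetical stand-in for the
 * scheduler's selection logic.
 *
 *     struct task_struct *prev = current, *last;
 *     struct task_struct *next = pick_next_task();
 *     switch_to(prev, next, last);
 */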

struct task_struct;

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
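
/*
 * Usage sketch (illustrative, not from the original source): xchg()
 * atomically stores the new value and returns the previous contents,
 * which makes a simple test-and-set flag:
 *
 *     static volatile int flag;
 *     ...
 *     if (xchg(&flag, 1) == 0) {
 *             ... this caller set the flag first ...
 *     }
 */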

extern void __misaligned_u16(void);
extern void __misaligned_u32(void);

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        switch (size) {
        case 1:
                asm volatile (
                        "   lhi   1,3\n"
                        "   nr    1,%0\n"        /* isolate last 2 bits */
                        "   xr    %0,1\n"        /* align ptr */
                        "   bras  2,0f\n"
                        "   icm   1,8,3(%1)\n"   /* for ptr&3 == 0 */
                        "   stcm  0,8,3(%1)\n"
                        "   icm   1,4,3(%1)\n"   /* for ptr&3 == 1 */
                        "   stcm  0,4,3(%1)\n"
                        "   icm   1,2,3(%1)\n"   /* for ptr&3 == 2 */
                        "   stcm  0,2,3(%1)\n"
                        "   icm   1,1,3(%1)\n"   /* for ptr&3 == 3 */
                        "   stcm  0,1,3(%1)\n"
                        "0: sll   1,3\n"
                        "   la    2,0(1,2)\n"    /* r2 points to an icm */
                        "   l     0,0(%0)\n"     /* get fullword */
                        "1: lr    1,0\n"         /* cs loop */
                        "   ex    0,0(2)\n"      /* insert x */
                        "   cs    0,1,0(%0)\n"
                        "   jl    1b\n"
                        "   ex    0,4(2)"        /* store *ptr to x */
                        : "+&a" (ptr) : "a" (&x)
                        : "memory", "cc", "0", "1", "2");
                break;
        case 2:
                if (((__u32) ptr) & 1)
                        __misaligned_u16();
                asm volatile (
                        "   lhi   1,2\n"
                        "   nr    1,%0\n"        /* isolate bit 2^1 */
                        "   xr    %0,1\n"        /* align ptr */
                        "   bras  2,0f\n"
                        "   icm   1,12,2(%1)\n"  /* for ptr&2 == 0 */
                        "   stcm  0,12,2(%1)\n"
                        "   icm   1,3,2(%1)\n"   /* for ptr&2 == 2 */
                        "   stcm  0,3,2(%1)\n"
                        "0: sll   1,2\n"
                        "   la    2,0(1,2)\n"    /* r2 points to an icm */
                        "   l     0,0(%0)\n"     /* get fullword */
                        "1: lr    1,0\n"         /* cs loop */
                        "   ex    0,0(2)\n"      /* insert x */
                        "   cs    0,1,0(%0)\n"
                        "   jl    1b\n"
                        "   ex    0,4(2)"        /* store *ptr to x */
                        : "+&a" (ptr) : "a" (&x)
                        : "memory", "cc", "0", "1", "2");
                break;
        case 4:
                if (((__u32) ptr) & 3)
                        __misaligned_u32();
                asm volatile (
                        "   l     0,0(%1)\n"
                        "0: cs    0,%0,0(%1)\n"
                        "   jl    0b\n"
                        "   lr    %0,0\n"
                        : "+&d" (x) : "a" (ptr)
                        : "memory", "cc", "0");
                break;
        }
        return x;
}
104
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio() __asm__ __volatile__ ("BCR 15,0")
#define SYNC_OTHER_CORES(x) eieio()
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
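
/*
 * Ordering sketch (illustrative, not from the original source): wmb()
 * keeps a payload store visible before the flag store that publishes
 * it; a reader pairs this with rmb() before reading the payload.
 * shared_data and data_ready are hypothetical variables.
 *
 *     shared_data = value;
 *     wmb();
 *     data_ready = 1;
 */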

/* interrupt control.. */
#define __sti() ({ \
        __u8 dummy; \
        __asm__ __volatile__ ( \
                "stosm 0(%0),0x03" : : "a" (&dummy) : "memory"); \
})

#define __cli() ({ \
        __u32 flags; \
        __asm__ __volatile__ ( \
                "stnsm 0(%0),0xFC" : : "a" (&flags) : "memory"); \
        flags; \
})

#define __save_flags(x) \
        __asm__ __volatile__("stosm 0(%0),0" : : "a" (&x) : "memory")

#define __restore_flags(x) \
        __asm__ __volatile__("ssm 0(%0)" : : "a" (&x) : "memory")

#define __load_psw(psw) \
        __asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc")

#define __ctl_load(array, low, high) ({ \
        __asm__ __volatile__ ( \
                "   la    1,%0\n" \
                "   bras  2,0f\n" \
                "   lctl  0,0,0(1)\n" \
                "0: ex    %1,0(2)" \
                : : "m" (array), "a" (((low)<<4)+(high)) : "1", "2"); \
})

#define __ctl_store(array, low, high) ({ \
        __asm__ __volatile__ ( \
                "   la    1,%0\n" \
                "   bras  2,0f\n" \
                "   stctl 0,0,0(1)\n" \
                "0: ex    %1,0(2)" \
                : "=m" (array) : "a" (((low)<<4)+(high)) : "1", "2"); \
})
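
/*
 * Usage sketch (illustrative, not from the original source): store a
 * control register range into memory and load it back; low and high
 * select the register range. "cr0" is a hypothetical local variable.
 *
 *     __u32 cr0;
 *     __ctl_store(cr0, 0, 0);
 *     __ctl_load(cr0, 0, 0);
 */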

#define __ctl_set_bit(cr, bit) ({ \
        __u8 dummy[16]; \
        __asm__ __volatile__ ( \
                "   la    1,%0\n"      /* align to 8 byte */ \
                "   ahi   1,7\n" \
                "   srl   1,3\n" \
                "   sll   1,3\n" \
                "   bras  2,0f\n"      /* skip indirect insns */ \
                "   stctl 0,0,0(1)\n" \
                "   lctl  0,0,0(1)\n" \
                "0: ex    %1,0(2)\n"   /* execute stctl */ \
                "   l     0,0(1)\n" \
                "   or    0,%2\n"      /* set the bit */ \
                "   st    0,0(1)\n" \
                "1: ex    %1,4(2)"     /* execute lctl */ \
                : "=m" (dummy) : "a" (cr*17), "a" (1<<(bit)) \
                : "cc", "0", "1", "2"); \
})

#define __ctl_clear_bit(cr, bit) ({ \
        __u8 dummy[16]; \
        __asm__ __volatile__ ( \
                "   la    1,%0\n"      /* align to 8 byte */ \
                "   ahi   1,7\n" \
                "   srl   1,3\n" \
                "   sll   1,3\n" \
                "   bras  2,0f\n"      /* skip indirect insns */ \
                "   stctl 0,0,0(1)\n" \
                "   lctl  0,0,0(1)\n" \
                "0: ex    %1,0(2)\n"   /* execute stctl */ \
                "   l     0,0(1)\n" \
                "   nr    0,%2\n"      /* clear the bit */ \
                "   st    0,0(1)\n" \
                "1: ex    %1,4(2)"     /* execute lctl */ \
                : "=m" (dummy) : "a" (cr*17), "a" (~(1<<(bit))) \
                : "cc", "0", "1", "2"); \
})

#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0)
#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0)

/* For spinlocks etc */
#define local_irq_save(x) ((x) = __cli())
#define local_irq_set(x) __save_and_sti(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
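
/*
 * Usage sketch (illustrative, not from the original source): the usual
 * save/disable/restore pattern around a short critical section.
 *
 *     unsigned long flags;
 *     local_irq_save(flags);
 *     ... touch state that an interrupt handler might also touch ...
 *     local_irq_restore(flags);
 */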

#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);

extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#define save_and_sti(x) do { save_flags(x); sti(); } while(0)

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif

#ifdef __KERNEL__
extern struct task_struct *resume(void *, void *);

extern int save_fp_regs1(s390_fp_regs *fpregs);
extern void save_fp_regs(s390_fp_regs *fpregs);
extern int restore_fp_regs1(s390_fp_regs *fpregs);
extern void restore_fp_regs(s390_fp_regs *fpregs);

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */