#ifndef __ASM_SH64_SYSTEM_H
#define __ASM_SH64_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/system.h
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 Paul Mundt
 *
 */

#include <linux/config.h>
#include <asm/registers.h>

/*
 * switch_to() should switch tasks to task nr n, first
 */

typedef struct {
	unsigned long seg;
} mm_segment_t;

#ifdef CONFIG_SMP
#error "no SMP SH64"
#else
#define prepare_to_switch()	do { } while(0)
#ifndef CS_SAVE_ALL
#define SAVE_CALLER_SAVED
#define RESTORE_CALLER_SAVED
#else
#define SAVE_CALLER_SAVED \
	__asm__ __volatile__("addi.l r15, -256, r15\n\t" \
			"st.q r15, 0, r0\n\t" \
			"st.q r15, 8, r1\n\t" \
			"st.q r15, 16, r2\n\t" \
			"st.q r15, 24, r3\n\t" \
			"st.q r15, 32, r4\n\t" \
			"st.q r15, 40, r5\n\t" \
			"st.q r15, 48, r6\n\t" \
			"st.q r15, 56, r7\n\t" \
			"st.q r15, 64, r8\n\t" \
			"st.q r15, 72, r9\n\t" \
			"st.q r15, 80, r17\n\t" \
			"st.q r15, 88, r19\n\t" \
			"st.q r15, 96, r20\n\t" \
			"st.q r15, 104, r21\n\t" \
			"st.q r15, 112, r22\n\t" \
			"st.q r15, 120, r23\n\t" \
			"st.q r15, 128, r36\n\t" \
			"st.q r15, 136, r37\n\t" \
			"st.q r15, 144, r38\n\t" \
			"st.q r15, 152, r39\n\t" \
			"st.q r15, 160, r40\n\t" \
			"st.q r15, 168, r41\n\t" \
			"st.q r15, 176, r42\n\t" \
			"st.q r15, 184, r43\n\t" \
			"st.q r15, 192, r60\n\t" \
			"st.q r15, 200, r61\n\t" \
			"st.q r15, 208, r62\n\t" \
			"gettr " __t0 ", r0\n\t" \
			"st.q r15, 216, r0\n\t" \
			"gettr " __t1 ", r1\n\t" \
			"st.q r15, 224, r1\n\t" \
			"gettr " __t2 ", r2\n\t" \
			"st.q r15, 232, r2\n\t" \
			"gettr " __t3 ", r3\n\t" \
			"st.q r15, 240, r3\n\t" \
			"gettr " __t4 ", r4\n\t" \
			"st.q r15, 248, r4\n\t");

/* Note. Do not restore r42 ! */
#define RESTORE_CALLER_SAVED \
	__asm__ __volatile__("ld.q r15, 216, r0\n\t" \
			"ptabs r0, " __t0 "\n\t" \
			"ld.q r15, 224, r1\n\t" \
			"ptabs r1, " __t1 "\n\t" \
			"ld.q r15, 232, r2\n\t" \
			"ptabs r2, " __t2 "\n\t" \
			"ld.q r15, 240, r3\n\t" \
			"ptabs r3, " __t3 "\n\t" \
			"ld.q r15, 248, r4\n\t" \
			"ptabs r4, " __t4 "\n\t" \
			"ld.q r15, 0, r0\n\t" \
			"ld.q r15, 8, r1\n\t" \
			"ld.q r15, 16, r2\n\t" \
			"ld.q r15, 24, r3\n\t" \
			"ld.q r15, 32, r4\n\t" \
			"ld.q r15, 40, r5\n\t" \
			"ld.q r15, 48, r6\n\t" \
			"ld.q r15, 56, r7\n\t" \
			"ld.q r15, 64, r8\n\t" \
			"ld.q r15, 72, r9\n\t" \
			"ld.q r15, 80, r17\n\t" \
			"ld.q r15, 88, r19\n\t" \
			"ld.q r15, 96, r20\n\t" \
			"ld.q r15, 104, r21\n\t" \
			"ld.q r15, 112, r22\n\t" \
			"ld.q r15, 120, r23\n\t" \
			"ld.q r15, 128, r36\n\t" \
			"ld.q r15, 136, r37\n\t" \
			"ld.q r15, 144, r38\n\t" \
			"ld.q r15, 152, r39\n\t" \
			"ld.q r15, 160, r40\n\t" \
			"ld.q r15, 168, r41\n\t" \
			"ld.q r15, 184, r43\n\t" \
			"ld.q r15, 192, r60\n\t" \
			"ld.q r15, 200, r61\n\t" \
			"ld.q r15, 208, r62\n\t" \
			"addi.l r15, 256, r15\n\t");
#endif

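/*
 * Added note (not in the original source): switch_to() hands the CPU
 * from `prev' to `next' and, once this task is eventually scheduled
 * back, leaves the task we actually returned from in `last'.  The asm
 * below keeps that value in r42 (the "=r" (r42) output, copied into
 * `last' at the end), which is why r42 is deliberately left out of
 * RESTORE_CALLER_SAVED above (see the "Do not restore r42" note):
 * reloading it from the stack would clobber the result handed back by
 * __switch_to.
 */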
#define switch_to(prev,next,last) do { \
	register unsigned long * r36 __asm__ ("r36"); \
	register unsigned long * r37 __asm__ ("r37"); \
	register unsigned long long r38 __asm__ ("r38"); \
	register unsigned long long r39 __asm__ ("r39"); \
	register unsigned long * r40 __asm__ ("r40"); \
	register unsigned long * r41 __asm__ ("r41"); \
	register struct task_struct * r42 __asm__ ("r42"); \
	\
	/* printk("switch_to prev %08x next %08x last %08x\n", prev, next, last); */ \
	if (last_task_used_math != next) { \
		struct pt_regs* regs; \
		regs = next->thread.kregs; \
		regs->sr |= SR_FD; \
	} \
	\
	SAVE_CALLER_SAVED \
	\
	r36 = &prev->thread.sp; \
	r37 = &prev->thread.pc; \
	\
	/* Note that we always are in kernel space */ \
	r38 = next->thread.sp | NEFF_MASK; \
	r39 = next->thread.pc | NEFF_MASK; \
	\
	r40 = (unsigned long *) prev; \
	r41 = (unsigned long *) next; \
	\
	__asm__ __volatile__("addi.l r15, -304, r15\n\t" \
			"st.q r15, 0, r10\n\t" \
			"st.q r15, 8, r11\n\t" \
			"st.q r15, 16, r12\n\t" \
			"st.q r15, 24, r13\n\t" \
			"st.q r15, 32, r14\n\t" \
			"st.q r15, 40, r16\n\t" \
			"st.q r15, 48, r18\n\t" \
			"st.q r15, 56, r24\n\t" \
			"st.q r15, 64, r25\n\t" \
			"st.q r15, 72, r26\n\t" \
			"st.q r15, 80, r27\n\t" \
			"st.q r15, 88, r28\n\t" \
			"st.q r15, 96, r29\n\t" \
			"st.q r15, 104, r30\n\t" \
			"st.q r15, 112, r31\n\t" \
			"st.q r15, 120, r32\n\t" \
			"st.q r15, 128, r33\n\t" \
			"st.q r15, 136, r34\n\t" \
			"st.q r15, 144, r35\n\t" \
			"st.q r15, 152, r44\n\t" \
			"st.q r15, 160, r45\n\t" \
			"st.q r15, 168, r46\n\t" \
			"st.q r15, 176, r47\n\t" \
			"st.q r15, 184, r48\n\t" \
			"st.q r15, 192, r49\n\t" \
			"st.q r15, 200, r50\n\t" \
			"st.q r15, 208, r51\n\t" \
			"st.q r15, 216, r52\n\t" \
			"st.q r15, 224, r53\n\t" \
			"st.q r15, 232, r54\n\t" \
			"st.q r15, 240, r55\n\t" \
			"st.q r15, 248, r56\n\t" \
			"st.q r15, 256, r57\n\t" \
			"st.q r15, 264, r58\n\t" \
			"st.q r15, 272, r59\n\t" \
			"gettr " __t5 ", r55\n\t" \
			"st.q r15, 280, r55\n\t" \
			"gettr " __t6 ", r56\n\t" \
			"st.q r15, 288, r56\n\t" \
			"gettr " __t7 ", r57\n\t" \
			"st.q r15, 296, r57\n\t" \
			"_loada __switch_to, r18\n\t" \
			"ptabs r18, " __t5 "\n\t" \
			"_pta 36, " __t6 "\n\t" \
			"gettr " __t6 ", r18\n\t" \
			"st.l %1, 0, r15\n\t" \
			"st.l %2, 0, r18\n\t" \
			"or %3, r63, r15\n\t" \
			"or %4, r63, r18\n\t" \
			"or %5, r63, r2\n\t" \
			"or %6, r63, r3\n\t" \
			"blink " __t5 ", r63\n\t" \
			"or r2, r63, %0\n\t" \
			"ld.q r15, 280, r55\n\t" \
			"ptabs r55, " __t5 "\n\t" \
			"ld.q r15, 288, r56\n\t" \
			"ptabs r56, " __t6 "\n\t" \
			"ld.q r15, 296, r57\n\t" \
			"ptabs r57, " __t7 "\n\t" \
			"ld.q r15, 0, r10\n\t" \
			"ld.q r15, 8, r11\n\t" \
			"ld.q r15, 16, r12\n\t" \
			"ld.q r15, 24, r13\n\t" \
			"ld.q r15, 32, r14\n\t" \
			"ld.q r15, 40, r16\n\t" \
			"ld.q r15, 48, r18\n\t" \
			"ld.q r15, 56, r24\n\t" \
			"ld.q r15, 64, r25\n\t" \
			"ld.q r15, 72, r26\n\t" \
			"ld.q r15, 80, r27\n\t" \
			"ld.q r15, 88, r28\n\t" \
			"ld.q r15, 96, r29\n\t" \
			"ld.q r15, 104, r30\n\t" \
			"ld.q r15, 112, r31\n\t" \
			"ld.q r15, 120, r32\n\t" \
			"ld.q r15, 128, r33\n\t" \
			"ld.q r15, 136, r34\n\t" \
			"ld.q r15, 144, r35\n\t" \
			"ld.q r15, 152, r44\n\t" \
			"ld.q r15, 160, r45\n\t" \
			"ld.q r15, 168, r46\n\t" \
			"ld.q r15, 176, r47\n\t" \
			"ld.q r15, 184, r48\n\t" \
			"ld.q r15, 192, r49\n\t" \
			"ld.q r15, 200, r50\n\t" \
			"ld.q r15, 208, r51\n\t" \
			"ld.q r15, 216, r52\n\t" \
			"ld.q r15, 224, r53\n\t" \
			"ld.q r15, 232, r54\n\t" \
			"ld.q r15, 240, r55\n\t" \
			"ld.q r15, 248, r56\n\t" \
			"ld.q r15, 256, r57\n\t" \
			"ld.q r15, 264, r58\n\t" \
			"ld.q r15, 272, r59\n\t" \
			"addi.l r15, 304, r15\n\t" \
			: "=r" (r42) \
			: "r" (r36), "r" (r37), "r" (r38), \
			  "r" (r39), "r" (r40), "r" (r41)); \
	RESTORE_CALLER_SAVED \
	last = r42; \
} while (0)
#endif

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr), 1))

extern void __xchg_called_with_bad_pointer(void);
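/*
 * Illustrative sketch only (not part of this header): tas() returns the
 * previous value of the word it sets to 1, so a trivial busy-wait lock
 * can be built from it.  `demo_lock' and the helpers are made-up names;
 * real code should use the regular spinlock API instead.
 *
 *	static volatile int demo_lock;		// 0 = free, 1 = held
 *
 *	static __inline__ void demo_lock_acquire(void)
 *	{
 *		while (tas(&demo_lock))		// old value != 0: already held
 *			;			// spin until we flip 0 -> 1
 *	}
 *
 *	static __inline__ void demo_lock_release(void)
 *	{
 *		wmb();				// flush critical-section stores first
 *		demo_lock = 0;			// then mark the lock free
 *	}
 */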

#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("synco": : :"memory")
#define set_rmb(var, value) do { xchg(&var, value); } while (0)
#define set_mb(var, value) set_rmb(var, value)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
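/*
 * Illustrative sketch only (not part of this header): the classic use
 * of wmb() is to publish data before a flag.  `shared_data' and
 * `data_ready' are made-up names for the example.
 *
 *	shared_data = compute_payload();	// fill in the payload first
 *	wmb();					// order the payload store before the flag store
 *	data_ready = 1;				// consumer may now read shared_data
 */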

/* Interrupt Control */
#ifndef HARD_CLI
#define SR_MASK_L 0x000000f0L
#define SR_MASK_LL 0x00000000000000f0LL
#else
#define SR_MASK_L 0x10000000L
#define SR_MASK_LL 0x0000000010000000LL
#endif
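/*
 * Added note (not in the original source): 0xf0 selects SR bits 4-7,
 * which appears to be the interrupt mask level (IMASK) field, while the
 * HARD_CLI variant's 0x10000000 selects bit 28, which appears to be the
 * SR.BL blocking bit; consult the SH-5 architecture manual before
 * relying on either interpretation.
 */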

extern __inline__ void __sti(void)
{
	/* cli/sti based on SR.BL */
	unsigned long long __dummy0, __dummy1=~SR_MASK_LL;

	__asm__ __volatile__("getcon " __c0 ", %0\n\t"
			     "and %0, %1, %0\n\t"
			     "putcon %0, " __c0 "\n\t"
			     : "=&r" (__dummy0)
			     : "r" (__dummy1));
}

extern __inline__ void __cli(void)
{
	/* cli/sti based on SR.BL */
	unsigned long long __dummy0, __dummy1=SR_MASK_LL;
	__asm__ __volatile__("getcon " __c0 ", %0\n\t"
			     "or %0, %1, %0\n\t"
			     "putcon %0, " __c0 "\n\t"
			     : "=&r" (__dummy0)
			     : "r" (__dummy1));
}

#define __save_flags(x) \
(__extension__ ({ unsigned long long __dummy=SR_MASK_LL; \
	__asm__ __volatile__( \
		"getcon " __c0 ", %0\n\t" \
		"and %0, %1, %0" \
		: "=&r" (x) \
		: "r" (__dummy));}))

#define __save_and_cli(x) \
(__extension__ ({ unsigned long long __d2=SR_MASK_LL, __d1; \
	__asm__ __volatile__( \
		"getcon " __c0 ", %1\n\t" \
		"or %1, r63, %0\n\t" \
		"or %1, %2, %1\n\t" \
		"putcon %1, " __c0 "\n\t" \
		"and %0, %2, %0" \
		: "=&r" (x), "=&r" (__d1) \
		: "r" (__d2));}));

#define __restore_flags(x) do { \
	if ( ((x) & SR_MASK_L) == 0 )	/* dropping to 0 ? */ \
		__sti();		/* yes...re-enable */ \
} while (0)

#define __save_and_sti(x) do { __save_flags(x); __sti(); } while (0)

/* For spinlocks etc */
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_set(x)	__save_and_sti(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
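/*
 * Illustrative sketch only (not part of this header): the usual pattern
 * for a short critical section on the local CPU, using the macros
 * above.  `counter' is a made-up variable.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// mask interrupts, remember previous state
 *	counter++;			// ... touch data shared with irq context ...
 *	local_irq_restore(flags);	// re-enable only if they were enabled before
 */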

#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#define save_and_sti(x) do { save_flags(x); sti(); } while (0)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_sti(x) __save_and_sti(x)

#endif

extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long flags, retval;

	save_and_cli(flags);
	retval = *m;
	*m = val;
	restore_flags(flags);
	return retval;
}

extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
	unsigned long flags, retval;

	save_and_cli(flags);
	retval = *m;
	*m = val & 0xff;
	restore_flags(flags);
	return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
		break;
	case 1:
		return xchg_u8(ptr, x);
		break;
	}
	__xchg_called_with_bad_pointer();
	return x;
}
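/*
 * Illustrative sketch only (not part of this header): xchg() picks the
 * access width from sizeof(*(ptr)), so a 4-byte object is routed to
 * xchg_u32() and a 1-byte object to xchg_u8(); any other size leaves a
 * call to the undefined __xchg_called_with_bad_pointer(), which fails
 * at link time.  `pending' and `state' are made-up names.
 *
 *	volatile int pending = 1;
 *	volatile unsigned char state = 0;
 *
 *	int old_pending = xchg(&pending, 0);		// size 4 -> xchg_u32()
 *	unsigned char old_state = xchg(&state, 3);	// size 1 -> xchg_u8()
 */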

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
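/*
 * Illustrative sketch only (not part of this header): bracket an I/O
 * sequence that must not race with the idle loop's sleep instruction.
 * `do_critical_io()' is a made-up function.
 *
 *	disable_hlt();		// keep the idle loop from sleeping the CPU
 *	do_critical_io();
 *	enable_hlt();		// back to normal idle behaviour
 */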


#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()

extern void print_seg(char *file,int line);

#define PLS() print_seg(__FILE__,__LINE__)

#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)

#endif /* __ASM_SH64_SYSTEM_H */