#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>

/*
 * System defines.  Note that this is included from both .c and .S
 * files, so the parts visible to assembly must contain only defines,
 * not any C code (the C-only declarations are guarded by __ASSEMBLY__
 * below).
 */
12
13 /*
14 * We leave one page for the initial stack page, and one page for
15 * the initial process structure. Also, the console eats 3 MB for
16 * the initial bootloader (one of which we can reclaim later).
17 */
18 #define BOOT_PCB 0x20000000
19 #define BOOT_ADDR 0x20000000
20 /* Remove when official MILO sources have ELF support: */
21 #define BOOT_SIZE (16*1024)
22
23 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
24 #define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
25 #else
26 #define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
27 #endif
28
29 #define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
30 #define SWAPPER_PGD KERNEL_START
31 #define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
32 #define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
33 #define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
34 #define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
35
36 #define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
37
38 /*
39 * This is setup by the secondary bootstrap loader. Because
40 * the zero page is zeroed out as soon as the vm system is
41 * initialized, we need to copy things out into a more permanent
42 * place.
43 */
44 #define PARAM ZERO_PGE
45 #define COMMAND_LINE ((char*)(PARAM + 0x0000))
46 #define COMMAND_LINE_SIZE 256
47 #define INITRD_START (*(unsigned long *) (PARAM+0x100))
48 #define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))
49
50 #ifndef __ASSEMBLY__
51 #include <linux/kernel.h>
52
53 /*
54 * This is the logout header that should be common to all platforms
55 * (assuming they are running OSF/1 PALcode, I guess).
56 */
57 struct el_common {
58 unsigned int size; /* size in bytes of logout area */
59 int sbz1 : 30; /* should be zero */
60 int err2 : 1; /* second error */
61 int retry : 1; /* retry flag */
62 unsigned int proc_offset; /* processor-specific offset */
63 unsigned int sys_offset; /* system-specific offset */
64 unsigned int code; /* machine check code */
65 unsigned int frame_rev; /* frame revision */
66 };
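
/*
 * Illustrative sketch (an addition, not part of the original
 * interface): given the address of a logout frame, the processor-
 * and system-specific sections are located via the offsets in the
 * common header above.  The helper names and the "la_ptr" argument
 * are hypothetical.
 */
static inline void *
el_proc_section(unsigned long la_ptr)
{
	struct el_common *hdr = (struct el_common *) la_ptr;
	return (char *) la_ptr + hdr->proc_offset;	/* CPU-specific part */
}

static inline void *
el_sys_section(unsigned long la_ptr)
{
	struct el_common *hdr = (struct el_common *) la_ptr;
	return (char *) la_ptr + hdr->sys_offset;	/* system-specific part */
}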

/* Machine Check Frame for uncorrectable errors (Large format)
 *	--- This is used to log uncorrectable errors such as
 *	    double bit ECC errors.
 *	--- These errors are detected by both processor and systems.
 */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long shadow[8];	/* Shadow reg. 8-14, 25 */
	unsigned long paltemp[24];	/* PAL TEMP REGS. */
	unsigned long exc_addr;		/* Address of excepting instruction */
	unsigned long exc_sum;		/* Summary of arithmetic traps. */
	unsigned long exc_mask;		/* Exception mask (from exc_sum). */
	unsigned long pal_base;		/* Base address for PALcode. */
	unsigned long isr;		/* Interrupt Status Reg. */
	unsigned long icsr;		/* CURRENT SETUP OF EV5 IBOX */
	unsigned long ic_perr_stat;	/* I-CACHE Reg. <11> set Data parity
					   <12> set TAG parity */
	unsigned long dc_perr_stat;	/* D-CACHE error Reg. Bits set to 1:
					   <2> Data error in bank 0
					   <3> Data error in bank 1
					   <4> Tag error in bank 0
					   <5> Tag error in bank 1 */
	unsigned long va;		/* Effective VA of fault or miss. */
	unsigned long mm_stat;		/* Holds the reason for D-stream
					   fault or D-cache parity errors */
	unsigned long sc_addr;		/* Address that was being accessed
					   when EV5 detected Secondary cache
					   failure. */
	unsigned long sc_stat;		/* Helps determine if the error was
					   TAG/Data parity (Secondary Cache) */
	unsigned long bc_tag_addr;	/* Contents of EV5 BC_TAG_ADDR */
	unsigned long ei_addr;		/* Physical address of any transfer
					   that is logged in EV5 EI_STAT */
	unsigned long fill_syndrome;	/* For correcting ECC errors. */
	unsigned long ei_stat;		/* Helps identify reason of any
					   processor uncorrectable error
					   at its external interface. */
	unsigned long ld_lock;		/* Contents of EV5 LD_LOCK register */
};

struct el_common_EV6_mcheck {
	unsigned int FrameSize;		/* Bytes, including this field */
	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
	unsigned int CpuOffset;		/* Offset to CPU-specific info */
	unsigned int SystemOffset;	/* Offset to system-specific info */
	unsigned int MCHK_Code;
	unsigned int MCHK_Frame_Rev;
	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
	unsigned long DC_STAT;		/* (See the 21264 Spec) */
	unsigned long C_ADDR;
	unsigned long DC1_SYNDROME;
	unsigned long DC0_SYNDROME;
	unsigned long C_STAT;
	unsigned long C_STS;
	unsigned long MM_STAT;
	unsigned long EXC_ADDR;
	unsigned long IER_CM;
	unsigned long ISUM;
	unsigned long RESERVED0;
	unsigned long PAL_BASE;
	unsigned long I_CTL;
	unsigned long PCTX;
};

extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))

#define prepare_to_switch()	do { } while (0)
#define switch_to(prev, next, last)			\
do {							\
	unsigned long pcbb;				\
	current = (next);				\
	pcbb = virt_to_phys(&current->thread);		\
	(last) = alpha_switch_to(pcbb, (prev));		\
	check_mmu_context();				\
} while (0)

extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct *);

#define mb() \
__asm__ __volatile__("mb" : : : "memory")

#define rmb() \
__asm__ __volatile__("mb" : : : "memory")

#define wmb() \
__asm__ __volatile__("wmb" : : : "memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
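
/*
 * Illustrative sketch: the classic producer/consumer pairing of
 * wmb()/rmb() above.  These helpers are hypothetical; barrier()
 * comes in via <linux/kernel.h>.
 */
static inline void
example_publish(volatile long *data, volatile long *flag, long value)
{
	*data = value;		/* write the payload first... */
	wmb();			/* ...and order it before the flag update */
	*flag = 1;		/* signal the consumer */
}

static inline long
example_consume(volatile long *data, volatile long *flag)
{
	while (!*flag)		/* wait for the producer's signal */
		barrier();
	rmb();			/* order the flag read before the data read */
	return *data;
}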

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")

enum implver_enum {
	IMPLVER_EV4,
	IMPLVER_EV5,
	IMPLVER_EV6
};

#ifdef CONFIG_ALPHA_GENERIC
#define implver()				\
({ unsigned long __implver;			\
   __asm__ ("implver %0" : "=r"(__implver));	\
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code. */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif

enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_CIX = (1UL << 2),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask)						\
({ unsigned long __amask, __input = (mask);			\
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
   __amask; })
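
/*
 * Illustrative sketch: run-time CPU probing with the two macros
 * above.  The amask instruction clears the bit of every feature the
 * CPU implements, so a zero result means "supported".  These helpers
 * are hypothetical.
 */
static inline int
example_have_bwx(void)
{
	return amask(AMASK_BWX) == 0;	/* byte/word extension present? */
}

static inline int
example_is_ev6(void)
{
	return implver() == IMPLVER_EV6;
}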

#define __CALL_PAL_R0(NAME, TYPE)				\
static inline TYPE NAME(void)					\
{								\
	register TYPE __r0 __asm__("$0");			\
	__asm__ __volatile__(					\
		"call_pal %1 # " #NAME				\
		: "=r" (__r0)					\
		: "i" (PAL_ ## NAME)				\
		: "$1", "$16", "$22", "$23", "$24", "$25");	\
	return __r0;						\
}

#define __CALL_PAL_W1(NAME, TYPE0)				\
static inline void NAME(TYPE0 arg0)				\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %1 # " #NAME				\
		: "=r"(__r16)					\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %2 # " #NAME				\
		: "=r"(__r16), "=r"(__r17)			\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
static inline RTYPE NAME(TYPE0 arg0)				\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %2 # " #NAME				\
		: "=r"(__r16), "=r"(__r0)			\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %3 # " #NAME				\
		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);
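
/*
 * Illustrative note: each declaration above expands to a static
 * inline PALcode wrapper; e.g. the swpipl line yields roughly
 *
 *	static inline unsigned long swpipl(unsigned long arg0);
 *
 * so a caller can, for instance, read the current CPU number
 * (hypothetical helper):
 */
static inline unsigned long
example_cpu_id(void)
{
	return whami();		/* CPU number via the whami PAL call */
}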

#define IPL_MIN		0
#define IPL_SW0		1
#define IPL_SW1		2
#define IPL_DEV0	3
#define IPL_DEV1	4
#define IPL_TIMER	5
#define IPL_PERF	6
#define IPL_POWERFAIL	6
#define IPL_MCHECK	7
#define IPL_MAX		7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN		__min_ipl
extern int __min_ipl;
#endif

#define getipl()		(rdps() & 7)
#define setipl(ipl)		((void) swpipl(ipl))

#define __cli()			do { setipl(IPL_MAX); barrier(); } while (0)
#define __sti()			do { barrier(); setipl(IPL_MIN); } while (0)
#define __save_flags(flags)	((flags) = rdps())
#define __save_and_cli(flags)	do { (flags) = swpipl(IPL_MAX); barrier(); } while (0)
#define __save_and_sti(flags)	do { barrier(); (flags) = swpipl(IPL_MIN); } while (0)
#define __restore_flags(flags)	do { barrier(); setipl(flags); barrier(); } while (0)

#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_set(flags)		__save_and_sti(flags)
#define local_irq_restore(flags)	__restore_flags(flags)
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
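
/*
 * Illustrative sketch: the usual pattern for a short local critical
 * section built from the macros above.  The helper and its counter
 * argument are hypothetical.
 */
static inline void
example_critical_increment(volatile long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* raise IPL to IPL_MAX */
	*counter += 1;			/* must not be interrupted */
	local_irq_restore(flags);	/* return to the saved IPL */
}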

#ifdef CONFIG_SMP

extern int global_irq_holder;

#define save_and_cli(flags)	(save_flags(flags), cli())

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);

#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(flags)	((flags) = __global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)

#else /* CONFIG_SMP */

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(flags)	__save_flags(flags)
#define save_and_cli(flags)	__save_and_cli(flags)
#define restore_flags(flags)	__restore_flags(flags)

#endif /* CONFIG_SMP */

/*
 * TB routines..
 */
#define __tbi(nr, arg, arg1...)					\
({								\
	register unsigned long __r16 __asm__("$16") = (nr);	\
	register unsigned long __r17 __asm__("$17"); arg;	\
	__asm__ __volatile__(					\
		"call_pal %3 #__tbi"				\
		: "=r" (__r16), "=r" (__r17)			\
		: "0" (__r16), "i" (PAL_tbi), ##arg1		\
		: "$0", "$1", "$22", "$23", "$24", "$25");	\
})

#define tbi(x,y)	__tbi(x, __r17 = (y), "1" (__r17))
#define tbisi(x)	__tbi(1, __r17 = (x), "1" (__r17))
#define tbisd(x)	__tbi(2, __r17 = (x), "1" (__r17))
#define tbis(x)		__tbi(3, __r17 = (x), "1" (__r17))
#define tbiap()		__tbi(-1, /* no second argument */)
#define tbia()		__tbi(-2, /* no second argument */)
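
/*
 * Illustrative sketch: after modifying a PTE, the stale translation
 * for a single address can be flushed with tbis(); tbia() flushes
 * the whole TB.  Hypothetical helper.
 */
static inline void
example_flush_one_page(unsigned long addr)
{
	tbis(addr);	/* invalidate I- and D-stream TB entries for addr */
}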

/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 */

extern __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

extern __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

#define tas(ptr) (xchg((ptr), 1))
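
/*
 * Illustrative sketch: a minimal test-and-set spinlock built on
 * xchg(), assuming a lock word of 0 (free) / 1 (held).  The real
 * kernel spinlocks live elsewhere; this only demonstrates the
 * primitive.  Hypothetical helpers.
 */
static inline void
example_spin_lock(volatile int *lock)
{
	while (xchg(lock, 1) != 0)	/* atomically try to take the lock */
		while (*lock)		/* spin read-only while it is held */
			barrier();
}

static inline void
example_spin_unlock(volatile int *lock)
{
	mb();		/* order critical-section accesses before release */
	*lock = 0;
}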


/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change.  If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far as I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1

extern __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

extern __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
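
/*
 * Illustrative sketch: the standard compare-and-swap retry loop,
 * here adding to a counter without a lock.  Hypothetical helper.
 */
static inline long
example_lockfree_add(volatile long *counter, long inc)
{
	long old, new;

	do {
		old = *counter;		/* sample the current value */
		new = old + inc;
	} while (cmpxchg(counter, old, new) != old);	/* retry if raced */

	return new;
}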

#endif /* __ASSEMBLY__ */

#endif /* __ALPHA_SYSTEM_H */