1 #ifndef __ASM_CRIS_SYSTEM_H
2 #define __ASM_CRIS_SYSTEM_H
3
4 #include <linux/config.h>
5
6 #include <asm/segment.h>
7
/* The switch_to macro calls resume, an asm function in entry.S which does the actual
 * task switching.  "last" receives the task we switched away from, as returned
 * by resume().
 */

extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);
#define prepare_to_switch() do { } while(0)
/* The third resume() argument is offsetof(struct task_struct, thread),
 * computed by hand via the null-pointer cast idiom. */
#define switch_to(prev,next,last) last = resume(prev,next, \
					 (int)&((struct task_struct *)0)->thread)
16
/* Read the CPU PC register. */

extern inline unsigned long rdpc(void)
{
	unsigned long pc;

	/* __volatile__ (not bare "volatile") for consistency with the other
	 * register accessors in this file; the read must not be elided. */
	__asm__ __volatile__ ("move.d $pc,%0" : "=rm" (pc));
	return pc;
}
25
/* Read the CPU version register ($vr; only a byte is read here). */

extern inline unsigned long rdvr(void)
{
	unsigned char vr;

	/* __volatile__ for consistency with the other accessors. */
	__asm__ __volatile__ ("move $vr,%0" : "=rm" (vr));
	return vr;	/* zero-extended to unsigned long by the return conversion */
}
33
/* Read the user-mode stack pointer register ($usp). */

extern inline unsigned long rdusp(void)
{
	unsigned long sp;

	__asm__ __volatile__ ("move $usp,%0" : "=rm" (sp));
	return sp;
}
41
/* Write the user-mode stack pointer register ($usp) from "usp". */
#define wrusp(usp) \
	__asm__ __volatile__("move %0,$usp" : /* no outputs */ : "rm" (usp))
44
/* Read the current stack pointer ($sp). */

extern inline unsigned long rdsp(void)
{
	unsigned long ret;

	__asm__ __volatile__ ("move.d $sp,%0" : "=rm" (ret));
	return ret;
}
52
/* Stub kept for interface compatibility: there is no segment base to
 * fetch on this port, so the argument is ignored and 0 is returned. */
extern inline unsigned long _get_base(char * addr)
{
	(void)addr;	/* unused */
	return 0;
}
57
/* No-op instruction.  Note: no trailing semicolon in the expansion, so
 * that "nop();" is a single statement and "if (x) nop(); else ..."
 * stays syntactically valid (the original ";" inside the macro broke
 * that construct). */
#define nop() __asm__ __volatile__ ("nop")

/* Exchange *ptr with x (irq-safe, see __xchg() below); evaluates to the
 * old value of *ptr, cast back to the pointed-to type. */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/* Test-and-set: store 1, yield the previous value. */
#define tas(ptr) (xchg((ptr),1))

/* Dummy wrapper type (an artificially large struct) used to present a
 * pointer as a full-width asm memory operand. */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
65
#ifdef CONFIG_ETRAX_DEBUG_INTERRUPT
#if 0
/* use these and an oscilloscope to see the fraction of time we're running with IRQ's disabled */
/* it assumes the LED's are on port 0x90000000 of course. */
#define sti() __asm__ __volatile__ ( "ei\n\tpush $r0\n\tmoveq 0,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0" );
#define cli() __asm__ __volatile__ ( "di\n\tpush $r0\n\tmove.d 0x40000,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0");
#define save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
#define restore_flags(x) __asm__ __volatile__ ("move %0,$ccr\n\tbtstq 5,%0\n\tbpl 1f\n\tnop\n\tpush $r0\n\tmoveq 0,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0\n1:\n" : : "r" (x) : "memory");
#else

/* Log when interrupts are turned on and off and who did it. */
/* Bit 5 of $ccr is used here as the interrupt-enable flag. */
#define CCR_EI_MASK (1 << 5)
/* in debug.c */
extern int log_int_pos;
extern int log_int_size;
extern int log_int_enable;
extern int log_int_trig0_pos;
extern int log_int_trig1_pos;
extern void log_int(unsigned long pc, unsigned long prev_ccr, unsigned long next_ccr);

/* If you only want to log changes - change the 1 to a 0 below
 * (the "1 ||" forces every transition to be logged, not just those
 * that flip the EI bit). */
#define LOG_INT(pc, curr_ccr, next_ccr) do { \
	if (1 || (curr_ccr ^ next_ccr) & CCR_EI_MASK) \
		log_int((pc), curr_ccr, next_ccr); \
}while(0)

#define __save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
93
/* Disable interrupts, recording the transition via LOG_INT for the
 * irq-debugging facility. */
extern inline void __cli(void)
{
	unsigned long ccr;
	unsigned long pc = rdpc();

	__save_flags(ccr);
	LOG_INT(pc, ccr, 0);
	__asm__ __volatile__ ("di" : : : "memory");
}
101
102
__sti(void)103 extern inline void __sti(void)
104 {
105 unsigned long pc = rdpc();
106 unsigned long curr_ccr; __save_flags(curr_ccr);
107 LOG_INT(pc, curr_ccr, CCR_EI_MASK);
108 __asm__ __volatile__ ( "ei" : : :"memory");
109 }
110
/* Restore a previously saved $ccr value (possibly re-enabling
 * interrupts), recording the transition via LOG_INT. */
extern inline void __restore_flags(unsigned long x)
{
	unsigned long ccr;
	unsigned long pc = rdpc();

	__save_flags(ccr);
	LOG_INT(pc, ccr, x);
	__asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory");
}
118
119 /* For spinlocks etc */
120 #define local_irq_save(x) do { __save_flags(x); __cli(); }while (0)
121 #define local_irq_restore(x) restore_flags(x)
122
123 #define local_irq_disable() cli()
124 #define local_irq_enable() sti()
125
126 #endif
127
128 #else
129 #define __cli() __asm__ __volatile__ ( "di" : : :"memory");
130 #define __sti() __asm__ __volatile__ ( "ei" : : :"memory");
131 #define __save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
132 #define __restore_flags(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory");
133
134 /* For spinlocks etc */
135 #define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory");
136 #define local_irq_set(x) __asm__ __volatile__ ("move $ccr,%0\n\tei" : "=rm" (x) : : "memory");
137 #define local_irq_restore(x) restore_flags(x)
138
139 #define local_irq_disable() cli()
140 #define local_irq_enable() sti()
141
142 #endif
143
/* Classic irq-flag API, built on the __-prefixed primitives selected by
 * the CONFIG_ETRAX_DEBUG_INTERRUPT branches above. */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#define save_and_sti(x) do { save_flags(x); sti(); } while(0)
150
/* Exchange the 1-, 2- or 4-byte object at "ptr" with "x" and return the
 * old value.  Etrax has no atomic xchg instruction, so we make the
 * load/store pair atomic with respect to local interrupts by disabling
 * irq's around it (no SMP guarantee is provided or needed here).
 *
 * The previous version punned through the first bytes of an unsigned
 * long temporary (*(unsigned char *)&temp), which is little-endian
 * specific; direct loads/stores at the right width behave identically
 * on CRIS and are correct regardless of byte order.  The dead #if 0
 * asm-based variant has been removed.
 *
 * NOTE: an unsupported "size" silently returns x with no exchange,
 * matching the original behavior (no default diagnostic available).
 */
extern inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long flags;
	unsigned long old;

	save_flags(flags);	/* save flags, including irq enable bit */
	cli();			/* shut off irq's */
	switch (size) {
	case 1:
		old = *(unsigned char *)ptr;
		*(unsigned char *)ptr = (unsigned char)x;
		break;
	case 2:
		old = *(unsigned short *)ptr;
		*(unsigned short *)ptr = (unsigned short)x;
		break;
	case 4:
		old = *(unsigned long *)ptr;
		*(unsigned long *)ptr = x;
		break;
	default:
		old = x;	/* bad size: behave as before (no swap) */
		break;
	}
	restore_flags(flags);	/* restore irq enable bit */
	return old;
}
215
/* Memory barriers: pure compiler barriers here (an empty asm with a
 * "memory" clobber); no barrier instruction is emitted. */
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

/* smp_* variants reduce to barrier() on UP builds (barrier() is the
 * generic compiler barrier; presumably from <linux/kernel.h>). */
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif

#define iret()	/* no-op on this port */

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);		/* defined elsewhere (see process/arch code) */
void enable_hlt(void);

#endif /* __ASM_CRIS_SYSTEM_H */
242