/*
 *  arch/s390/kernel/irq.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/irq.c"
 *    Copyright (C) 1992, 1999 Linus Torvalds, Ingo Molnar
 *
 *  S/390 I/O interrupt processing and I/O request processing are
 *   implemented in arch/s390/kernel/s390io.c
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/lowcore.h>

void          s390_init_IRQ   ( void );
void          s390_free_irq   ( unsigned int irq, void *dev_id);
int           s390_request_irq( unsigned int irq,
                     void           (*handler)(int, void *, struct pt_regs *),
                     unsigned long  irqflags,
                     const char    *devname,
                     void          *dev_id);

#if 0
/*
 * The following vectors are part of the Linux architecture; there
 * is no hardware IRQ pin equivalent for them. They are triggered
 * through the ICC by us (IPIs), via smp_message_pass():
 */
BUILD_SMP_INTERRUPT(reschedule_interrupt)
BUILD_SMP_INTERRUPT(invalidate_interrupt)
BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
BUILD_SMP_INTERRUPT(mtrr_interrupt)
BUILD_SMP_INTERRUPT(spurious_interrupt)
#endif

/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */
#ifdef CONFIG_SMP
atomic_t global_irq_holder = ATOMIC_INIT(NO_PROC_ID);
atomic_t global_irq_lock = ATOMIC_INIT(0);
atomic_t global_irq_count = ATOMIC_INIT(0);
atomic_t global_bh_count;

/*
 * "global_cli()" is a special case, in that it can hold the
 * interrupts disabled for a longish time, and also because
 * we may be doing TLB invalidates when holding the global
 * IRQ lock for historical reasons. Thus we may need to check
 * SMP invalidate events specially by hand here (but not in
 * any normal spinlocks)
 *
 * Thankfully we don't need this as we can deliver flush tlbs with
 * interrupts disabled DJB :-)
 */
#define check_smp_invalidate(cpu)

static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d]\n",
	       atomic_read(&global_irq_count), local_irq_count(cpu));
	printk("bh:   %d [%d]\n",
	       atomic_read(&global_bh_count), local_bh_count(cpu));
	stack = (unsigned long *) &str;
	for (i = 40; i; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}

#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count(cpu) ||
			    !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		atomic_set(&global_irq_lock, 0);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			check_smp_invalidate(cpu);
			if (atomic_read(&global_irq_count))
				continue;
			if (atomic_read(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu)
			    && atomic_read(&global_bh_count))
				continue;
			if (!atomic_compare_and_swap(0, 1, &global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
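
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * before freeing data that a bottom half may still be using, unhook the
 * handler and then wait for any running bottom halves to drain.
 *
 *	remove_bh(MYDEV_BH);		// MYDEV_BH is a hypothetical bh slot
 *	synchronize_bh();		// no bh handler runs past this point
 *	kfree(mydev_bh_data);		// now safe to free the shared data
 */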

/*
 * This is called when we want to synchronize with
 * interrupts. We may, for example, tell a device to
 * stop sending interrupts; but to make sure there
 * are no interrupt handlers still executing on another
 * CPU, we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}
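
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): quiesce the device first, then make sure no handler is still
 * running on another CPU before tearing its state down.
 *
 *	mydev_disable_interrupts(dev);	// hypothetical: device stops
 *					// raising interrupts
 *	synchronize_irq();		// wait out handlers in flight
 *	mydev_free_buffers(dev);	// no handler can race us now
 */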

static inline void get_irqlock(int cpu)
{
	if (atomic_compare_and_swap(0, 1, &global_irq_lock) != 0) {
		/* do we already hold the lock? */
		if (cpu == atomic_read(&global_irq_holder))
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			check_smp_invalidate(cpu);
		} while (atomic_compare_and_swap(0, 1, &global_irq_lock) != 0);
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	atomic_set(&global_irq_holder, cpu);
}

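/*
 * __save_flags() stores the PSW system mask in the most significant
 * byte of the flags word, so PSW bit 6 (the I/O interrupt mask) lands
 * at bit 57 of the 64-bit value.
 */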
#define EFLAGS_I_SHIFT 57

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if (flags & (1UL << EFLAGS_I_SHIFT)) {
		int cpu = smp_processor_id();
		__cli();
		if (!in_irq())
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	if (!in_irq())
		release_irqlock(smp_processor_id());
	__sti();
}
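
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * on SMP, cli()/sti() map to the __global_* variants above, so one CPU
 * can fence off interrupt handlers on every CPU around a critical
 * region.
 *
 *	cli();		// __global_cli(): grab the global irq lock and
 *			// wait until no CPU is in an interrupt handler
 *	...		// touch data shared with interrupt handlers
 *	sti();		// __global_sti(): release the lock, re-enable
 */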

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> EFLAGS_I_SHIFT) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!in_irq()) {
		if (local_enabled)
			retval = 1;
		if (atomic_read(&global_irq_holder) == smp_processor_id())
			retval = 0;
	}
	return retval;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
		       flags, (&flags)[-1]);
	}
}
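
/*
 * A minimal sketch of how the four state codes above round-trip
 * (hypothetical caller, not part of this file): save_flags() yields one
 * of the values 0-3, and restore_flags() re-establishes exactly that
 * state rather than a raw PSW mask.
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);	// e.g. 3: locally enabled, no global lock
 *	cli();			// now in state 0: global cli
 *	...
 *	restore_flags(flags);	// back to state 3: just __sti()
 */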

#endif


void __init init_IRQ(void)
{
	s390_init_IRQ();
}


void free_irq(unsigned int irq, void *dev_id)
{
	s390_free_irq(irq, dev_id);
}


int request_irq( unsigned int   irq,
                 void           (*handler)(int, void *, struct pt_regs *),
                 unsigned long  irqflags,
                 const char    *devname,
                 void          *dev_id)
{
	return s390_request_irq(irq, handler, irqflags, devname, dev_id);
}
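
/*
 * A minimal usage sketch (hypothetical driver, not part of this file).
 * On S/390 the "irq" number effectively names an I/O subchannel rather
 * than a hardware pin, but the calling convention matches other ports.
 *
 *	static void mydev_handler(int irq, void *dev_id,
 *				  struct pt_regs *regs)
 *	{
 *		// acknowledge and service the device here
 *	}
 *
 *	if (request_irq(irq, mydev_handler, 0, "mydev", &mydev_data))
 *		return -EBUSY;	// could not attach to the subchannel
 *	...
 *	free_irq(irq, &mydev_data);	// detach when done
 */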

void init_irq_proc(void)
{
	/* For now, nothing... */
}

#ifdef CONFIG_SMP
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
EXPORT_SYMBOL(global_irq_holder);
EXPORT_SYMBOL(global_irq_lock);
EXPORT_SYMBOL(global_irq_count);
EXPORT_SYMBOL(global_bh_count);
#endif

EXPORT_SYMBOL(global_bh_lock);