1 #ifndef __ASM_SOFTIRQ_H
2 #define __ASM_SOFTIRQ_H
3 
4 #include <asm/atomic.h>
5 #include <asm/hardirq.h>
6 
/*
 * Raw per-CPU bottom-half reference-count manipulation.
 *
 * cpu_bh_disable() bumps local_bh_count(cpu) *before* the barrier()
 * so the compiler cannot hoist BH-protected accesses above the
 * disable; __cpu_bh_enable() places the barrier() *before* the
 * decrement for the mirror-image reason.  The barrier() placement is
 * load-bearing -- do not reorder it relative to the count update.
 *
 * __cpu_bh_enable() is the low-level variant: unlike local_bh_enable()
 * below, it does NOT check for (or run) pending softirqs.
 */
#define __cpu_bh_enable(cpu) \
		do { barrier(); local_bh_count(cpu)--; } while (0)
#define cpu_bh_disable(cpu) \
		do { local_bh_count(cpu)++; barrier(); } while (0)
11 
/* Convenience wrappers operating on the current CPU's BH count. */
#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
14 
15 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
16 
/*
 * local_bh_enable(): drop the bottom-half count and, if it reaches
 * zero with softirqs pending, run them via do_softirq_thunk.  The
 * slow path lives in the out-of-line lock section so the common
 * (nothing pending) case is a single compare and a not-taken branch.
 *
 * NOTE: this assembly code assumes:
 *
 *    (char *)&local_bh_count - 8 == (char *)&softirq_pending
 *
 * i.e. the "-8(%0)" operand reaches softirq_pending through the
 * local_bh_count pointer.  If you change the offsets in irq_stat
 * then you have to update this code as well.
 *
 * NOTE(review): the empty clobber list relies on do_softirq_thunk
 * preserving all caller-visible registers, and no "memory" clobber is
 * declared even though softirq handlers modify memory -- presumably
 * the asm volatile plus the surrounding barrier()/decrement are deemed
 * sufficient here; confirm against the thunk's definition before
 * touching this.
 */
#define local_bh_enable()						\
do {									\
	/* pointer to this CPU's bottom-half count */			\
	unsigned int *ptr = &local_bh_count(smp_processor_id());	\
									\
	/* finish all BH-protected work before dropping the count */	\
	barrier();							\
	if (!--*ptr)							\
		__asm__ __volatile__ (					\
			/* any softirqs pending? (see offset NOTE) */	\
			"cmpl $0, -8(%0);"				\
			"jnz 2f;"					\
			"1:;"						\
			/* out-of-line slow path: run softirqs */	\
			LOCK_SECTION_START("") 		\
			"2:"	\
			"call do_softirq_thunk;"		\
			""		\
			"jmp 1b;"					\
			LOCK_SECTION_END			\
		: /* no output */					\
		: "r" (ptr)				\
		/* no registers clobbered */ );				\
} while (0)
45 
46 #endif	/* __ASM_SOFTIRQ_H */
47