#ifndef __ASM_SMPLOCK_H
#define __ASM_SMPLOCK_H

/*
 * <asm/smplock.h>
 *
 * i386 SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>

extern spinlock_cacheline_t kernel_flag_cacheline;
#define kernel_flag kernel_flag_cacheline.lock

#define kernel_locked()		spin_is_locked(&kernel_flag)
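
/*
 * kernel_locked() only tests the spinlock itself, so on SMP it reports
 * whether any CPU currently holds the big kernel lock, not specifically
 * this one.  Illustrative sketch (not part of this header): code that
 * must run under the BKL could sanity-check itself with
 *
 *	if (!kernel_locked())
 *		out_of_line_bug();
 */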

/*
 * Release the global kernel lock and the global interrupt lock, then
 * re-enable interrupts.  task->lock_depth is deliberately left untouched,
 * so reacquire_kernel_lock() below can tell whether the task was holding
 * the BKL when it was scheduled away.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)

/*
 * Re-acquire the kernel lock after a context switch, but only if the
 * task was holding it (lock_depth >= 0) when it was switched away.
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
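
/*
 * Illustrative sketch of how the two helpers above pair up around a
 * context switch (simplified; 'prev' and 'this_cpu' stand in for the
 * scheduler's locals):
 *
 *	release_kernel_lock(prev, this_cpu);
 *	...select the next task and switch to it...
 *	reacquire_kernel_lock(current);
 *
 * Because release_kernel_lock() leaves prev->lock_depth untouched,
 * reacquire_kernel_lock() re-takes the spinlock only for tasks that were
 * actually holding the BKL when they were scheduled away.
 */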


/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to worry about
 * other CPUs.  current->lock_depth starts out at -1, so only the
 * outermost lock_kernel() actually takes the spinlock; nested calls
 * just increment the depth.
 */
static __inline__ void lock_kernel(void)
{
#if 1
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
#else
	__asm__ __volatile__(
		"incl %1\n\t"
		"jne 9f"
		spin_lock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
}

static __inline__ void unlock_kernel(void)
{
	if (current->lock_depth < 0)
		out_of_line_bug();
#if 1
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
#else
	__asm__ __volatile__(
		"decl %1\n\t"
		"jns 9f\n\t"
		spin_unlock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
}
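
/*
 * Illustrative usage sketch (not part of this header): the BKL nests per
 * task, so a function may take it without knowing whether its caller
 * already holds it.  do_something_locked() below is a hypothetical helper:
 *
 *	lock_kernel();
 *	...touch state protected by the big kernel lock...
 *	do_something_locked();		(may itself call lock_kernel())
 *	unlock_kernel();
 *
 * Only the outermost unlock_kernel() drops current->lock_depth back to -1
 * and releases the spinlock; unbalanced calls trip the out_of_line_bug()
 * check above.
 */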

#endif /* __ASM_SMPLOCK_H */