/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#ifdef __KERNEL__
#ifndef __ASM_SMPLOCK_H__
#define __ASM_SMPLOCK_H__

#include <linux/interrupt.h>
#include <linux/spinlock.h>

13 extern spinlock_t kernel_flag;
14 
15 #define kernel_locked()		spin_is_locked(&kernel_flag)
16 
/*
 * Release the global kernel lock and the global interrupt lock
 */
/*
 * Drop the BKL if @task holds it (lock_depth >= 0), hand back the
 * global irq lock for @cpu, and re-enable local interrupts.
 * lock_depth is left untouched so reacquire_kernel_lock() can restore
 * the previous state.  NOTE(review): @task is presumably the current
 * task on @cpu — confirm at the call sites (scheduler).
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
/*
 * Re-take the BKL for @task if it logically held it before
 * release_kernel_lock() dropped it (lock_depth >= 0).
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)


/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
lock_kernel(void)45 static __inline__ void lock_kernel(void)
46 {
47 	if (!++current->lock_depth)
48 		spin_lock(&kernel_flag);
49 }
unlock_kernel(void)51 static __inline__ void unlock_kernel(void)
52 {
53 	if (--current->lock_depth < 0)
54 		spin_unlock(&kernel_flag);
55 }
#endif /* __ASM_SMPLOCK_H__ */
#endif /* __KERNEL__ */