/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked() spin_is_locked(&kernel_flag)
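
/*
 * Note on the scheme (comment added for clarity, derivable from the
 * code below): kernel_flag is the single, global "big kernel lock"
 * (BKL) spinlock shared by all CPUs, and each task tracks its own
 * recursion count in task->lock_depth.  A depth of -1 means the task
 * does not hold the lock; values >= 0 count nested lock_kernel()
 * calls, so only the outermost lock/unlock pair actually touches the
 * spinlock.
 */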
/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)
/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
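
/*
 * Illustrative sketch (not part of the original header): the scheduler
 * is the intended user of this pair, dropping the BKL around the
 * context switch and taking it back for tasks that were holding it.
 * The variable names below (prev, this_cpu) are only placeholders:
 *
 *	release_kernel_lock(prev, this_cpu);
 *	...				// switch to the next task
 *	reacquire_kernel_lock(current);
 *
 * lock_depth lives in the task_struct, so the recursion count survives
 * the context switch with no extra bookkeeping here.
 */
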
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to worry
 * about other CPUs.
 */
extern __inline__ void lock_kernel(void)
{
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}
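
/*
 * Illustrative sketch (not part of the original header) of how the
 * lock nests, tracing lock_depth for a task that starts at -1:
 *
 *	lock_kernel();		// lock_depth: -1 -> 0, spin_lock taken
 *	lock_kernel();		// lock_depth:  0 -> 1, no spinlock op
 *	unlock_kernel();	// lock_depth:  1 -> 0, lock still held
 *	unlock_kernel();	// lock_depth:  0 -> -1, spin_unlock
 */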