/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#ifndef __ASM_SMPLOCK_H
#define __ASM_SMPLOCK_H

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

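/* True while any CPU is holding the big kernel lock */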
#define kernel_locked()			spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
{
	if (task->lock_depth >= 0)
		spin_unlock(&kernel_flag);
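	/* drop the global interrupt lock if this CPU holds it,
	 * then re-enable local interrupts */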
	release_irqlock(cpu);
	__sti();
}

/*
 * Re-acquire the kernel lock
 */
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
	if (task->lock_depth >= 0)
		spin_lock(&kernel_flag);
}

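/*
 * These two helpers are meant to bracket a context switch in the
 * scheduler.  A rough sketch of the expected usage (illustrative
 * only; the real caller is schedule() in kernel/sched.c):
 *
 *	release_kernel_lock(prev, this_cpu);
 *	...
 *	switch_to(prev, next);
 *	...
 *	reacquire_kernel_lock(current);
 */
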
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
static __inline__ void lock_kernel(void)
{
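	/* lock_depth starts at -1; only the -1 -> 0 transition takes the spinlock */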
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
}

static __inline__ void unlock_kernel(void)
{
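	/* drop the spinlock only when the outermost level exits (depth returns to -1) */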
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}

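/*
 * Nesting sketch (lock_depth after each call), assuming the task
 * starts out with lock_depth == -1, i.e. not holding the lock:
 *
 *	lock_kernel();		depth  0: kernel_flag taken
 *	lock_kernel();		depth  1: recursion, no spinlock op
 *	unlock_kernel();	depth  0: lock still held
 *	unlock_kernel();	depth -1: kernel_flag released
 */
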
#endif /* __ASM_SMPLOCK_H */