1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Default SMP lock implementation
7  */
8 #ifndef __ASM_SMPLOCK_H
9 #define __ASM_SMPLOCK_H
10 
11 #include <linux/interrupt.h>
12 #include <linux/spinlock.h>
13 
/* The big kernel lock (BKL): a single global spinlock, defined elsewhere. */
extern spinlock_t kernel_flag;

/*
 * True if kernel_flag is currently held.  spin_is_locked() reports the
 * global state of the lock — presumably held by this CPU at most call
 * sites, but that is not checked here.
 */
#define kernel_locked()		spin_is_locked(&kernel_flag)
17 
18 /*
19  * Release global kernel lock and global interrupt lock
20  */
/*
 * task->lock_depth >= 0 means @task currently owns the BKL, so drop the
 * spinlock.  lock_depth itself is deliberately left unchanged so that
 * reacquire_kernel_lock() can re-take the lock on the same condition.
 * NOTE(review): release_irqlock()/__sti() are assumed to release the
 * global IRQ lock for @cpu and re-enable local interrupts (arch-specific
 * helpers, not visible in this file) — confirm against the arch code.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)
28 
29 /*
30  * Re-acquire the kernel lock
31  */
/*
 * Counterpart to release_kernel_lock(): if @task owned the BKL before
 * the reschedule (lock_depth >= 0, untouched by the release), take the
 * spinlock again.  May spin waiting for other CPUs.
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
37 
38 
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
lock_kernel(void)46 static __inline__ void lock_kernel(void)
47 {
48 	if (!++current->lock_depth)
49 		spin_lock(&kernel_flag);
50 }
51 
unlock_kernel(void)52 static __inline__ void unlock_kernel(void)
53 {
54 	if (--current->lock_depth < 0)
55 		spin_unlock(&kernel_flag);
56 }
57 
58 #endif /* __ASM_SMPLOCK_H */
59