/*
 * Specialised local-global spinlocks. These can only be declared as global
 * variables, to avoid overhead and keep things simple (and we don't want to
 * start using them inside dynamically allocated structures).
 *
 * "local/global locks" (lglocks) can be used to:
 *
 * - Provide fast exclusive access to per-CPU data, with exclusive access to
 *   another CPU's data allowed but possibly subject to contention, and to
 *   provide very slow exclusive access to all per-CPU data.
 * - Or to provide very fast and scalable read serialisation, and to provide
 *   very slow exclusive serialisation of data (not necessarily per-CPU data).
 *
 * Brlocks are also implemented as a short-hand notation for the latter use
 * case.
 *
 * Illustrative usage sketches follow the br_* and lg_* wrapper macros below.
 *
 * Copyright 2009, 2010, Nick Piggin, Novell Inc.
 */
#ifndef __LINUX_LGLOCK_H
#define __LINUX_LGLOCK_H

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>

/* can make br locks by using local lock for read side, global lock for write */
#define br_lock_init(name)	name##_lock_init()
#define br_read_lock(name)	name##_local_lock()
#define br_read_unlock(name)	name##_local_unlock()
#define br_write_lock(name)	name##_global_lock_online()
#define br_write_unlock(name)	name##_global_unlock_online()

#define DECLARE_BRLOCK(name)	DECLARE_LGLOCK(name)
#define DEFINE_BRLOCK(name)	DEFINE_LGLOCK(name)
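
/*
 * Example: a minimal brlock usage sketch.  The names below are made up for
 * illustration only and do not exist in the tree.  Readers take only their
 * own CPU's lock; the writer takes every online CPU's lock.
 *
 *	DEFINE_BRLOCK(example_brlock);
 *
 *	static void example_read_side(void)
 *	{
 *		br_read_lock(example_brlock);
 *		// read this CPU's protected data
 *		br_read_unlock(example_brlock);
 *	}
 *
 *	static void example_write_side(void)
 *	{
 *		br_write_lock(example_brlock);
 *		// update data belonging to any CPU
 *		br_write_unlock(example_brlock);
 *	}
 *
 * br_lock_init(example_brlock) must be called once before first use, and
 * DECLARE_BRLOCK(example_brlock) goes in a header if other files use it.
 */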


#define lg_lock_init(name)	name##_lock_init()
#define lg_local_lock(name)	name##_local_lock()
#define lg_local_unlock(name)	name##_local_unlock()
#define lg_local_lock_cpu(name, cpu)	name##_local_lock_cpu(cpu)
#define lg_local_unlock_cpu(name, cpu)	name##_local_unlock_cpu(cpu)
#define lg_global_lock(name)	name##_global_lock()
#define lg_global_unlock(name)	name##_global_unlock()
#define lg_global_lock_online(name) name##_global_lock_online()
#define lg_global_unlock_online(name) name##_global_unlock_online()
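
/*
 * Example: a minimal lglock usage sketch (names are made up for
 * illustration).  The common case takes only the local CPU's lock; a
 * specific CPU's lock or all locks can be taken for the slower paths.
 *
 *	DEFINE_LGLOCK(example_lglock);
 *
 *	static void touch_this_cpu(void)
 *	{
 *		lg_local_lock(example_lglock);
 *		// fast: this CPU's data only
 *		lg_local_unlock(example_lglock);
 *	}
 *
 *	static void touch_one_cpu(int cpu)
 *	{
 *		lg_local_lock_cpu(example_lglock, cpu);
 *		// possibly contended: another CPU's data
 *		lg_local_unlock_cpu(example_lglock, cpu);
 *	}
 *
 *	static void touch_every_cpu(void)
 *	{
 *		lg_global_lock(example_lglock);
 *		// slow: all CPUs' data at once
 *		lg_global_unlock(example_lglock);
 *	}
 *
 * lg_lock_init(example_lglock) must run once before the lock is used.
 */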

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map

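/*
 * With lock debugging enabled, each lglock shares a single lockdep map
 * across all of its per-CPU spinlocks: local locks are annotated as read
 * acquisitions and global locks as write acquisitions of that one map.
 */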
#define DEFINE_LGLOCK_LOCKDEP(name)					\
 struct lock_class_key name##_lock_key;					\
 struct lockdep_map name##_lock_dep_map;				\
 EXPORT_SYMBOL(name##_lock_dep_map)

#else
#define LOCKDEP_INIT_MAP(a, b, c, d)

#define DEFINE_LGLOCK_LOCKDEP(name)
#endif


#define DECLARE_LGLOCK(name)						\
 extern void name##_lock_init(void);					\
 extern void name##_local_lock(void);					\
 extern void name##_local_unlock(void);					\
 extern void name##_local_lock_cpu(int cpu);				\
 extern void name##_local_unlock_cpu(int cpu);				\
 extern void name##_global_lock(void);					\
 extern void name##_global_unlock(void);				\
 extern void name##_global_lock_online(void);				\
 extern void name##_global_unlock_online(void);				\

#define DEFINE_LGLOCK(name)						\
									\
 DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
 DEFINE_LGLOCK_LOCKDEP(name);						\
									\
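 /* Set every possible CPU's lock to the unlocked state; the shared */	\
 /* lockdep map is registered when lock debugging is enabled. */	\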
 void name##_lock_init(void) {						\
	int i;								\
	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
	for_each_possible_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
	}								\
 }									\
 EXPORT_SYMBOL(name##_lock_init);					\
									\
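 /* Fast path: disable preemption and take only this CPU's lock. */	\
 /* Lockdep records this as a read acquisition of the shared map. */	\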
 void name##_local_lock(void) {						\
	arch_spinlock_t *lock;						\
	preempt_disable();						\
	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
	lock = &__get_cpu_var(name##_lock);				\
	arch_spin_lock(lock);						\
 }									\
 EXPORT_SYMBOL(name##_local_lock);					\
									\
 void name##_local_unlock(void) {					\
	arch_spinlock_t *lock;						\
	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
	lock = &__get_cpu_var(name##_lock);				\
	arch_spin_unlock(lock);						\
	preempt_enable();						\
 }									\
 EXPORT_SYMBOL(name##_local_unlock);					\
									\
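 /* Like the fast path above, but takes the given CPU's lock; this */	\
 /* may contend with that CPU's own local lock. */			\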
 void name##_local_lock_cpu(int cpu) {					\
	arch_spinlock_t *lock;						\
	preempt_disable();						\
	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
	lock = &per_cpu(name##_lock, cpu);				\
	arch_spin_lock(lock);						\
 }									\
 EXPORT_SYMBOL(name##_local_lock_cpu);					\
									\
 void name##_local_unlock_cpu(int cpu) {				\
	arch_spinlock_t *lock;						\
	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
	lock = &per_cpu(name##_lock, cpu);				\
	arch_spin_unlock(lock);						\
	preempt_enable();						\
 }									\
 EXPORT_SYMBOL(name##_local_unlock_cpu);				\
									\
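 /* Writer side used by br_write_lock(): take the lock of every CPU */	\
 /* that is online at this moment, in ascending cpu-id order. */	\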
 void name##_global_lock_online(void) {					\
	int i;								\
	preempt_disable();						\
	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
	for_each_online_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_lock(lock);					\
	}								\
 }									\
 EXPORT_SYMBOL(name##_global_lock_online);				\
									\
 void name##_global_unlock_online(void) {				\
	int i;								\
	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
	for_each_online_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_unlock(lock);					\
	}								\
	preempt_enable();						\
 }									\
 EXPORT_SYMBOL(name##_global_unlock_online);				\
									\
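 /* Take every possible CPU's lock in ascending cpu-id order; the */	\
 /* consistent order prevents deadlock between concurrent global */	\
 /* lockers. */								\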
 void name##_global_lock(void) {					\
	int i;								\
	preempt_disable();						\
	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
	for_each_possible_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_lock(lock);					\
	}								\
 }									\
 EXPORT_SYMBOL(name##_global_lock);					\
									\
 void name##_global_unlock(void) {					\
	int i;								\
	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
	for_each_possible_cpu(i) {					\
		arch_spinlock_t *lock;					\
		lock = &per_cpu(name##_lock, i);			\
		arch_spin_unlock(lock);					\
	}								\
	preempt_enable();						\
 }									\
 EXPORT_SYMBOL(name##_global_unlock);
#endif /* __LINUX_LGLOCK_H */