#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/memory.h>
#include <asm/hvcall.h>

/*
 * The following define selects basic or shared processor locking when
 * running on an RPA platform.  As we do more performance tuning, I
 * would expect this selection mechanism to change.  Dave E.
 */
/* #define SPLPAR_LOCKS */

typedef struct {
        volatile unsigned long lock;
} spinlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_is_locked(x)       ((x)->lock != 0)

static __inline__ int spin_trylock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%1         # spin_trylock\n\
        cmpdi   0,%0,0          # already held?\n\
        li      %0,0\n\
        bne-    2f              # yes, return 0\n\
        li      %0,1\n\
        stdcx.  13,0,%1         # store r13 (our paca pointer)\n\
        bne-    1b              # reservation lost, retry\n\
        isync\n\
2:"     : "=&r"(tmp)
        : "r"(&lock->lock)
        : "cr0", "memory");

        return tmp;
}

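/*
 * Example use of spin_trylock() (illustrative sketch only, not part of
 * this header; my_lock is a hypothetical lock):
 *
 *      if (spin_trylock(&my_lock)) {
 *              ... lock acquired, do the work ...
 *              spin_unlock(&my_lock);
 *      } else {
 *              ... lock was busy, take the fallback path ...
 *      }
 */
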
/*
 * Spin lock states:
 *   0        : Unlocked
 *   Negative : Locked.  Value is paca pointer (0xc...0) of holder
 */
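
/*
 * Rough C sketch of the shared-processor acquisition loop implemented in
 * inline assembly below (illustrative only, never compiled; holder_paca,
 * our_paca, yield_count and yield_to_holder() are hypothetical names
 * standing in for the raw 0x280/0x18 paca offsets and the yield/confer
 * hcalls):
 *
 *      while (lock->lock != 0) {
 *              holder_paca = (void *)lock->lock;
 *              if (holder_paca->yield_count & 1)       // odd: holder preempted
 *                      yield_to_holder(holder_paca);   // give it our cycles
 *              // even: holder is running, just keep spinning
 *      }
 *      lock->lock = (unsigned long)our_paca;           // via ldarx/stdcx. of r13
 */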
#ifdef CONFIG_PPC_ISERIES
static __inline__ void spin_lock(spinlock_t *lock)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"b      2f              # spin_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%2         # load the lock value\n\
        cmpdi   0,%0,0          # if not locked, try to acquire\n\
        beq-    2f\n\
        lwz     5,0x280(%0)     # load yield counter\n\
        andi.   %1,5,1          # if even then spin\n\
        beq     1b\n\
        lwsync                  # if odd, give up cycles\n\
        ldx     %1,0,%2         # reverify the lock holder\n\
        cmpd    %0,%1\n\
        bne     1b              # new holder so restart\n\
        li      3,0x25          # yield hcall 0x8-12\n\
        rotrdi  3,3,1           # put the bits in the right spot\n\
        lhz     4,0x18(%0)      # processor number\n\
        sldi    4,4,32          # move into top half of word\n\
        or      5,5,4           # r5 has yield cnt - or it in\n\
        li      4,2             # yield to processor\n\
        li      0,-1            # indicate an hcall\n\
        sc                      # do the hcall\n\
        b       1b\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%2\n\
        cmpdi   0,%0,0\n\
        bne-    1b\n\
        stdcx.  13,0,%2\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&lock->lock)
        : "r0", "r3", "r4", "r5", "ctr", "cr0", "cr1", "cr2", "cr3", "cr4",
          "xer", "memory");
}
#else
#ifdef SPLPAR_LOCKS
static __inline__ void spin_lock(spinlock_t *lock)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"b      2f              # spin_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%2         # load the lock value\n\
        cmpdi   0,%0,0          # if not locked, try to acquire\n\
        beq-    2f\n\
        lwz     5,0x280(%0)     # load dispatch counter\n\
        andi.   %1,5,1          # if even then spin\n\
        beq     1b\n\
        lwsync                  # if odd, give up cycles\n\
        ldx     %1,0,%2         # reverify the lock holder\n\
        cmpd    %0,%1\n\
        bne     1b              # new holder so restart\n\
        li      3,0xE4          # give up the cycles H_CONFER\n\
        lhz     4,0x18(%0)      # processor number\n\
                                # r5 has dispatch cnt already\n"
        HVSC
"       b       1b\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%2\n\
        cmpdi   0,%0,0\n\
        bne-    1b\n\
        stdcx.  13,0,%2\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&lock->lock)
        : "r3", "r4", "r5", "cr0", "cr1", "ctr", "xer", "memory");
}
#else
static __inline__ void spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"b      2f              # spin_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%1         # load the lock value\n\
        cmpdi   0,%0,0          # if not locked, try to acquire\n\
        bne+    1b\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%1\n\
        cmpdi   0,%0,0\n\
        bne-    1b\n\
        stdcx.  13,0,%1\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&lock->lock)
        : "cr0", "memory");
}
#endif /* SPLPAR_LOCKS */
#endif /* CONFIG_PPC_ISERIES */

static __inline__ void spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__("lwsync    # spin_unlock": : :"memory");
        lock->lock = 0;
}
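
/*
 * Typical lock/unlock pairing (illustrative sketch only, not part of
 * this header; my_lock is a hypothetical lock):
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      spin_lock(&my_lock);
 *      ... critical section ...
 *      spin_unlock(&my_lock);
 */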

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * Write lock states:
 *   0        : Unlocked
 *   Positive : Reader count
 *   Negative : Writer locked.  Value is paca pointer (0xc...0) of holder
 *
 * If the lock is not held, try to acquire it.
 * If the lock is held by a writer, yield cycles to the holder.
 * If the lock is held by reader(s), spin.
 */
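
/*
 * Sketch of the irq mixing described above (illustrative only;
 * my_rwlock, my_data and flags are hypothetical, and the *_irqsave
 * variants come from the generic spinlock API, not this header):
 *
 *      // reader, may run in interrupt context
 *      read_lock(&my_rwlock);
 *      ... read my_data ...
 *      read_unlock(&my_rwlock);
 *
 *      // writer, must exclude interrupt-context readers
 *      write_lock_irqsave(&my_rwlock, flags);
 *      ... update my_data ...
 *      write_unlock_irqrestore(&my_rwlock, flags);
 */
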
typedef struct {
        volatile signed long lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED        (rwlock_t) { 0 }

static __inline__ int read_trylock(rwlock_t *rw)
{
        unsigned long tmp;
        unsigned int ret;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # read_trylock\n\
        li      %1,0            # assume failure\n\
        addic.  %0,%0,1         # bump the reader count\n\
        ble-    2f              # writer holds the lock, give up\n\
        stdcx.  %0,0,%2\n\
        bne-    1b              # reservation lost, retry\n\
        li      %1,1            # success\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock)
        : "cr0", "memory");

        return ret;
}

#ifdef CONFIG_PPC_ISERIES
static __inline__ void read_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"b      2f              # read_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%2\n\
        cmpdi   0,%0,0\n\
        bge-    2f\n\
        lwz     5,0x280(%0)     # load yield counter\n\
        andi.   %1,5,1          # if even then spin\n\
        beq     1b\n\
        lwsync                  # if odd, give up cycles\n\
        ldx     %1,0,%2         # reverify the lock holder\n\
        cmpd    %0,%1\n\
        bne     1b              # new holder so restart\n\
        li      3,0x25          # yield hcall 0x8-12\n\
        rotrdi  3,3,1           # put the bits in the right spot\n\
        lhz     4,0x18(%0)      # processor number\n\
        sldi    4,4,32          # move into top half of word\n\
        or      5,5,4           # r5 has yield cnt - or it in\n\
        li      4,2             # yield to processor\n\
        li      0,-1            # indicate an hcall\n\
        sc                      # do the hcall\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%2\n\
        addic.  %0,%0,1\n\
        ble-    1b\n\
        stdcx.  %0,0,%2\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&rw->lock)
        : "r0", "r3", "r4", "r5", "ctr", "cr0", "cr1", "cr2", "cr3", "cr4",
          "xer", "memory");
}
#else
#ifdef SPLPAR_LOCKS
static __inline__ void read_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"b      2f              # read_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%2\n\
        cmpdi   0,%0,0\n\
        bge-    2f\n\
        lwz     5,0x280(%0)     # load dispatch counter\n\
        andi.   %1,5,1          # if even then spin\n\
        beq     1b\n\
        lwsync                  # if odd, give up cycles\n\
        ldx     %1,0,%2         # reverify the lock holder\n\
        cmpd    %0,%1\n\
        bne     1b              # new holder so restart\n\
        li      3,0xE4          # give up the cycles H_CONFER\n\
        lhz     4,0x18(%0)      # processor number\n\
                                # r5 has dispatch cnt already\n"
        HVSC
"2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%2\n\
        addic.  %0,%0,1\n\
        ble-    1b\n\
        stdcx.  %0,0,%2\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&rw->lock)
        : "r3", "r4", "r5", "cr0", "cr1", "ctr", "xer", "memory");
}
#else
static __inline__ void read_lock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"b      2f              # read_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%1\n\
        cmpdi   0,%0,0\n\
        blt+    1b\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%1\n\
        addic.  %0,%0,1\n\
        ble-    1b\n\
        stdcx.  %0,0,%1\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}
#endif /* SPLPAR_LOCKS */
#endif /* CONFIG_PPC_ISERIES */

static __inline__ void read_unlock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"eieio                          # read_unlock\n\
1:      ldarx   %0,0,%1\n\
        addic   %0,%0,-1        # drop the reader count\n\
        stdcx.  %0,0,%1\n\
        bne-    1b              # reservation lost, retry"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

static __inline__ int write_trylock(rwlock_t *rw)
{
        unsigned long tmp;
        unsigned long ret;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # write_trylock\n\
        cmpdi   0,%0,0          # unlocked (no readers, no writer)?\n\
        li      %1,0            # assume failure\n\
        bne-    2f              # busy, give up\n\
        stdcx.  13,0,%2         # store r13 (our paca pointer)\n\
        bne-    1b              # reservation lost, retry\n\
        li      %1,1            # success\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock)
        : "cr0", "memory");

        return ret;
}

#ifdef CONFIG_PPC_ISERIES
static __inline__ void write_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"b      2f              # write_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%2         # load the lock value\n\
        cmpdi   0,%0,0          # if not locked(0), try to acquire\n\
        beq-    2f\n\
        bgt     1b              # positive: readers, spin; negative(0xc..): yield below\n"
"3:     lwz     5,0x280(%0)     # load yield counter\n\
        andi.   %1,5,1          # if even then spin\n\
        beq     1b\n\
        lwsync                  # if odd, give up cycles\n\
        ldx     %1,0,%2         # reverify the lock holder\n\
        cmpd    %0,%1\n\
        bne     1b              # new holder so restart\n\
        lhz     4,0x18(%0)      # processor number\n\
        sldi    4,4,32          # move into top half of word\n\
        or      5,5,4           # r5 has yield cnt - or it in\n\
        li      3,0x25          # yield hcall 0x8-12\n\
        rotrdi  3,3,1           # put the bits in the right spot\n\
        li      4,2             # yield to processor\n\
        li      0,-1            # indicate an hcall\n\
        sc                      # do the hcall\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%2\n\
        cmpdi   0,%0,0\n\
        bne-    1b\n\
        stdcx.  13,0,%2\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&rw->lock)
        : "r0", "r3", "r4", "r5", "ctr", "cr0", "cr1", "cr2", "cr3", "cr4",
          "xer", "memory");
}
#else
#ifdef SPLPAR_LOCKS
static __inline__ void write_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"b      2f              # write_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%2         # load the lock value\n\
        li      3,0xE4          # give up the cycles H_CONFER\n\
        cmpdi   0,%0,0          # if not locked(0), try to acquire\n\
        beq-    2f\n\
        blt     3f              # negative(0xc..)->confer to holder\n\
        b       1b\n"
"3:     lwz     5,0x280(%0)     # load dispatch counter\n\
        andi.   %1,5,1          # if even then spin\n\
        beq     1b\n\
        lwsync                  # if odd, give up cycles\n\
        ldx     %1,0,%2         # reverify the lock holder\n\
        cmpd    %0,%1\n\
        bne     1b              # new holder so restart\n\
        lhz     4,0x18(%0)      # processor number\n\
                                # r5 has dispatch cnt already\n"
        HVSC
"       b       1b\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%2\n\
        cmpdi   0,%0,0\n\
        bne-    1b\n\
        stdcx.  13,0,%2\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&rw->lock)
        : "r3", "r4", "r5", "cr0", "cr1", "ctr", "xer", "memory");
}
#else
static __inline__ void write_lock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"b      2f              # write_lock\n\
1:"
        HMT_LOW
"       ldx     %0,0,%1         # load the lock value\n\
        cmpdi   0,%0,0          # if not locked(0), try to acquire\n\
        bne+    1b\n\
2:\n"
        HMT_MEDIUM
"       ldarx   %0,0,%1\n\
        cmpdi   0,%0,0\n\
        bne-    1b\n\
        stdcx.  13,0,%1\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}
#endif /* SPLPAR_LOCKS */
#endif /* CONFIG_PPC_ISERIES */

static __inline__ void write_unlock(rwlock_t *rw)
{
        __asm__ __volatile__("lwsync    # write_unlock": : :"memory");
        rw->lock = 0;
}
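
/*
 * Basic rwlock usage (illustrative sketch only, not part of this
 * header; my_rwlock is a hypothetical lock):
 *
 *      static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *      write_lock(&my_rwlock);
 *      ... exclusive update ...
 *      write_unlock(&my_rwlock);
 *
 *      if (write_trylock(&my_rwlock)) {
 *              ... exclusive update, lock held ...
 *              write_unlock(&my_rwlock);
 *      }
 */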

static __inline__ int is_read_locked(rwlock_t *rw)
{
        return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
        return rw->lock < 0;
}

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))

#define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */