/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
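/*
 * Worked example of the 64-bit count encoding implied by the
 * constants above (illustrative values, obtained by plain
 * arithmetic): the low 32 bits (RWSEM_ACTIVE_MASK) count active
 * lockers, while queued waiters show up as RWSEM_WAITING_BIAS
 * added into the high bits by the generic slow path.
 *
 *	0x0000000000000000	unlocked (RWSEM_UNLOCKED_VALUE)
 *	0x0000000000000003	three active readers (3 * ACTIVE_READ_BIAS)
 *	0xffffffff00000001	one active writer (ACTIVE_WRITE_BIAS,
 *				i.e. -0xffffffffL as a signed long)
 *	0xffffffff00000003	three active readers with waiters queued
 *				(3 + WAITING_BIAS)
 *
 * A negative count therefore means "writer active and/or waiters
 * queued", which is what the fast paths below test for.
 */
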
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}
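
/*
 * Example of the fast path above (values follow from the constants,
 * shown for illustration): from the unlocked state the increment
 * returns 1 > 0, so the caller proceeds immediately; if a writer
 * holds the lock (count == -0xffffffffL), the increment returns
 * -0xfffffffeL <= 0 and we fall back to rwsem_down_read_failed().
 */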

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}
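
/*
 * Note on the trylock loop above (illustration, not original text):
 * the cmpxchg() is retried only while the observed count stays
 * >= 0, i.e. no writer and no waiters; as soon as the count goes
 * negative the trylock gives up and returns 0 rather than spinning
 * or sleeping.
 */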

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}
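
/*
 * Example (derived from the constants, shown for illustration): the
 * write lock is granted on the fast path only when the add started
 * from RWSEM_UNLOCKED_VALUE, since only 0 + ACTIVE_WRITE_BIAS ==
 * ACTIVE_WRITE_BIAS.  Any other result means readers were active or
 * waiters were queued, and we take rwsem_down_write_failed().
 */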

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}
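
/*
 * Unlike the read trylock, the write trylock above makes a single
 * cmpxchg() attempt: it can only succeed from the completely idle
 * state (count == RWSEM_UNLOCKED_VALUE), so there is nothing worth
 * retrying if any reader, writer or waiter is present.
 */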

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}
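
/*
 * Worked example of the wake test above (values follow from the
 * constants): with one reader holding the lock and waiters queued,
 * count == 1 + WAITING_BIAS == -0xffffffffL; the decrement yields
 * -0x100000000L, which is < -1 and has a zero active mask, so the
 * last reader out calls rwsem_wake().  While other readers remain
 * active the mask is non-zero and no wakeup is attempted.
 */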

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}
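
/*
 * Example (plain arithmetic on the constants): an uncontended writer
 * releases from count == ACTIVE_WRITE_BIAS, leaving 0, so no wakeup
 * is needed; with waiters queued the count stays at WAITING_BIAS
 * (negative) after the subtraction, triggering rwsem_wake().
 */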

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}
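
/*
 * Example of the downgrade arithmetic (derived from the constants):
 * adding -WAITING_BIAS (i.e. +0x100000000L) turns the write bias
 * into a read bias, since -0xffffffffL + 0x100000000L == 1 ==
 * ACTIVE_READ_BIAS.  If waiters were queued the result is still
 * negative, and rwsem_downgrade_wake() lets waiting readers in.
 */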

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

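/*
 * Illustrative usage (not part of this header): kernel code never
 * calls these __-prefixed primitives directly; it goes through the
 * generic wrappers from <linux/rwsem.h>, which land on the fast
 * paths above.  A minimal sketch, with "example_sem" a made-up name:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	// __down_read() fast path
 *	// ... read shared data ...
 *	up_read(&example_sem);		// __up_read()
 *
 *	down_write(&example_sem);	// __down_write()
 *	// ... modify shared data ...
 *	downgrade_write(&example_sem);	// __downgrade_write()
 *	// ... keep reading ...
 *	up_read(&example_sem);
 */
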
#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */