Lines matching refs:sem in kernel/locking/rwsem.c

68 # define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
71 #c, atomic_long_read(&(sem)->count), \
72 (unsigned long) sem->magic, \
73 atomic_long_read(&(sem)->owner), (long)current, \
74 list_empty(&(sem)->wait_list) ? "" : "not ")) \
78 # define DEBUG_RWSEMS_WARN_ON(c, sem)
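For orientation before the helpers below: sem->count packs a writer-locked bit, waiter/handoff flags, and a per-reader bias into one atomic long, and sem->owner packs the owning task pointer with flag bits in its low bits. A minimal sketch of the count layout; treat the exact values as assumptions taken from upstream rwsem.c, not as part of this listing:

	/* Sketch of the rwsem ->count layout; values assumed from upstream rwsem.c. */
	#define RWSEM_WRITER_LOCKED	(1UL << 0)	/* a writer owns the lock */
	#define RWSEM_FLAG_WAITERS	(1UL << 1)	/* wait_list is non-empty */
	#define RWSEM_FLAG_HANDOFF	(1UL << 2)	/* a starved waiter requested handoff */
	#define RWSEM_READER_SHIFT	8
	#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)	/* one unit per reader */
	#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))	/* reader-count field */
	#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
	#define RWSEM_UNLOCKED_VALUE	0UL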
140 static inline void rwsem_set_owner(struct rw_semaphore *sem)
143 atomic_long_set(&sem->owner, (long)current);
146 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
149 atomic_long_set(&sem->owner, 0);
155 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
157 return atomic_long_read(&sem->owner) & flags;
170 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
174 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
176 atomic_long_set(&sem->owner, val);
179 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
181 __rwsem_set_reader_owned(sem, current);
187 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
193 long count = atomic_long_read(&sem->count);
198 return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
208 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
210 unsigned long val = atomic_long_read(&sem->owner);
213 if (atomic_long_try_cmpxchg(&sem->owner, &val,
219 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
228 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
230 unsigned long owner = atomic_long_read(&sem->owner);
237 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
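The do/while at line 237 is the standard try_cmpxchg retry idiom: atomic_long_try_cmpxchg() rewrites the expected value on failure, so the loop re-derives the new value and retries. A self-contained C11 analogue of rwsem_set_nonspinnable(); the flag value is an assumption mirroring RWSEM_NONSPINNABLE:

	#include <stdatomic.h>

	#define NONSPINNABLE 0x2UL	/* assumed: mirrors RWSEM_NONSPINNABLE */

	/* Set a flag bit in an owner-style word without disturbing the rest. */
	static void set_nonspinnable(atomic_ulong *owner)
	{
		unsigned long old = atomic_load_explicit(owner, memory_order_relaxed);

		do {
			if (old & NONSPINNABLE)
				return;		/* already set: nothing to do */
			/* on failure, compare_exchange reloads 'old', like try_cmpxchg */
		} while (!atomic_compare_exchange_weak_explicit(owner, &old,
				old | NONSPINNABLE,
				memory_order_relaxed, memory_order_relaxed));
	}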
241 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
243 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
246 rwsem_set_nonspinnable(sem);
249 rwsem_set_reader_owned(sem);
256 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
262 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
263 rwsem_set_owner(sem);
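Lines 241-263 are the two lock fast paths: a reader adds RWSEM_READER_BIAS and succeeds unless a writer bit is visible in the result, while a writer cmpxchgs an unlocked count straight to RWSEM_WRITER_LOCKED. A self-contained C11 analogue; the constants are assumptions, and the failed-reader back-out that the real slowpath performs is omitted:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define WRITER_LOCKED	0x1L	/* assumed: mirrors RWSEM_WRITER_LOCKED */
	#define READER_BIAS	0x100L	/* assumed: mirrors RWSEM_READER_BIAS */

	/* Reader fast path: optimistically add one reader unit; fails (leaving
	 * the back-out to a slowpath) if a writer bit is visible afterwards. */
	static bool read_trylock(atomic_long *count)
	{
		long c = atomic_fetch_add_explicit(count, READER_BIAS,
						   memory_order_acquire) + READER_BIAS;
		return !(c & WRITER_LOCKED);
	}

	/* Writer fast path: only a fully unlocked count (0) can be claimed. */
	static bool write_trylock(atomic_long *count)
	{
		long expected = 0;

		return atomic_compare_exchange_strong_explicit(count, &expected,
				WRITER_LOCKED, memory_order_acquire,
				memory_order_relaxed);
	}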
274 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
277 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
285 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
287 unsigned long owner = atomic_long_read(&sem->owner);
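rwsem_owner() and rwsem_owner_flags() recover the task pointer by masking the low bits of ->owner, the same bits the setters above OR flags into; this works because struct task_struct is aligned well past those bits. A standalone sketch of the encoding, with flag values assumed:

	/* Low bits of the owner word are flags; the rest is the task pointer. */
	#define READER_OWNED	 0x1UL		/* assumed: mirrors RWSEM_READER_OWNED */
	#define NONSPINNABLE	 0x2UL		/* assumed: mirrors RWSEM_NONSPINNABLE */
	#define OWNER_FLAGS_MASK (READER_OWNED | NONSPINNABLE)

	struct task;				/* stand-in for struct task_struct */

	static unsigned long owner_encode(struct task *t, unsigned long flags)
	{
		return (unsigned long)t | (flags & OWNER_FLAGS_MASK);
	}

	static struct task *owner_decode(unsigned long word, unsigned long *pflags)
	{
		*pflags = word & OWNER_FLAGS_MASK;
		return (struct task *)(word & ~OWNER_FLAGS_MASK);
	}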
313 void __init_rwsem(struct rw_semaphore *sem, const char *name,
320 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
321 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
324 sem->magic = sem;
326 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
327 raw_spin_lock_init(&sem->wait_lock);
328 INIT_LIST_HEAD(&sem->wait_list);
329 atomic_long_set(&sem->owner, 0L);
331 osq_lock_init(&sem->osq);
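Callers normally reach __init_rwsem() through the init_rwsem() wrapper or a static DECLARE_RWSEM. A usage sketch; struct my_dev and its functions are hypothetical:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(config_rwsem);	/* compile-time initialization */

	struct my_dev {				/* hypothetical container */
		struct rw_semaphore lock;
		int value;
	};

	static void my_dev_init(struct my_dev *dev)
	{
		init_rwsem(&dev->lock);		/* expands to __init_rwsem() above */
	}

	static int my_dev_get(struct my_dev *dev)
	{
		int v;

		down_read(&dev->lock);		/* shared acquisition */
		v = dev->value;
		up_read(&dev->lock);
		return v;
	}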
348 #define rwsem_first_waiter(sem) \
349 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
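rwsem_first_waiter() is list_first_entry(), i.e. container_of() applied to the head's first node; it is only meaningful when the list is non-empty, which is why its callers check list_empty() first. A self-contained rendering of the idiom with illustrative names:

	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	struct waiter {
		struct list_head list;		/* linkage into a wait list */
		int type;
	};

	/* container_of(): recover the enclosing struct from a member pointer. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* First entry of a non-empty list, as rwsem_first_waiter() does. */
	#define first_waiter(head) container_of((head)->next, struct waiter, list)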
373 rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
375 lockdep_assert_held(&sem->wait_lock);
376 list_add_tail(&waiter->list, &sem->wait_list);
389 rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
391 lockdep_assert_held(&sem->wait_lock);
393 if (likely(!list_empty(&sem->wait_list)))
396 atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
414 static void rwsem_mark_wake(struct rw_semaphore *sem,
422 lockdep_assert_held(&sem->wait_lock);
428 waiter = rwsem_first_waiter(sem);
449 if (unlikely(atomic_long_read(&sem->count) < 0))
461 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
476 atomic_long_add(-adjustment, &sem->count);
486 __rwsem_set_reader_owned(sem, owner);
513 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
530 oldcount = atomic_long_read(&sem->count);
531 if (list_empty(&sem->wait_list)) {
549 atomic_long_add(adjustment, &sem->count);
580 rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
582 __releases(&sem->wait_lock)
584 bool first = rwsem_first_waiter(sem) == waiter;
593 if (rwsem_del_waiter(sem, waiter) && first)
594 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
595 raw_spin_unlock_irq(&sem->wait_lock);
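rwsem_mark_wake() only queues tasks on a wake_q while wait_lock is held; the caller issues the wakeups after the spinlock is dropped, so freshly woken tasks do not immediately pile onto wait_lock. A sketch of that pattern using the real wake_q API; the waiter record and the function are hypothetical:

	#include <linux/sched/wake_q.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct my_waiter {			/* hypothetical waiter record */
		struct list_head list;
		struct task_struct *task;
	};

	static void drain_and_wake(spinlock_t *lock, struct list_head *waiters)
	{
		struct my_waiter *w, *tmp;
		DEFINE_WAKE_Q(wake_q);

		spin_lock(lock);
		list_for_each_entry_safe(w, tmp, waiters, list) {
			list_del(&w->list);
			wake_q_add(&wake_q, w->task);	/* defer the actual wakeup */
		}
		spin_unlock(lock);

		wake_up_q(&wake_q);		/* wake everyone outside the lock */
	}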
607 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
610 struct rwsem_waiter *first = rwsem_first_waiter(sem);
613 lockdep_assert_held(&sem->wait_lock);
615 count = atomic_long_read(&sem->count);
648 if (list_is_singular(&sem->wait_list))
651 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
668 rwsem_set_owner(sem);
694 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
696 long count = atomic_long_read(&sem->count);
699 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
701 rwsem_set_owner(sem);
709 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
725 owner = rwsem_owner_flags(sem, &flags);
753 rwsem_spin_on_owner(struct rw_semaphore *sem)
761 owner = rwsem_owner_flags(sem, &flags);
773 new = rwsem_owner_flags(sem, &new_flags);
812 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
814 long count = atomic_long_read(&sem->count);
825 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
835 if (!osq_lock(&sem->osq))
847 owner_state = rwsem_spin_on_owner(sem);
854 taken = rwsem_try_write_lock_unqueued(sem);
871 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
873 rspin_threshold = rwsem_rspin_threshold(sem);
885 rwsem_set_nonspinnable(sem);
939 osq_unlock(&sem->osq);
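The shape of rwsem_optimistic_spin(): take the OSQ so only one spinner pokes the lock word, spin while the owner is running on a CPU, trylock opportunistically, and fall back to the sleeping slowpath when the owner blocks. A schematic sketch only, in terms of this file's own helpers; the real function also handles reader spinning against the time threshold above:

	/* Schematic; not the real control flow. */
	static bool optimistic_spin_sketch(struct rw_semaphore *sem)
	{
		bool taken = false;

		preempt_disable();
		if (!osq_lock(&sem->osq))	/* one optimistic spinner at a time */
			goto out;

		for (;;) {
			/* Spins while the owner runs on a CPU, then reports
			 * the owner state it observed. */
			enum owner_state state = rwsem_spin_on_owner(sem);

			if (state == OWNER_NONSPINNABLE)
				break;		/* owner is sleeping: go queue up */
			if (rwsem_try_write_lock_unqueued(sem)) {
				taken = true;	/* acquired without queueing */
				break;
			}
			if (need_resched())
				break;		/* don't hog the CPU */
			cpu_relax();
		}
		osq_unlock(&sem->osq);
	out:
		preempt_enable();
		return taken;
	}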
950 static inline void clear_nonspinnable(struct rw_semaphore *sem)
952 if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
953 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
957 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
962 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
967 static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
970 rwsem_spin_on_owner(struct rw_semaphore *sem)
984 static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
996 clear_nonspinnable(sem);
998 rwsem_mark_wake(sem, wake_type, wake_q);
1005 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
1017 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
1025 rwsem_set_reader_owned(sem);
1033 raw_spin_lock_irq(&sem->wait_lock);
1034 if (!list_empty(&sem->wait_list))
1035 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
1037 raw_spin_unlock_irq(&sem->wait_lock);
1040 return sem;
1049 raw_spin_lock_irq(&sem->wait_lock);
1050 if (list_empty(&sem->wait_list)) {
1057 if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
1060 raw_spin_unlock_irq(&sem->wait_lock);
1061 rwsem_set_reader_owned(sem);
1063 return sem;
1067 rwsem_add_waiter(sem, &waiter);
1070 count = atomic_long_add_return(adjustment, &sem->count);
1072 rwsem_cond_wake_waiter(sem, count, &wake_q);
1073 raw_spin_unlock_irq(&sem->wait_lock);
1078 trace_contention_begin(sem, LCB_F_READ);
1088 raw_spin_lock_irq(&sem->wait_lock);
1091 raw_spin_unlock_irq(&sem->wait_lock);
1101 trace_contention_end(sem, 0);
1102 return sem;
1105 rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1108 trace_contention_end(sem, -EINTR);
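Stripped to its skeleton, the reader slowpath queues a waiter under wait_lock, then sleeps until rwsem_mark_wake() grants the lock (by clearing waiter.task) or a signal aborts the wait. A schematic sketch in terms of this file's helpers; the reader-grant fast cases and wakeup bookkeeping above are elided:

	/* Schematic skeleton of rwsem_down_read_slowpath(). */
	static struct rw_semaphore *read_slowpath_sketch(struct rw_semaphore *sem,
							 unsigned int state)
	{
		struct rwsem_waiter waiter = {
			.task = current,
			.type = RWSEM_WAITING_FOR_READ,
		};

		raw_spin_lock_irq(&sem->wait_lock);
		rwsem_add_waiter(sem, &waiter);
		raw_spin_unlock_irq(&sem->wait_lock);

		for (;;) {
			set_current_state(state);
			if (!smp_load_acquire(&waiter.task))
				break;		/* mark_wake cleared ->task: granted */
			if (signal_pending_state(state, current)) {
				DEFINE_WAKE_Q(wake_q);

				__set_current_state(TASK_RUNNING);
				raw_spin_lock_irq(&sem->wait_lock);
				/* releases wait_lock, may wake a successor */
				rwsem_del_wake_waiter(sem, &waiter, &wake_q);
				wake_up_q(&wake_q);
				return ERR_PTR(-EINTR);
			}
			schedule();
		}
		__set_current_state(TASK_RUNNING);
		return sem;
	}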
1116 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1122 if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1124 return sem;
1136 raw_spin_lock_irq(&sem->wait_lock);
1137 rwsem_add_waiter(sem, &waiter);
1140 if (rwsem_first_waiter(sem) != &waiter) {
1141 rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
1148 raw_spin_unlock_irq(&sem->wait_lock);
1150 raw_spin_lock_irq(&sem->wait_lock);
1153 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1158 trace_contention_begin(sem, LCB_F_WRITE);
1161 if (rwsem_try_write_lock(sem, &waiter)) {
1166 raw_spin_unlock_irq(&sem->wait_lock);
1183 owner_state = rwsem_spin_on_owner(sem);
1194 raw_spin_lock_irq(&sem->wait_lock);
1197 raw_spin_unlock_irq(&sem->wait_lock);
1199 trace_contention_end(sem, 0);
1200 return sem;
1204 raw_spin_lock_irq(&sem->wait_lock);
1205 rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1207 trace_contention_end(sem, -EINTR);
1215 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1220 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1222 if (!list_empty(&sem->wait_list))
1223 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1225 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1228 return sem;
1236 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1241 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1243 if (!list_empty(&sem->wait_list))
1244 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1246 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1249 return sem;
1255 static inline int __down_read_common(struct rw_semaphore *sem, int state)
1259 if (!rwsem_read_trylock(sem, &count)) {
1260 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
1262 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1267 static inline void __down_read(struct rw_semaphore *sem)
1269 __down_read_common(sem, TASK_UNINTERRUPTIBLE);
1272 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1274 return __down_read_common(sem, TASK_INTERRUPTIBLE);
1277 static inline int __down_read_killable(struct rw_semaphore *sem)
1279 return __down_read_common(sem, TASK_KILLABLE);
1282 static inline int __down_read_trylock(struct rw_semaphore *sem)
1286 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1288 tmp = atomic_long_read(&sem->count);
1290 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1292 rwsem_set_reader_owned(sem);
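__down_read_trylock() is the non-blocking entry point behind down_read_trylock(). A typical caller-side pattern, sketched with a hypothetical function and semaphore:

	#include <linux/rwsem.h>

	/* Hypothetical: skip the work entirely if the lock is busy. */
	static bool try_refresh_stats(struct rw_semaphore *stats_sem)
	{
		if (!down_read_trylock(stats_sem))
			return false;		/* contended: caller retries later */
		/* ... read the shared statistics ... */
		up_read(stats_sem);
		return true;
	}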
1302 static inline int __down_write_common(struct rw_semaphore *sem, int state)
1304 if (unlikely(!rwsem_write_trylock(sem))) {
1305 if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
1312 static inline void __down_write(struct rw_semaphore *sem)
1314 __down_write_common(sem, TASK_UNINTERRUPTIBLE);
1317 static inline int __down_write_killable(struct rw_semaphore *sem)
1319 return __down_write_common(sem, TASK_KILLABLE);
1322 static inline int __down_write_trylock(struct rw_semaphore *sem)
1324 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1325 return rwsem_write_trylock(sem);
1331 static inline void __up_read(struct rw_semaphore *sem)
1335 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1336 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1338 rwsem_clear_reader_owned(sem);
1339 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1340 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1343 clear_nonspinnable(sem);
1344 rwsem_wake(sem);
1351 static inline void __up_write(struct rw_semaphore *sem)
1355 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1360 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1361 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1364 rwsem_clear_owner(sem);
1365 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1368 rwsem_wake(sem);
1374 static inline void __downgrade_write(struct rw_semaphore *sem)
1385 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1387 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1388 rwsem_set_reader_owned(sem);
1390 rwsem_downgrade_wake(sem);
1424 void __init_rwsem(struct rw_semaphore *sem, const char *name,
1427 init_rwbase_rt(&(sem)->rwbase);
1430 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1431 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
1436 static inline void __down_read(struct rw_semaphore *sem)
1438 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1441 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1443 return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
1446 static inline int __down_read_killable(struct rw_semaphore *sem)
1448 return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
1451 static inline int __down_read_trylock(struct rw_semaphore *sem)
1453 return rwbase_read_trylock(&sem->rwbase);
1456 static inline void __up_read(struct rw_semaphore *sem)
1458 rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
1461 static inline void __sched __down_write(struct rw_semaphore *sem)
1463 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1466 static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1468 return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
1471 static inline int __down_write_trylock(struct rw_semaphore *sem)
1473 return rwbase_write_trylock(&sem->rwbase);
1476 static inline void __up_write(struct rw_semaphore *sem)
1478 rwbase_write_unlock(&sem->rwbase);
1481 static inline void __downgrade_write(struct rw_semaphore *sem)
1483 rwbase_write_downgrade(&sem->rwbase);
1487 #define DEBUG_RWSEMS_WARN_ON(c, sem)
1489 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
1494 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1496 int count = atomic_read(&sem->rwbase.readers);
1506 void __sched down_read(struct rw_semaphore *sem)
1509 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1511 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1515 int __sched down_read_interruptible(struct rw_semaphore *sem)
1518 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1520 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1521 rwsem_release(&sem->dep_map, _RET_IP_);
1529 int __sched down_read_killable(struct rw_semaphore *sem)
1532 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1534 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1535 rwsem_release(&sem->dep_map, _RET_IP_);
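down_read_killable() (and the interruptible variant above it) returns 0 on success and -EINTR when the sleep is aborted by a signal, so callers must check the return value. A usage sketch with a hypothetical caller:

	#include <linux/rwsem.h>
	#include <linux/errno.h>

	/* Hypothetical ioctl-style caller: a fatal signal aborts the wait. */
	static int fetch_config(struct rw_semaphore *cfg_sem)
	{
		int ret = down_read_killable(cfg_sem);

		if (ret)
			return ret;	/* -EINTR: task was fatally signalled */
		/* ... copy the configuration out ... */
		up_read(cfg_sem);
		return 0;
	}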
1546 int down_read_trylock(struct rw_semaphore *sem)
1548 int ret = __down_read_trylock(sem);
1551 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1559 void __sched down_write(struct rw_semaphore *sem)
1562 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1563 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1570 int __sched down_write_killable(struct rw_semaphore *sem)
1573 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1575 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1577 rwsem_release(&sem->dep_map, _RET_IP_);
1588 int down_write_trylock(struct rw_semaphore *sem)
1590 int ret = __down_write_trylock(sem);
1593 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1602 void up_read(struct rw_semaphore *sem)
1604 rwsem_release(&sem->dep_map, _RET_IP_);
1605 __up_read(sem);
1612 void up_write(struct rw_semaphore *sem)
1614 rwsem_release(&sem->dep_map, _RET_IP_);
1615 __up_write(sem);
1622 void downgrade_write(struct rw_semaphore *sem)
1624 lock_downgrade(&sem->dep_map, _RET_IP_);
1625 __downgrade_write(sem);
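downgrade_write() atomically converts a held write lock into a read lock (the __downgrade_write() above swaps RWSEM_WRITER_LOCKED for one RWSEM_READER_BIAS), so no other writer can slip in between. A usage sketch with a hypothetical caller:

	#include <linux/rwsem.h>

	/* Hypothetical: publish an update exclusively, then keep reading the
	 * result without ever fully releasing the lock. */
	static void update_then_read(struct rw_semaphore *sem)
	{
		down_write(sem);
		/* ... modify the protected data ... */
		downgrade_write(sem);	/* atomically become a reader */
		/* ... keep using the data, now shared with other readers ... */
		up_read(sem);		/* release the read side we downgraded to */
	}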
1631 void down_read_nested(struct rw_semaphore *sem, int subclass)
1634 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1635 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1639 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1642 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1644 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1645 rwsem_release(&sem->dep_map, _RET_IP_);
1653 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1656 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1657 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1661 void down_read_non_owner(struct rw_semaphore *sem)
1664 __down_read(sem);
1665 __rwsem_set_reader_owned(sem, NULL);
1669 void down_write_nested(struct rw_semaphore *sem, int subclass)
1672 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1673 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
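The _nested() variants exist for code that legitimately holds two locks of the same lockdep class at once; the subclass annotation tells lockdep the nesting is deliberate. A usage sketch; the function is hypothetical, while SINGLE_DEPTH_NESTING is the usual subclass constant:

	#include <linux/rwsem.h>
	#include <linux/lockdep.h>

	/* Hypothetical: lock two rwsems of the same class in a stable
	 * (address) order to rule out an actual ABBA deadlock. */
	static void lock_pair(struct rw_semaphore *a, struct rw_semaphore *b)
	{
		if (a > b) {
			struct rw_semaphore *t = a;	/* enforce a fixed order */

			a = b;
			b = t;
		}
		down_write(a);
		down_write_nested(b, SINGLE_DEPTH_NESTING);
	}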
1677 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1680 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1682 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1684 rwsem_release(&sem->dep_map, _RET_IP_);
1692 void up_read_non_owner(struct rw_semaphore *sem)
1694 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1695 __up_read(sem);
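down_read_non_owner()/up_read_non_owner() support the case where the task that releases the read lock is not the one that acquired it; down_read_non_owner() records the owner as NULL (line 1665), so the debug check at 1694 only verifies that some reader owns the lock. A usage sketch with hypothetical functions:

	#include <linux/rwsem.h>

	/* Hypothetical cross-context pattern: one task acquires, another
	 * releases, so per-owner bookkeeping must be suppressed. */
	static void start_io(struct rw_semaphore *sem)
	{
		down_read_non_owner(sem);	/* reader owner recorded as NULL */
		/* ... hand the protected buffer to another context ... */
	}

	static void finish_io(struct rw_semaphore *sem)	/* runs in a different task */
	{
		up_read_non_owner(sem);	/* checks reader-owned, not who owns it */
	}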