Lines matching references to lockres: a cross-reference listing, apparently over fs/ocfs2/dlmfs/userdlm.c in the Linux kernel. Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark how lockres is bound there. Non-matching lines (including signature and comment continuations) are elided by the tool.

35 static inline int user_check_wait_flag(struct user_lock_res *lockres,  in user_check_wait_flag()  argument
40 spin_lock(&lockres->l_lock); in user_check_wait_flag()
41 ret = lockres->l_flags & flag; in user_check_wait_flag()
42 spin_unlock(&lockres->l_lock); in user_check_wait_flag()
47 static inline void user_wait_on_busy_lock(struct user_lock_res *lockres) in user_wait_on_busy_lock() argument
50 wait_event(lockres->l_event, in user_wait_on_busy_lock()
51 !user_check_wait_flag(lockres, USER_LOCK_BUSY)); in user_wait_on_busy_lock()
54 static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres) in user_wait_on_blocked_lock() argument
57 wait_event(lockres->l_event, in user_wait_on_blocked_lock()
58 !user_check_wait_flag(lockres, USER_LOCK_BLOCKED)); in user_wait_on_blocked_lock()
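
These three helpers implement a standard kernel idiom: sample a flag word under a spinlock, then sleep on a wait queue until the flag clears (wait_event() re-evaluates its condition on every wakeup). A minimal userspace model of the same pattern, substituting a mutex and condition variable for the spinlock and wait queue (all names here are illustrative):

    #include <pthread.h>

    #define F_BUSY 0x01

    struct res {
        pthread_mutex_t l_lock;    /* models lockres->l_lock */
        pthread_cond_t  l_event;   /* models lockres->l_event */
        unsigned        flags;
    };

    /* Sample the flag under the lock, like user_check_wait_flag(). */
    static int check_flag(struct res *r, unsigned flag)
    {
        int ret;

        pthread_mutex_lock(&r->l_lock);
        ret = !!(r->flags & flag);
        pthread_mutex_unlock(&r->l_lock);
        return ret;
    }

    /* Sleep until the flag clears, like user_wait_on_busy_lock(); the
     * while loop mirrors wait_event()'s re-check on every wakeup. */
    static void wait_on_flag(struct res *r, unsigned flag)
    {
        pthread_mutex_lock(&r->l_lock);
        while (r->flags & flag)
            pthread_cond_wait(&r->l_event, &r->l_lock);
        pthread_mutex_unlock(&r->l_lock);
    }
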
63 cluster_connection_from_user_lockres(struct user_lock_res *lockres) in cluster_connection_from_user_lockres() argument
67 ip = container_of(lockres, in cluster_connection_from_user_lockres()
74 user_dlm_inode_from_user_lockres(struct user_lock_res *lockres) in user_dlm_inode_from_user_lockres() argument
78 ip = container_of(lockres, in user_dlm_inode_from_user_lockres()
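
Both back-pointer helpers are container_of() in action: given a pointer to a member embedded in a larger structure, subtract the member's offset to recover the container (here, the connection-holding and inode-holding structures; the embedded ip_lockres field appears later in this listing at line 539). A runnable userspace demonstration, with every name other than ip_lockres hypothetical:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace equivalent of the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct lock_res { int l_flags; };

    struct inode_private {              /* stand-in for the dlmfs inode info */
        long            other_state;
        struct lock_res ip_lockres;     /* embedded, as DLMFS_I(inode)->ip_lockres */
    };

    int main(void)
    {
        struct inode_private ip = { 0, { 0 } };
        struct lock_res *lockres = &ip.ip_lockres;

        /* Recover the container exactly as the two helpers do. */
        struct inode_private *back =
            container_of(lockres, struct inode_private, ip_lockres);

        printf("match: %d\n", back == &ip);   /* prints "match: 1" */
        return 0;
    }
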
84 static inline void user_recover_from_dlm_error(struct user_lock_res *lockres) in user_recover_from_dlm_error() argument
86 spin_lock(&lockres->l_lock); in user_recover_from_dlm_error()
87 lockres->l_flags &= ~USER_LOCK_BUSY; in user_recover_from_dlm_error()
88 spin_unlock(&lockres->l_lock); in user_recover_from_dlm_error()
113 struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); in user_ast() local
117 lockres->l_namelen, lockres->l_name, lockres->l_level, in user_ast()
118 lockres->l_requested); in user_ast()
120 spin_lock(&lockres->l_lock); in user_ast()
122 status = ocfs2_dlm_lock_status(&lockres->l_lksb); in user_ast()
125 status, lockres->l_namelen, lockres->l_name); in user_ast()
126 spin_unlock(&lockres->l_lock); in user_ast()
130 mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV, in user_ast()
132 lockres->l_namelen, lockres->l_name, lockres->l_flags); in user_ast()
135 if (lockres->l_requested < lockres->l_level) { in user_ast()
136 if (lockres->l_requested <= in user_ast()
137 user_highest_compat_lock_level(lockres->l_blocking)) { in user_ast()
138 lockres->l_blocking = DLM_LOCK_NL; in user_ast()
139 lockres->l_flags &= ~USER_LOCK_BLOCKED; in user_ast()
143 lockres->l_level = lockres->l_requested; in user_ast()
144 lockres->l_requested = DLM_LOCK_IV; in user_ast()
145 lockres->l_flags |= USER_LOCK_ATTACHED; in user_ast()
146 lockres->l_flags &= ~USER_LOCK_BUSY; in user_ast()
148 spin_unlock(&lockres->l_lock); in user_ast()
150 wake_up(&lockres->l_event); in user_ast()
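
The interesting branch in user_ast() is at lines 135-139: when the AST delivers a downconvert (the requested level is below the current one) and the new level no longer conflicts with the remote request recorded in l_blocking, USER_LOCK_BLOCKED can be cleared on the spot. DLM lock modes order numerically (DLM_LOCK_IV is -1, then NL, CR, CW, PR, PW, EX), which is what makes the <= comparison meaningful. user_highest_compat_lock_level() itself never references lockres and so is absent from this listing; the sketch below assumes the conventional rule that a blocking EX request forces us down to NL while a blocking PR request lets us keep PR:

    #include <stdio.h>

    /* DLM lock modes; a higher value is a stronger mode. */
    enum { LK_IV = -1, LK_NL, LK_CR, LK_CW, LK_PR, LK_PW, LK_EX };

    /* Assumed behavior of user_highest_compat_lock_level(): the strongest
     * mode we may hold while a remote request at 'blocking' is pending. */
    static int highest_compat(int blocking)
    {
        if (blocking == LK_EX)
            return LK_NL;   /* EX conflicts with everything except NL */
        if (blocking == LK_PR)
            return LK_PR;   /* readers can share with other readers */
        return LK_EX;
    }

    int main(void)
    {
        /* The AST's test: after downconverting to NL while blocked by a
         * remote EX request, requested <= highest_compat(blocking), so
         * the BLOCKED flag is dropped. */
        printf("NL clears a block by EX: %d\n", LK_NL <= highest_compat(LK_EX));
        printf("PR clears a block by PR: %d\n", LK_PR <= highest_compat(LK_PR));
        return 0;
    }
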
153 static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) in user_dlm_grab_inode_ref() argument
156 inode = user_dlm_inode_from_user_lockres(lockres); in user_dlm_grab_inode_ref()
163 static void __user_dlm_queue_lockres(struct user_lock_res *lockres) in __user_dlm_queue_lockres() argument
165 if (!(lockres->l_flags & USER_LOCK_QUEUED)) { in __user_dlm_queue_lockres()
166 user_dlm_grab_inode_ref(lockres); in __user_dlm_queue_lockres()
168 INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); in __user_dlm_queue_lockres()
170 queue_work(user_dlm_worker, &lockres->l_work); in __user_dlm_queue_lockres()
171 lockres->l_flags |= USER_LOCK_QUEUED; in __user_dlm_queue_lockres()
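
Two things keep the queued work sane here: USER_LOCK_QUEUED makes queueing idempotent (a second BAST while work is already pending queues nothing), and the inode reference taken at line 166 pins the structure containing the lockres until the worker releases it at line 380. A small model of the dedup flag, with illustrative names and a stub standing in for the real queueing:

    #include <pthread.h>

    #define F_QUEUED 0x01

    struct res {
        pthread_mutex_t l_lock;
        unsigned        flags;
    };

    static void queue_worker(struct res *r)
    {
        (void)r;   /* stub: would hand r off to a worker thread */
    }

    /* Queue at most one worker instance; the caller holds r->l_lock,
     * matching the convention of __user_dlm_queue_lockres(). The worker
     * clears F_QUEUED again once it starts running (compare line 301). */
    static void maybe_queue(struct res *r)
    {
        if (!(r->flags & F_QUEUED)) {
            queue_worker(r);
            r->flags |= F_QUEUED;
        }
    }
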
175 static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres) in __user_dlm_cond_queue_lockres() argument
179 if (!(lockres->l_flags & USER_LOCK_BLOCKED)) in __user_dlm_cond_queue_lockres()
182 switch (lockres->l_blocking) { in __user_dlm_cond_queue_lockres()
184 if (!lockres->l_ex_holders && !lockres->l_ro_holders) in __user_dlm_cond_queue_lockres()
188 if (!lockres->l_ex_holders) in __user_dlm_cond_queue_lockres()
196 __user_dlm_queue_lockres(lockres); in __user_dlm_cond_queue_lockres()
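
The switch at lines 182-188 encodes when a downconvert can actually make progress: a remote EX request needs every local holder gone, while a remote PR request only conflicts with local EX holders. The same predicate as a standalone function, grounded directly in the two conditions shown:

    /* Lock modes, numbered as in dlmconstants.h. */
    enum { LK_NL = 0, LK_PR = 3, LK_EX = 5 };

    /* Should the downconvert worker be queued now? Mirrors the switch
     * in __user_dlm_cond_queue_lockres(). */
    static int may_queue_downconvert(int blocking,
                                     unsigned ex_holders, unsigned ro_holders)
    {
        switch (blocking) {
        case LK_EX:   /* remote wants EX: all local holders must drain */
            return !ex_holders && !ro_holders;
        case LK_PR:   /* remote wants PR: only local EX holders conflict */
            return !ex_holders;
        default:
            return 0;
        }
    }
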
201 struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); in user_bast() local
204 lockres->l_namelen, lockres->l_name, level, lockres->l_level); in user_bast()
206 spin_lock(&lockres->l_lock); in user_bast()
207 lockres->l_flags |= USER_LOCK_BLOCKED; in user_bast()
208 if (level > lockres->l_blocking) in user_bast()
209 lockres->l_blocking = level; in user_bast()
211 __user_dlm_queue_lockres(lockres); in user_bast()
212 spin_unlock(&lockres->l_lock); in user_bast()
214 wake_up(&lockres->l_event); in user_bast()
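
user_bast() records the conflict and only ever raises l_blocking (lines 208-209): if a PR BAST is followed by an EX BAST before the worker runs, the stronger request wins and the eventual downconvert goes far enough for it. The monotonic update, isolated:

    #define F_BLOCKED 0x02   /* illustrative value for USER_LOCK_BLOCKED */

    /* A BAST marks the lock blocked and ratchets l_blocking upward, so
     * a later, stronger remote request (EX after PR) supersedes. */
    static void record_bast(int *l_blocking, unsigned *flags, int level)
    {
        *flags |= F_BLOCKED;
        if (level > *l_blocking)
            *l_blocking = level;
    }
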
219 struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); in user_unlock_ast() local
222 lockres->l_namelen, lockres->l_name, lockres->l_flags); in user_unlock_ast()
227 spin_lock(&lockres->l_lock); in user_unlock_ast()
231 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN in user_unlock_ast()
232 && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) { in user_unlock_ast()
233 lockres->l_level = DLM_LOCK_IV; in user_unlock_ast()
238 BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); in user_unlock_ast()
239 lockres->l_flags &= ~USER_LOCK_IN_CANCEL; in user_unlock_ast()
242 BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); in user_unlock_ast()
244 lockres->l_requested = DLM_LOCK_IV; /* cancel an in user_unlock_ast()
247 lockres->l_flags &= ~USER_LOCK_IN_CANCEL; in user_unlock_ast()
250 if (lockres->l_flags & USER_LOCK_BLOCKED) in user_unlock_ast()
251 __user_dlm_queue_lockres(lockres); in user_unlock_ast()
254 lockres->l_flags &= ~USER_LOCK_BUSY; in user_unlock_ast()
256 spin_unlock(&lockres->l_lock); in user_unlock_ast()
258 wake_up(&lockres->l_event); in user_unlock_ast()
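
user_unlock_ast() disambiguates three outcomes sharing one callback: a real unlock issued during teardown, a cancel that turned out to be moot (the request it targeted had likely already completed, so only IN_CANCEL is cleared), and a successful cancel of an in-flight convert, after which l_requested is reset and the still-BLOCKED lock is re-queued (lines 250-251). The wake_up() at line 258 runs on every path. A decision-tree sketch with illustrative flag names; the status codes that distinguish the moot-cancel case are elided from this listing:

    #include <stdio.h>

    #define F_TEARDOWN  0x01
    #define F_IN_CANCEL 0x02
    #define F_BLOCKED   0x04

    static const char *unlock_ast_case(unsigned flags, int cancel_was_moot)
    {
        if ((flags & F_TEARDOWN) && !(flags & F_IN_CANCEL))
            return "real unlock: l_level becomes DLM_LOCK_IV";
        if (cancel_was_moot)
            return "cancel arrived too late: clear IN_CANCEL and move on";
        if (flags & F_BLOCKED)
            return "convert cancelled: reset l_requested, re-queue the worker";
        return "convert cancelled: reset l_requested";
    }

    int main(void)
    {
        printf("%s\n", unlock_ast_case(F_TEARDOWN, 0));
        return 0;
    }
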
276 static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) in user_dlm_drop_inode_ref() argument
279 inode = user_dlm_inode_from_user_lockres(lockres); in user_dlm_drop_inode_ref()
286 struct user_lock_res *lockres = in user_dlm_unblock_lock() local
289 cluster_connection_from_user_lockres(lockres); in user_dlm_unblock_lock()
291 mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name); in user_dlm_unblock_lock()
293 spin_lock(&lockres->l_lock); in user_dlm_unblock_lock()
295 mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED), in user_dlm_unblock_lock()
297 lockres->l_namelen, lockres->l_name, lockres->l_flags); in user_dlm_unblock_lock()
301 lockres->l_flags &= ~USER_LOCK_QUEUED; in user_dlm_unblock_lock()
308 if (!(lockres->l_flags & USER_LOCK_BLOCKED)) { in user_dlm_unblock_lock()
310 lockres->l_namelen, lockres->l_name); in user_dlm_unblock_lock()
311 spin_unlock(&lockres->l_lock); in user_dlm_unblock_lock()
315 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { in user_dlm_unblock_lock()
317 lockres->l_namelen, lockres->l_name); in user_dlm_unblock_lock()
318 spin_unlock(&lockres->l_lock); in user_dlm_unblock_lock()
322 if (lockres->l_flags & USER_LOCK_BUSY) { in user_dlm_unblock_lock()
323 if (lockres->l_flags & USER_LOCK_IN_CANCEL) { in user_dlm_unblock_lock()
325 lockres->l_namelen, lockres->l_name); in user_dlm_unblock_lock()
326 spin_unlock(&lockres->l_lock); in user_dlm_unblock_lock()
330 lockres->l_flags |= USER_LOCK_IN_CANCEL; in user_dlm_unblock_lock()
331 spin_unlock(&lockres->l_lock); in user_dlm_unblock_lock()
333 status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, in user_dlm_unblock_lock()
336 user_log_dlm_error("ocfs2_dlm_unlock", status, lockres); in user_dlm_unblock_lock()
343 if ((lockres->l_blocking == DLM_LOCK_EX) in user_dlm_unblock_lock()
344 && (lockres->l_ex_holders || lockres->l_ro_holders)) { in user_dlm_unblock_lock()
345 spin_unlock(&lockres->l_lock); in user_dlm_unblock_lock()
347 lockres->l_namelen, lockres->l_name, in user_dlm_unblock_lock()
348 lockres->l_ex_holders, lockres->l_ro_holders); in user_dlm_unblock_lock()
352 if ((lockres->l_blocking == DLM_LOCK_PR) in user_dlm_unblock_lock()
353 && lockres->l_ex_holders) { in user_dlm_unblock_lock()
354 spin_unlock(&lockres->l_lock); in user_dlm_unblock_lock()
356 lockres->l_namelen, lockres->l_name, in user_dlm_unblock_lock()
357 lockres->l_ex_holders); in user_dlm_unblock_lock()
362 new_level = user_highest_compat_lock_level(lockres->l_blocking); in user_dlm_unblock_lock()
363 lockres->l_requested = new_level; in user_dlm_unblock_lock()
364 lockres->l_flags |= USER_LOCK_BUSY; in user_dlm_unblock_lock()
366 lockres->l_namelen, lockres->l_name, lockres->l_level, new_level); in user_dlm_unblock_lock()
367 spin_unlock(&lockres->l_lock); in user_dlm_unblock_lock()
370 status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb, in user_dlm_unblock_lock()
372 lockres->l_name, in user_dlm_unblock_lock()
373 lockres->l_namelen); in user_dlm_unblock_lock()
375 user_log_dlm_error("ocfs2_dlm_lock", status, lockres); in user_dlm_unblock_lock()
376 user_recover_from_dlm_error(lockres); in user_dlm_unblock_lock()
380 user_dlm_drop_inode_ref(lockres); in user_dlm_unblock_lock()
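
Laid end to end, the worker's early returns form a ladder: clear QUEUED, bail if the block already resolved or teardown owns the lock, cancel an in-flight request (or bail if a cancel is already pending), back off while conflicting local holders remain, and only then downconvert to the highest level compatible with l_blocking. A runnable condensation of the ladder; flag values are illustrative and strings stand in for the kernel's actions:

    #include <stdio.h>

    #define F_QUEUED    0x01
    #define F_BLOCKED   0x02
    #define F_TEARDOWN  0x04
    #define F_BUSY      0x08
    #define F_IN_CANCEL 0x10

    enum { LK_NL = 0, LK_PR = 3, LK_EX = 5 };

    struct res {
        unsigned flags;
        int      blocking;
        unsigned ex_holders, ro_holders;
    };

    static const char *unblock_step(struct res *r)
    {
        r->flags &= ~F_QUEUED;                    /* worker is running now */
        if (!(r->flags & F_BLOCKED))
            return "nothing to do: block already resolved";
        if (r->flags & F_TEARDOWN)
            return "bail: teardown will drop the lock anyway";
        if (r->flags & F_BUSY)
            return (r->flags & F_IN_CANCEL)
                ? "bail: a cancel is already in flight"
                : "cancel the in-flight request via ocfs2_dlm_unlock()";
        if (r->blocking == LK_EX && (r->ex_holders || r->ro_holders))
            return "wait: local holders still block a drop toward NL";
        if (r->blocking == LK_PR && r->ex_holders)
            return "wait: a local EX holder still blocks a drop to PR";
        return "downconvert via ocfs2_dlm_lock() to the compatible level";
    }

    int main(void)
    {
        struct res r = { F_QUEUED | F_BLOCKED, LK_EX, 0, 1 };
        printf("%s\n", unblock_step(&r));   /* a reader still holds it */
        return 0;
    }
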
383 static inline void user_dlm_inc_holders(struct user_lock_res *lockres, in user_dlm_inc_holders() argument
388 lockres->l_ex_holders++; in user_dlm_inc_holders()
391 lockres->l_ro_holders++; in user_dlm_inc_holders()
402 user_may_continue_on_blocked_lock(struct user_lock_res *lockres, in user_may_continue_on_blocked_lock() argument
405 BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED)); in user_may_continue_on_blocked_lock()
407 return wanted <= user_highest_compat_lock_level(lockres->l_blocking); in user_may_continue_on_blocked_lock()
410 int user_dlm_cluster_lock(struct user_lock_res *lockres, in user_dlm_cluster_lock() argument
416 cluster_connection_from_user_lockres(lockres); in user_dlm_cluster_lock()
421 lockres->l_namelen, lockres->l_name); in user_dlm_cluster_lock()
427 lockres->l_namelen, lockres->l_name, level, lkm_flags); in user_dlm_cluster_lock()
435 spin_lock(&lockres->l_lock); in user_dlm_cluster_lock()
436 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { in user_dlm_cluster_lock()
437 spin_unlock(&lockres->l_lock); in user_dlm_cluster_lock()
445 if ((lockres->l_flags & USER_LOCK_BUSY) && in user_dlm_cluster_lock()
446 (level > lockres->l_level)) { in user_dlm_cluster_lock()
449 spin_unlock(&lockres->l_lock); in user_dlm_cluster_lock()
451 user_wait_on_busy_lock(lockres); in user_dlm_cluster_lock()
455 if ((lockres->l_flags & USER_LOCK_BLOCKED) && in user_dlm_cluster_lock()
456 (!user_may_continue_on_blocked_lock(lockres, level))) { in user_dlm_cluster_lock()
459 spin_unlock(&lockres->l_lock); in user_dlm_cluster_lock()
461 user_wait_on_blocked_lock(lockres); in user_dlm_cluster_lock()
465 if (level > lockres->l_level) { in user_dlm_cluster_lock()
467 if (lockres->l_level != DLM_LOCK_IV) in user_dlm_cluster_lock()
470 lockres->l_requested = level; in user_dlm_cluster_lock()
471 lockres->l_flags |= USER_LOCK_BUSY; in user_dlm_cluster_lock()
472 spin_unlock(&lockres->l_lock); in user_dlm_cluster_lock()
478 status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb, in user_dlm_cluster_lock()
479 local_flags, lockres->l_name, in user_dlm_cluster_lock()
480 lockres->l_namelen); in user_dlm_cluster_lock()
485 status, lockres); in user_dlm_cluster_lock()
486 user_recover_from_dlm_error(lockres); in user_dlm_cluster_lock()
490 user_wait_on_busy_lock(lockres); in user_dlm_cluster_lock()
494 user_dlm_inc_holders(lockres, level); in user_dlm_cluster_lock()
495 spin_unlock(&lockres->l_lock); in user_dlm_cluster_lock()
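
user_dlm_cluster_lock() is a retry loop: every sleep is followed by re-evaluating the whole lock state from scratch, since anything can change while sleeping. Note that BUSY only blocks callers who want more than the current level (lines 445-446), and a request at or below the current level is granted locally by bumping a holder count, with no DLM round trip. One evaluation step, modeled; highest_compat() repeats the assumption from the user_ast() sketch above:

    #include <stdio.h>

    #define F_BUSY     0x01
    #define F_BLOCKED  0x02
    #define F_TEARDOWN 0x04

    enum { LK_IV = -1, LK_NL = 0, LK_PR = 3, LK_EX = 5 };

    struct res { unsigned flags; int level, blocking; };

    static int highest_compat(int blocking)
    {
        return blocking == LK_EX ? LK_NL :
               blocking == LK_PR ? LK_PR : LK_EX;
    }

    /* One pass of the user_dlm_cluster_lock() loop: what happens next? */
    static const char *cluster_lock_step(const struct res *r, int wanted)
    {
        if (r->flags & F_TEARDOWN)
            return "fail: lock is being torn down";
        if ((r->flags & F_BUSY) && wanted > r->level)
            return "sleep until not BUSY, then retry";
        if ((r->flags & F_BLOCKED) && wanted > highest_compat(r->blocking))
            return "sleep until not BLOCKED, then retry";
        if (wanted > r->level)
            return "convert up: set l_requested and BUSY, call ocfs2_dlm_lock()";
        return "grant locally: increment the holder count for this level";
    }

    int main(void)
    {
        struct res r = { F_BLOCKED, LK_PR, LK_EX };
        /* PR is wanted but a remote EX blocks: the caller must back off. */
        printf("%s\n", cluster_lock_step(&r, LK_PR));
        return 0;
    }
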
502 static inline void user_dlm_dec_holders(struct user_lock_res *lockres, in user_dlm_dec_holders() argument
507 BUG_ON(!lockres->l_ex_holders); in user_dlm_dec_holders()
508 lockres->l_ex_holders--; in user_dlm_dec_holders()
511 BUG_ON(!lockres->l_ro_holders); in user_dlm_dec_holders()
512 lockres->l_ro_holders--; in user_dlm_dec_holders()
519 void user_dlm_cluster_unlock(struct user_lock_res *lockres, in user_dlm_cluster_unlock() argument
525 lockres->l_namelen, lockres->l_name); in user_dlm_cluster_unlock()
529 spin_lock(&lockres->l_lock); in user_dlm_cluster_unlock()
530 user_dlm_dec_holders(lockres, level); in user_dlm_cluster_unlock()
531 __user_dlm_cond_queue_lockres(lockres); in user_dlm_cluster_unlock()
532 spin_unlock(&lockres->l_lock); in user_dlm_cluster_unlock()
539 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; in user_dlm_write_lvb() local
544 spin_lock(&lockres->l_lock); in user_dlm_write_lvb()
546 BUG_ON(lockres->l_level < DLM_LOCK_EX); in user_dlm_write_lvb()
547 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); in user_dlm_write_lvb()
550 spin_unlock(&lockres->l_lock); in user_dlm_write_lvb()
555 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; in user_dlm_read_lvb() local
559 spin_lock(&lockres->l_lock); in user_dlm_read_lvb()
561 BUG_ON(lockres->l_level < DLM_LOCK_PR); in user_dlm_read_lvb()
562 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) { in user_dlm_read_lvb()
563 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); in user_dlm_read_lvb()
568 spin_unlock(&lockres->l_lock); in user_dlm_read_lvb()
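
The two BUG_ONs pin down the LVB protocol: writing the lock value block requires holding the lock at DLM_LOCK_EX or above, and reading requires DLM_LOCK_PR or above. A read additionally checks ocfs2_dlm_lvb_valid(), since the DLM can mark the value block invalid (for example after a node death), in which case no bytes are copied. The rules as a standalone predicate pair:

    enum { LK_NL = 0, LK_PR = 3, LK_EX = 5 };

    /* Writing the lock value block needs an exclusive-or-better hold. */
    static int lvb_write_allowed(int level)
    {
        return level >= LK_EX;
    }

    /* Reading needs at least protected-read, and the LVB must be valid
     * (ocfs2_dlm_lvb_valid() in the kernel) for data to be copied out. */
    static int lvb_read_copies_data(int level, int lvb_valid)
    {
        return level >= LK_PR && lvb_valid;
    }
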
572 void user_dlm_lock_res_init(struct user_lock_res *lockres, in user_dlm_lock_res_init() argument
575 memset(lockres, 0, sizeof(*lockres)); in user_dlm_lock_res_init()
577 spin_lock_init(&lockres->l_lock); in user_dlm_lock_res_init()
578 init_waitqueue_head(&lockres->l_event); in user_dlm_lock_res_init()
579 lockres->l_level = DLM_LOCK_IV; in user_dlm_lock_res_init()
580 lockres->l_requested = DLM_LOCK_IV; in user_dlm_lock_res_init()
581 lockres->l_blocking = DLM_LOCK_IV; in user_dlm_lock_res_init()
586 memcpy(lockres->l_name, in user_dlm_lock_res_init()
589 lockres->l_namelen = dentry->d_name.len; in user_dlm_lock_res_init()
592 int user_dlm_destroy_lock(struct user_lock_res *lockres) in user_dlm_destroy_lock() argument
596 cluster_connection_from_user_lockres(lockres); in user_dlm_destroy_lock()
598 mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name); in user_dlm_destroy_lock()
600 spin_lock(&lockres->l_lock); in user_dlm_destroy_lock()
601 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { in user_dlm_destroy_lock()
602 spin_unlock(&lockres->l_lock); in user_dlm_destroy_lock()
606 lockres->l_flags |= USER_LOCK_IN_TEARDOWN; in user_dlm_destroy_lock()
608 while (lockres->l_flags & USER_LOCK_BUSY) { in user_dlm_destroy_lock()
609 spin_unlock(&lockres->l_lock); in user_dlm_destroy_lock()
611 user_wait_on_busy_lock(lockres); in user_dlm_destroy_lock()
613 spin_lock(&lockres->l_lock); in user_dlm_destroy_lock()
616 if (lockres->l_ro_holders || lockres->l_ex_holders) { in user_dlm_destroy_lock()
617 lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN; in user_dlm_destroy_lock()
618 spin_unlock(&lockres->l_lock); in user_dlm_destroy_lock()
623 if (!(lockres->l_flags & USER_LOCK_ATTACHED)) { in user_dlm_destroy_lock()
628 spin_unlock(&lockres->l_lock); in user_dlm_destroy_lock()
632 lockres->l_flags |= USER_LOCK_BUSY; in user_dlm_destroy_lock()
633 spin_unlock(&lockres->l_lock); in user_dlm_destroy_lock()
635 status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK); in user_dlm_destroy_lock()
637 spin_lock(&lockres->l_lock); in user_dlm_destroy_lock()
638 lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN; in user_dlm_destroy_lock()
639 lockres->l_flags &= ~USER_LOCK_BUSY; in user_dlm_destroy_lock()
640 spin_unlock(&lockres->l_lock); in user_dlm_destroy_lock()
641 user_log_dlm_error("ocfs2_dlm_unlock", status, lockres); in user_dlm_destroy_lock()
645 user_wait_on_busy_lock(lockres); in user_dlm_destroy_lock()
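
Teardown follows the same careful style as the rest of the file: set IN_TEARDOWN first so no new user can start, drain BUSY in a loop, back out entirely if holders remain, skip the DLM round trip if the lock never attached, and otherwise issue the final unlock (with DLM_LKF_VALBLK, line 635) and wait for its AST. Condensed into one decision function, with illustrative flag names and strings for actions:

    #include <stdio.h>

    #define F_ATTACHED 0x01
    #define F_BUSY     0x02
    #define F_TEARDOWN 0x04

    struct res { unsigned flags; unsigned ro_holders, ex_holders; };

    static const char *destroy_step(struct res *r)
    {
        if (r->flags & F_TEARDOWN)
            return "someone else is already tearing it down: bail";
        r->flags |= F_TEARDOWN;
        if (r->flags & F_BUSY)
            return "wait until not BUSY, then re-check under the lock";
        if (r->ro_holders || r->ex_holders)
            return "still held locally: clear IN_TEARDOWN and bail";
        if (!(r->flags & F_ATTACHED))
            return "never attached to the DLM: nothing to unlock";
        r->flags |= F_BUSY;
        return "ocfs2_dlm_unlock(..., DLM_LKF_VALBLK), then wait on BUSY";
    }

    int main(void)
    {
        struct res r = { F_ATTACHED, 0, 0 };
        printf("%s\n", destroy_step(&r));   /* issues the final unlock */
        return 0;
    }
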