#include <common/spinlock.h>
#include <process/preempt.h>

void __arch_spin_lock(spinlock_t *lock)
{
    __asm__ __volatile__("1:    \n\t"
                         "lock decb %0   \n\t" // atomically decrement the lock byte (try to take it)
                         "jns 3f    \n\t"      // non-negative result: lock acquired, jump to step 3
                         "2:    \n\t"          // acquisition failed, retry later
                         "pause \n\t"
                         "cmpb $0, %0   \n\t"
                         "jle 2b    \n\t" // lock still held (<= 0), keep spinning
                         "jmp 1b    \n\t" // lock looks free, try to acquire it again
                         "3:"
                         : "+m"(lock->lock)::"memory");
    rs_preempt_disable();
}

void __arch_spin_unlock(spinlock_t *lock)
{
    __asm__ __volatile__("movb $1, %0   \n\t" : "=m"(lock->lock)::"memory");
    rs_preempt_enable();
}

void __arch_spin_lock_no_preempt(spinlock_t *lock)
{
    __asm__ __volatile__("1:    \n\t"
                         "lock decb %0   \n\t" // atomically decrement the lock byte (try to take it)
                         "jns 3f    \n\t"      // non-negative result: lock acquired, jump to step 3
                         "2:    \n\t"          // acquisition failed, retry later
                         "pause \n\t"
                         "cmpb $0, %0   \n\t"
                         "jle 2b    \n\t" // lock still held (<= 0), keep spinning
                         "jmp 1b    \n\t" // lock looks free, try to acquire it again
                         "3:"
                         : "+m"(lock->lock)::"memory");
}

void __arch_spin_unlock_no_preempt(spinlock_t *lock)
{
    __asm__ __volatile__("movb $1, %0   \n\t" : "=m"(lock->lock)::"memory");
}

long __arch_spin_trylock(spinlock_t *lock)
{
    uint64_t tmp_val = 0;
    rs_preempt_disable();
    // Atomically exchange tmp_val (0) with the lock byte; if the old value was 1,
    // the lock was free and this CPU now owns it.
    asm volatile("lock xchgb %b0, %1 \n\t" // xchg ensures only one holder can read back the old value 1
                 : "+q"(tmp_val), "+m"(lock->lock)
                 :
                 : "memory");
    if (tmp_val != 1)
        rs_preempt_enable(); // acquisition failed, re-enable preemption
    return tmp_val == 1;
}
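
/*
 * Usage sketch (illustrative only, compiled out): shows how the primitives
 * above are intended to be combined. The initializer {.lock = 1} and the
 * byte-sized `lock` field are assumptions inferred from the "movb $1" in
 * __arch_spin_unlock; check the real spinlock_t definition in
 * common/spinlock.h before relying on them.
 */
#if 0
static spinlock_t example_lock = {.lock = 1}; // 1 = unlocked, 0 or negative = locked

static void example_critical_section(void)
{
    __arch_spin_lock(&example_lock); // spins until acquired, then disables preemption
    /* ... access data shared with other CPUs ... */
    __arch_spin_unlock(&example_lock); // stores 1 back and re-enables preemption

    if (__arch_spin_trylock(&example_lock)) {
        /* acquired without spinning */
        __arch_spin_unlock(&example_lock);
    }
    /* on failure, trylock has already re-enabled preemption itself */
}
#endif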