/DragonOS-0.1.8/kernel/src/common/

spinlock.h
    22   int8_t lock; // 1:unlocked 0:locked    member
    25   extern void __arch_spin_lock(spinlock_t *lock);
    26   extern void __arch_spin_unlock(spinlock_t *lock);
    28   extern void __arch_spin_lock_no_preempt(spinlock_t *lock);
    29   extern void __arch_spin_unlock_no_preempt(spinlock_t *lock);
    31   extern long __arch_spin_trylock(spinlock_t *lock);
    38   void spin_lock(spinlock_t *lock)    in spin_lock() argument
    40   __arch_spin_lock(lock);    in spin_lock()
    48   void spin_unlock(spinlock_t *lock)    in spin_unlock() argument
    50   __arch_spin_unlock(lock);    in spin_unlock()
    [all …]
mutex.h
    39   void mutex_init(mutex_t *lock);
    46   void mutex_lock(mutex_t *lock);
    53   void mutex_unlock(mutex_t *lock);
    62   int mutex_trylock(mutex_t *lock);
    69   #define mutex_is_locked(lock) ((atomic_read(&(lock)->count) == 1) ? 0 : 1)    argument
kfifo.h
    130  …ays_inline kfifo_in_locked(struct kfifo_t *fifo, const void *from, uint32_t size, spinlock_t *lock)    in kfifo_in_locked() argument
    132  spin_lock(lock);    in kfifo_in_locked()
    134  spin_unlock(lock);    in kfifo_in_locked()
    147  …t __always_inline kfifo_out_locked(struct kfifo_t *fifo, void *to, uint32_t size, spinlock_t *lock)    in kfifo_out_locked() argument
    149  spin_lock(lock);    in kfifo_out_locked()
    151  spin_unlock(lock);    in kfifo_out_locked()
wait_queue.h
    38   void wait_queue_sleep_on_unlock(wait_queue_node_t *wait_queue_head, void *lock);
    57   spinlock_t lock; // The queue needs a spinlock. It is not used internally yet, but it may be later. [It is used inside completion.]    member
    102  void wait_queue_sleep_with_node_unlock(wait_queue_head_t *q, wait_queue_node_t *wait, void *lock);
/DragonOS-0.1.8/kernel/src/libs/

mutex.c
    10   void mutex_init(mutex_t *lock)    in mutex_init() argument
    12   atomic_set(&lock->count, 1);    in mutex_init()
    13   spin_init(&lock->wait_lock);    in mutex_init()
    14   list_init(&lock->wait_list);    in mutex_init()
    24   static void __mutex_acquire(mutex_t *lock)    in __mutex_acquire() argument
    32   void mutex_lock(mutex_t *lock)    in mutex_lock() argument
    38   spin_lock(&lock->wait_lock);    in mutex_lock()
    39   if (likely(mutex_is_locked(lock)))    in mutex_lock()
    45   spin_unlock(&lock->wait_lock);    in mutex_lock()
    51   list_append(&lock->wait_list, &waiter->list);    in mutex_lock()
    [all …]
spinlock.rs
    17   pub fn spin_lock_irqsave(lock: *mut spinlock_t, flags: &mut usize) {    in spin_lock_irqsave()
    20   spin_lock(lock);    in spin_lock_irqsave()
    26   pub fn spin_unlock_irqrestore(lock: *mut spinlock_t, flags: usize) {    in spin_unlock_irqrestore()
    28   spin_unlock(lock);    in spin_unlock_irqrestore()
    35   pub fn spin_is_locked(lock: &spinlock_t) -> bool {    in spin_is_locked()
    36   let val = unsafe { read_volatile(&lock.lock as *const i8) };    in spin_is_locked()
    43   Self { lock: 1 }    in default()
    48   pub fn spin_lock_irq(lock: *mut spinlock_t) {    in spin_lock_irq()
    51   spin_lock(lock);    in spin_lock_irq()
    56   pub fn spin_unlock_irq(lock: *mut spinlock_t) {    in spin_unlock_irq()
    [all …]
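The entries above show the byte-sized `spinlock_t` convention (1 = unlocked, 0 = locked; see spinlock.h line 22 and `Self { lock: 1 }` in `default()`) plus the `spin_lock_irqsave`/`spin_unlock_irqrestore` wrappers. Below is a minimal, self-contained sketch of that convention using a core atomic; it is not DragonOS's implementation, and `local_irq_save`/`local_irq_restore` are hypothetical placeholders for the kernel-only interrupt-flag handling.

```rust
use core::sync::atomic::{AtomicI8, Ordering};

/// Sketch of the spinlock_t convention from common/spinlock.h: 1 = unlocked, 0 = locked.
pub struct RawSpin {
    lock: AtomicI8,
}

impl RawSpin {
    pub const fn new() -> Self {
        // Starts unlocked, like `Self { lock: 1 }` in default().
        Self { lock: AtomicI8::new(1) }
    }

    /// Spin until the byte can be moved from 1 (unlocked) to 0 (locked).
    pub fn lock(&self) {
        while self
            .lock
            .compare_exchange_weak(1, 0, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
    }

    /// Store 1 back, mirroring the `movb $1, %0` in the x86_64 unlock path.
    pub fn unlock(&self) {
        self.lock.store(1, Ordering::Release);
    }

    pub fn is_locked(&self) -> bool {
        self.lock.load(Ordering::Relaxed) == 0
    }
}

// Hypothetical placeholders for the kernel-only IRQ flag save/restore.
fn local_irq_save() -> usize { 0 }
fn local_irq_restore(_flags: usize) {}

/// Rough shape of spin_lock_irqsave / spin_unlock_irqrestore from libs/spinlock.rs.
pub fn spin_lock_irqsave(lock: &RawSpin, flags: &mut usize) {
    *flags = local_irq_save();
    lock.lock();
}

pub fn spin_unlock_irqrestore(lock: &RawSpin, flags: usize) {
    lock.unlock();
    local_irq_restore(flags);
}
```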
lockref.rs
    11   pub lock: RawSpinlock,    field
    18   lock: RawSpinlock,    field
    34   lock: RawSpinlock::INIT,
    58   if !old.lock.is_locked() {    in cmpxchg_loop()
    63   new.lock.set_value(false);    in cmpxchg_loop()
    133  self.lock.lock();    in inc()
    135  self.lock.unlock();    in inc()
    155  self.lock.lock();    in inc_not_zero()
    162  self.lock.unlock();    in inc_not_zero()
    184  self.lock.lock();    in inc_not_dead()
    [all …]
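The `lockref` fast path visible above compare-exchanges a combined (spinlock, count) value and only falls back to `self.lock.lock()` when the lock is already held. A simplified sketch of that idea, packing an assumed lock bit and the count into one `AtomicU64` (the real `LockRef` pairs a `RawSpinlock` with a separate count field):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Assumed layout for illustration only: low bit = "locked", upper bits = count.
const LOCKED: u64 = 1;
const COUNT_ONE: u64 = 2;

pub struct LockRefSketch {
    word: AtomicU64,
}

impl LockRefSketch {
    pub fn new(count: u64) -> Self {
        Self { word: AtomicU64::new(count * COUNT_ONE) }
    }

    /// Fast path of inc(): if nobody holds the lock, bump the count with a
    /// single compare-exchange instead of taking the spinlock.
    pub fn inc(&self) {
        let mut old = self.word.load(Ordering::Relaxed);
        loop {
            if old & LOCKED != 0 {
                // Slow path in the real code: self.lock.lock(); count += 1; unlock().
                // Here we just retry until the holder clears the bit.
                std::hint::spin_loop();
                old = self.word.load(Ordering::Relaxed);
                continue;
            }
            match self.word.compare_exchange_weak(
                old,
                old + COUNT_ONE,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(cur) => old = cur,
            }
        }
    }

    pub fn count(&self) -> u64 {
        self.word.load(Ordering::Relaxed) / COUNT_ONE
    }
}
```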
lockref.c
    13   … while (likely(!spin_is_locked(&old.lock)))    \
    46   spin_lock(&lock_ref->lock);    in lockref_inc()
    48   spin_unlock(&lock_ref->lock);    in lockref_inc()
    70   spin_lock(&lock_ref->lock);    in lockref_inc_not_zero()
    77   spin_unlock(&lock_ref->lock);    in lockref_inc_not_zero()
    104  spin_lock(&lock_ref->lock);    in lockref_dec()
    110  spin_unlock(&lock_ref->lock);    in lockref_dec()
    161  spin_lock(&lock_ref->lock);    in lockref_dec_not_zero()
    167  spin_unlock(&lock_ref->lock);    in lockref_dec_not_zero()
    193  spin_lock(&lock_ref->lock);    in lockref_dec_or_lock_not_zero()
    [all …]
rwlock.rs
    36   lock: AtomicU32,    field
    44   lock: &'a AtomicU32,    field
    73   lock: AtomicU32::new(0),    in new()
    100  let value = self.lock.fetch_add(READER, Ordering::Acquire);    in current_reader()
    104  self.lock.fetch_sub(READER, Ordering::Release);    in current_reader()
    138  self.lock.fetch_sub(READER, Ordering::Release);    in inner_try_read()
    143  lock: &self.lock,    in inner_try_read()
    164  let state = self.lock.load(Ordering::Relaxed);    in reader_count()
    172  return (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER;    in writer_count()
    193  .lock    in inner_try_write()
    [all …]
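rwlock.rs packs the writer flag and the reader count into a single `AtomicU32`: readers are added optimistically with `fetch_add(READER, Acquire)` and rolled back with `fetch_sub(READER, Release)` if a writer turns out to be present. A self-contained sketch of that pattern; the `READER`/`WRITER` constant values below are assumptions, not the ones in the source:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Assumed bit layout: writer flag in the low bit, readers counted above it.
const WRITER: u32 = 1 << 0;
const READER: u32 = 1 << 2;

struct RwLockSketch {
    lock: AtomicU32,
}

impl RwLockSketch {
    const fn new() -> Self {
        Self { lock: AtomicU32::new(0) }
    }

    /// Optimistically add a reader, then roll back if a writer holds the lock —
    /// the same fetch_add / fetch_sub dance visible in current_reader() and
    /// inner_try_read() above.
    fn try_read(&self) -> bool {
        let value = self.lock.fetch_add(READER, Ordering::Acquire);
        if value & WRITER != 0 {
            // A writer is active: undo the optimistic increment and fail.
            self.lock.fetch_sub(READER, Ordering::Release);
            false
        } else {
            true
        }
    }

    fn read_unlock(&self) {
        self.lock.fetch_sub(READER, Ordering::Release);
    }

    fn reader_count(&self) -> u32 {
        self.lock.load(Ordering::Relaxed) / READER
    }

    fn writer_count(&self) -> u32 {
        (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
    }
}
```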
mutex.rs
    40   lock: &'a Mutex<T>,    field
    62   pub fn lock(&self) -> MutexGuard<T> {    in lock() method
    64   let mut inner: SpinLockGuard<MutexInner> = self.inner.lock();    in lock()
    84   return MutexGuard { lock: self };    in lock()
    93   let mut inner = self.inner.lock();    in try_lock()
    101  return Ok(MutexGuard { lock: self });    in try_lock()
    116  let mut inner: SpinLockGuard<MutexInner> = self.inner.lock();    in unlock()
    154  return unsafe { &*self.lock.data.get() };    in deref()
    161  return unsafe { &mut *self.lock.data.get() };    in deref_mut()
    168  self.lock.unlock();    in drop()
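mutex.rs hands out a `MutexGuard` whose `Deref`/`DerefMut` expose the protected data and whose `Drop` releases the lock. A stripped-down, self-contained sketch of the same guard pattern; unlike the real `Mutex`, which parks waiters on a wait list behind a `SpinLock<MutexInner>`, this sketch simply spins:

```rust
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering};

pub struct MutexSketch<T> {
    locked: AtomicBool,
    data: UnsafeCell<T>,
}

unsafe impl<T: Send> Sync for MutexSketch<T> {}

pub struct MutexGuardSketch<'a, T> {
    lock: &'a MutexSketch<T>,
}

impl<T> MutexSketch<T> {
    pub const fn new(value: T) -> Self {
        Self { locked: AtomicBool::new(false), data: UnsafeCell::new(value) }
    }

    /// Busy-wait until the flag can be set, then hand back a guard.
    pub fn lock(&self) -> MutexGuardSketch<'_, T> {
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
        MutexGuardSketch { lock: self }
    }
}

impl<T> Deref for MutexGuardSketch<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // Safe because holding the guard means we own the lock.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> DerefMut for MutexGuardSketch<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T> Drop for MutexGuardSketch<'_, T> {
    fn drop(&mut self) {
        // Releasing the lock on drop is what makes forgetting unlock() impossible.
        self.lock.locked.store(false, Ordering::Release);
    }
}
```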
wait_queue.rs
    23   lock: Default::default(),    in default()
    45   let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep()
    58   let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep_with_func()
    83   let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep_without_schedule()
    90   let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep_uninterruptible()
    100  let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep_unlock_spinlock()
    111  let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep_unlock_mutex()
    122  let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep_uninterruptible_unlock_spinlock()
    133  let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in sleep_uninterruptible_unlock_mutex()
    149  let mut guard: SpinLockGuard<InnerWaitQueue> = self.0.lock();    in wakeup()
    [all …]
wait_queue_head.c
    14   spin_init(&wait_queue->lock);    in wait_queue_head_init()
    37   void wait_queue_sleep_with_node_unlock(wait_queue_head_t *q, wait_queue_node_t *wait, void *lock)    in wait_queue_sleep_with_node_unlock() argument
    43   spin_unlock((spinlock_t *)lock);    in wait_queue_sleep_with_node_unlock()
/DragonOS-0.1.8/kernel/src/arch/x86_64/asm/

spinlock.c
    4    void __arch_spin_lock(spinlock_t *lock)    in __arch_spin_lock() argument
    15   : "=m"(lock->lock)::"memory");    in __arch_spin_lock()
    19   void __arch_spin_unlock(spinlock_t *lock)    in __arch_spin_unlock() argument
    22   __asm__ __volatile__("movb $1, %0 \n\t" : "=m"(lock->lock)::"memory");    in __arch_spin_unlock()
    25   void __arch_spin_lock_no_preempt(spinlock_t *lock)    in __arch_spin_lock_no_preempt() argument
    36   : "=m"(lock->lock)::"memory");    in __arch_spin_lock_no_preempt()
    39   void __arch_spin_unlock_no_preempt(spinlock_t *lock)    in __arch_spin_unlock_no_preempt() argument
    41   __asm__ __volatile__("movb $1, %0 \n\t" : "=m"(lock->lock)::"memory");    in __arch_spin_unlock_no_preempt()
    44   long __arch_spin_trylock(spinlock_t *lock)    in __arch_spin_trylock() argument
    50   : "=q"(tmp_val), "=m"(lock->lock)    in __arch_spin_trylock()
/DragonOS-0.1.8/kernel/src/sched/

completion.c
    23   spin_lock(&x->wait_queue.lock);    in complete()
    29   spin_unlock(&x->wait_queue.lock);    in complete()
    39   spin_lock(&x->wait_queue.lock);    in complete_all()
    45   spin_unlock(&x->wait_queue.lock);    in complete_all()
    70   spin_unlock(&x->wait_queue.lock);    in __wait_for_common()
    73   spin_lock(&x->wait_queue.lock);    in __wait_for_common()
    94   spin_lock(&x->wait_queue.lock);    in wait_for_completion()
    96   spin_unlock(&x->wait_queue.lock);    in wait_for_completion()
    109  spin_lock(&x->wait_queue.lock);    in wait_for_completion_timeout()
    111  spin_unlock(&x->wait_queue.lock);    in wait_for_completion_timeout()
    [all …]
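completion.c guards a done counter with the embedded wait queue's spinlock and sleeps or wakes waiters through the scheduler. As a userspace analogy only (not the kernel code), the same `complete` / `complete_all` / `wait_for_completion` semantics can be sketched with `std::sync::Mutex` and `Condvar`:

```rust
use std::sync::{Condvar, Mutex};

// Userspace analogy of sched/completion.c: the kernel version protects `done`
// with the wait queue's spinlock and parks waiters via the scheduler; here a
// Condvar plays that role.
pub struct CompletionSketch {
    done: Mutex<u32>,
    cond: Condvar,
}

impl CompletionSketch {
    pub fn new() -> Self {
        Self { done: Mutex::new(0), cond: Condvar::new() }
    }

    /// complete(): record one finished event and wake a single waiter.
    pub fn complete(&self) {
        let mut done = self.done.lock().unwrap();
        *done += 1;
        self.cond.notify_one();
    }

    /// complete_all(): make every current and future wait return immediately.
    pub fn complete_all(&self) {
        let mut done = self.done.lock().unwrap();
        *done = u32::MAX;
        self.cond.notify_all();
    }

    /// wait_for_completion(): block until at least one complete() happened.
    pub fn wait_for_completion(&self) {
        let mut done = self.done.lock().unwrap();
        while *done == 0 {
            done = self.cond.wait(done).unwrap();
        }
        if *done != u32::MAX {
            *done -= 1;
        }
    }
}
```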
rt.rs
    39   lock: RawSpinlock,    field
    48   lock: RawSpinlock::INIT,    in new()
    54   self.lock.lock_irqsave(&mut rflags);    in enqueue()
    58   self.lock.unlock_irqrestore(rflags);    in enqueue()
    62   self.lock.unlock_irqrestore(rflags);    in enqueue()
    69   self.lock.lock_irqsave(&mut rflags);    in dequeue()
    77   self.lock.unlock_irqrestore(rflags);    in dequeue()
    82   self.lock.lock_irqsave(&mut rflags);    in enqueue_front()
    86   self.lock.unlock_irqrestore(rflags);    in enqueue_front()
    90   self.lock.unlock_irqrestore(rflags);    in enqueue_front()
cfs.rs
    42   lock: RawSpinlock,    field
    53   lock: RawSpinlock::INIT,    in new()
    62   self.lock.lock_irqsave(&mut rflags);    in enqueue()
    66   self.lock.unlock_irqrestore(rflags);    in enqueue()
    72   self.lock.unlock_irqrestore(rflags);    in enqueue()
    79   self.lock.lock_irqsave(&mut rflags);    in dequeue()
    87   self.lock.unlock_irqrestore(rflags);    in dequeue()
    147  current_cpu_queue.lock.lock();    in timer_update_jiffies()
    153  current_cpu_queue.lock.unlock();    in timer_update_jiffies()
/DragonOS-0.1.8/kernel/src/driver/base/platform/

mod.rs
    78   let device_map = &self.0.lock().devices;    in get_device()
    88   let driver_map = &self.0.lock().drivers;    in get_driver()
    99   let drivers = &mut self.0.lock().drivers;    in register_platform_driver()
    116  self.0.lock().drivers.remove(&id_table);    in unregister_platform_driver()
    129  let devices = &mut self.0.lock().devices;    in register_platform_device()
    145  self.0.lock().devices.remove(&id_table);    in unregister_platform_device()
    154  let devices = &self.0.lock().devices;    in driver_match_device()
    186  let drivers = &mut self.0.lock().drivers;    in device_match_driver()
    244  return self.0.lock().sys_info.clone();    in sys_info()
    250  self.0.lock().sys_info = sys_info;    in set_sys_info()
    [all …]
/DragonOS-0.1.8/docs/kernel/locking/

mutex.md
    49   When you need to read or modify data protected by a Mutex, first call the Mutex's `lock()` method. It returns a `MutexGuard`, through which you can call the protected data's member functions to operate on it. Or…
    56   let mut g :MutexGuard<Vec<i32>>= x.lock();
    101  ### 4.2. lock - acquiring the lock
    106  pub fn lock(&self) -> MutexGuard<T>
    145  **`void mutex_init(mutex_t *lock)`**
    151  **`void mutex_lock(mutex_t *lock)`**
    157  **`void mutex_unlock(mutex_t *lock)`**
    163  **`void mutex_trylock(mutex_t *lock)`**
    169  **`void mutex_is_locked(mutex_t *lock)`**
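Expanding the fragment quoted from mutex.md line 56 into a complete snippet. It assumes DragonOS's `Mutex`/`MutexGuard` from kernel/src/libs/mutex.rs, so the import path is inferred from the file layout above and the snippet only compiles inside the kernel crate:

```rust
// Hypothetical import path inferred from kernel/src/libs/mutex.rs.
use crate::libs::mutex::{Mutex, MutexGuard};

fn mutex_usage_example() {
    // Wrap the data in the Mutex; the lock and the data it protects travel together.
    let x: Mutex<Vec<i32>> = Mutex::new(Vec::new());

    // lock() blocks until the mutex is free and hands back a guard.
    let mut g: MutexGuard<Vec<i32>> = x.lock();
    g.push(1); // operate on the protected Vec through the guard

    // When `g` goes out of scope its Drop impl unlocks the mutex automatically.
    drop(g);
}
```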
spinlock.md
    19   You must call the `lock()` method first and then, on leaving the critical section, call the `unlock()` method manually. We never explicitly tell the compiler which data the spinlock actually protects.
    39   lock: RawSpinlock,
    55   When you need to read or modify data protected by a SpinLock, first call the SpinLock's `lock()` method. It returns a `SpinLockGuard`, through which you can call the protected data's member functions…
    62   let mut g :SpinLockGuard<Vec<i32>>= x.lock();
    94   `SpinLock` can perform its checks at compile time because it introduces a `SpinLockGuard` as a guard. When writing code, we guarantee that only by calling `SpinLock`'s `lock()…
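Likewise for spinlock.md, a short sketch around the quoted `SpinLockGuard` line. The `SpinLock::new` constructor and the import path are assumptions inferred from the documentation and the file layout above:

```rust
// Hypothetical import path inferred from kernel/src/libs/spinlock.rs.
use crate::libs::spinlock::{SpinLock, SpinLockGuard};

fn spinlock_usage_example() {
    let x: SpinLock<Vec<i32>> = SpinLock::new(Vec::new());

    // lock() spins until the lock is acquired and returns a guard.
    let mut g: SpinLockGuard<Vec<i32>> = x.lock();
    g.push(1); // the protected Vec is only reachable through the guard

    // Dropping the guard releases the lock, so unlock() can never be forgotten.
    drop(g);
}
```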
/DragonOS-0.1.8/kernel/src/filesystem/ramfs/

mod.rs
    105  let mut root_guard: SpinLockGuard<RamFSInode> = result.root_inode.0.lock();    in new()
    128  let inode: SpinLockGuard<RamFSInode> = self.0.lock();    in read_at()
    161  let mut inode: SpinLockGuard<RamFSInode> = self.0.lock();    in write_at()
    182  let inode: SpinLockGuard<RamFSInode> = self.0.lock();    in poll()
    193  return self.0.lock().fs.upgrade().unwrap();    in fs()
    201  let inode = self.0.lock();    in metadata()
    209  let mut inode = self.0.lock();    in set_metadata()
    221  let mut inode = self.0.lock();    in resize()
    238  let mut inode = self.0.lock();    in create_with_data()
    274  result.0.lock().self_ref = Arc::downgrade(&result);    in create_with_data()
    [all …]
/DragonOS-0.1.8/kernel/src/driver/disk/ahci/

ahci_inode.rs
    61   result.0.lock().self_ref = Arc::downgrade(&result);    in new()
    69   self.0.lock().fs = fs;    in set_fs()
    87   return Ok(self.0.lock().metadata.clone());    in metadata()
    91   return self.0.lock().fs.upgrade().unwrap();    in fs()
    99   let mut inode = self.0.lock();    in set_metadata()
    127  return self.0.lock().disk.read_at(offset, len, buf);    in read_at()
    146  return self.0.lock().disk.write_at(offset, len, buf);    in write_at()
/DragonOS-0.1.8/kernel/src/net/

socket.rs
    75   SocketType::UdpSocket => self.udp_port_table.lock(),    in get_ephemeral_port()
    76   SocketType::TcpSocket => self.tcp_port_table.lock(),    in get_ephemeral_port()
    99   SocketType::UdpSocket => self.udp_port_table.lock(),    in get_port()
    100  SocketType::TcpSocket => self.tcp_port_table.lock(),    in get_port()
    115  SocketType::UdpSocket => self.udp_port_table.lock(),    in unbind_port()
    116  SocketType::TcpSocket => self.tcp_port_table.lock(),    in unbind_port()
    149  let mut socket_set_guard = SOCKET_SET.lock();    in drop()
    264  GlobalSocketHandle::new(SOCKET_SET.lock().add(socket));    in new()
    287  let mut socket_set_guard = SOCKET_SET.lock();    in read()
    317  let mut socket_set_guard = SOCKET_SET.lock();    in write()
    [all …]
/DragonOS-0.1.8/kernel/src/time/

clocksource.rs
    143  let watchdog_list = &WATCHDOG_LIST.lock();    in clocksource_start_watchdog()
    289  let list_guard = &mut CLOCKSOURCE_LIST.lock();    in clocksource_enqueue()
    343  let mut list_guard = WATCHDOG_LIST.lock();    in clocksource_enqueue_watchdog()
    348  let cs_watchdog = &mut CLOCKSOUCE_WATCHDOG.lock();    in clocksource_enqueue_watchdog()
    400  let mut locked_watchdog = CLOCKSOUCE_WATCHDOG.lock();    in clocksource_dequeue_watchdog()
    407  let mut list = WATCHDOG_LIST.lock();    in clocksource_dequeue_watchdog()
    442  let mut clocksource_list = CLOCKSOURCE_LIST.lock();    in clocksource_dequeue_watchdog()
    495  let mut list = CLOCKSOURCE_LIST.lock();    in clocksource_dequeue()
    622  let list = CLOCKSOURCE_LIST.lock();    in clocksource_resume()
    638  let list = CLOCKSOURCE_LIST.lock();    in clocksource_suspend()
    [all …]
/DragonOS-0.1.8/kernel/src/driver/base/device/

bus.rs
    109  let mut bus_manager = self.0.lock();    in add_bus()
    120  let mut bus_manager = self.0.lock();    in add_driver()
    130  let mut bus_manager = self.0.lock();    in remove_bus()
    140  let mut bus_manager = self.0.lock();    in remove_bus_driver()
    150  let bus_manager = self.0.lock();    in get_bus()
    160  let bus_manager = self.0.lock();    in get_driver()
    170  return self.0.lock().sys_info.clone();    in sys_info()
/DragonOS-0.1.8/kernel/src/filesystem/procfs/

mod.rs
    271  let mut root_guard: SpinLockGuard<ProcFSInode> = result.root_inode.0.lock();    in new()
    295  _sf.0.lock().fdata.pid = pid;    in register_pid()
    296  _sf.0.lock().fdata.ftype = ProcFileType::ProcStatus;    in register_pid()
    326  let mut inode: SpinLockGuard<ProcFSInode> = self.0.lock();    in open()
    348  let guard: SpinLockGuard<ProcFSInode> = self.0.lock();    in close()
    376  let inode: SpinLockGuard<ProcFSInode> = self.0.lock();    in read_at()
    424  let inode: SpinLockGuard<ProcFSInode> = self.0.lock();    in poll()
    435  return self.0.lock().fs.upgrade().unwrap();    in fs()
    443  let inode = self.0.lock();    in metadata()
    450  let mut inode = self.0.lock();    in set_metadata()
    [all …]