/linux-6.6.21/mm/
swap_slots.c
    148  mutex_init(&cache->alloc_lock);      in alloc_swap_slot_cache()
    176  mutex_lock(&cache->alloc_lock);      in drain_slots_cache_cpu()
    184  mutex_unlock(&cache->alloc_lock);    in drain_slots_cache_cpu()
    327  mutex_lock(&cache->alloc_lock);      in folio_alloc_swap()
    338  mutex_unlock(&cache->alloc_lock);    in folio_alloc_swap()

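Here the mutex serializes consumers of the per-CPU swap slot cache against drain_slots_cache_cpu(). A minimal sketch of the consume-or-refill pattern in folio_alloc_swap(), assuming the field names of struct swap_slots_cache and a simplified refill step (refill_swap_slots_cache() stands in for the real batch allocation):

#include <linux/swap_slots.h>

/* Sketch only: take one cached swap slot, refilling the array when it runs dry. */
static bool get_cached_swap_slot(struct swap_slots_cache *cache, swp_entry_t *entry)
{
        bool found = false;

        mutex_lock(&cache->alloc_lock);
        if (!cache->nr)
                refill_swap_slots_cache(cache);         /* batch-allocate into ->slots */
        if (cache->nr) {
                *entry = cache->slots[cache->cur];
                cache->slots[cache->cur++].val = 0;     /* consume and clear the slot */
                cache->nr--;
                found = true;
        }
        mutex_unlock(&cache->alloc_lock);

        return found;
}
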
vmscan.c
    3511  lockdep_assert_held(&task->alloc_lock);    in lru_gen_migrate_mm()

/linux-6.6.21/drivers/uio/ |
uio_dmem_genirq.c
    40   struct mutex alloc_lock;            (member)
    57   mutex_lock(&priv->alloc_lock);      in uio_dmem_genirq_open()
    73   mutex_unlock(&priv->alloc_lock);    in uio_dmem_genirq_open()
    90   mutex_lock(&priv->alloc_lock);      in uio_dmem_genirq_release()
    106  mutex_unlock(&priv->alloc_lock);    in uio_dmem_genirq_release()
    207  mutex_init(&priv->alloc_lock);      in uio_dmem_genirq_probe()

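The mutex makes the driver's "first open allocates, last release frees" refcounting atomic across concurrent open()/release() calls. A sketch of the open side under a simplified region layout (the struct and its region_* fields are illustrative, not the driver's exact structure):

#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

struct uio_dmem_sketch {
        struct platform_device *pdev;
        struct mutex alloc_lock;
        unsigned int refcnt;
        unsigned int num_regions;
        size_t region_size[4];
        void *region_vaddr[4];
        dma_addr_t region_dma[4];
};

static int dmem_genirq_open_sketch(struct uio_dmem_sketch *priv)
{
        int i;

        mutex_lock(&priv->alloc_lock);
        if (!priv->refcnt) {            /* first opener allocates the DMA regions */
                for (i = 0; i < priv->num_regions; i++)
                        priv->region_vaddr[i] =
                                dma_alloc_coherent(&priv->pdev->dev,
                                                   priv->region_size[i],
                                                   &priv->region_dma[i],
                                                   GFP_KERNEL);
        }
        priv->refcnt++;
        mutex_unlock(&priv->alloc_lock);

        return 0;
}
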
/linux-6.6.21/drivers/misc/ |
hpilo.c
    547  spin_lock_irqsave(&hw->alloc_lock, flags);         in ilo_close()
    549  spin_unlock_irqrestore(&hw->alloc_lock, flags);    in ilo_close()
    594  spin_lock_irqsave(&hw->alloc_lock, flags);         in ilo_open()
    597  spin_unlock_irqrestore(&hw->alloc_lock, flags);    in ilo_open()
    603  spin_lock_irqsave(&hw->alloc_lock, flags);         in ilo_open()
    605  spin_unlock_irqrestore(&hw->alloc_lock, flags);    in ilo_open()
    651  spin_lock(&hw->alloc_lock);                        in ilo_isr()
    656  spin_unlock(&hw->alloc_lock);                      in ilo_isr()
    676  spin_unlock(&hw->alloc_lock);                      in ilo_isr()
    806  spin_lock_init(&ilo_hw->alloc_lock);               in ilo_probe()

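Note the split visible above: ilo_open()/ilo_close() run in process context and use the _irqsave variants, while ilo_isr() takes the same lock with a plain spin_lock() because interrupts are already disabled in hard-IRQ context. A sketch of that pattern with an illustrative (not the driver's) structure:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct ilo_sketch {
        spinlock_t alloc_lock;          /* protects channel allocation state */
        unsigned long ccb_alloc;        /* bitmap of channels in use */
};

static void open_channel(struct ilo_sketch *hw, int slot)
{
        unsigned long flags;

        spin_lock_irqsave(&hw->alloc_lock, flags);      /* process context: block the ISR */
        set_bit(slot, &hw->ccb_alloc);
        spin_unlock_irqrestore(&hw->alloc_lock, flags);
}

static irqreturn_t channel_isr(int irq, void *data)
{
        struct ilo_sketch *hw = data;

        spin_lock(&hw->alloc_lock);     /* hard-IRQ context: interrupts already off */
        /* scan hw->ccb_alloc, wake readers on channels with pending data */
        spin_unlock(&hw->alloc_lock);

        return IRQ_HANDLED;
}
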
hpilo.h
    61   spinlock_t alloc_lock;    (member)

/linux-6.6.21/init/ |
init_task.c
    129  .alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
    164  &init_task.alloc_lock),

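The boot task exists before any code can call spin_lock_init(), so its alloc_lock is initialized statically; line 164 is the tail of the mems_allowed_seq initializer, which associates that seqcount with the same lock for lockdep. A condensed reconstruction of the relevant initializers (surrounding fields and annotations omitted):

struct task_struct init_task = {
        /* ... */
        .alloc_lock     = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
#ifdef CONFIG_CPUSETS
        .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
                                                 &init_task.alloc_lock),
#endif
        /* ... */
};
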
/linux-6.6.21/include/linux/sched/ |
task.h
    221  spin_lock(&p->alloc_lock);      in task_lock()
    226  spin_unlock(&p->alloc_lock);    in task_unlock()

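alloc_lock is almost never taken by name; these wrappers are the canonical API, and the header's comment (abridged here) spells out what they protect:

/*
 * Abridged from include/linux/sched/task.h: protects ->fs, ->files, ->mm,
 * ->group_info, ->comm, keyring subscriptions, and synchronises with wait4().
 * Nests both inside and outside of read_lock(&tasklist_lock).
 */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}
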
/linux-6.6.21/include/linux/ |
swap_slots.h
    15   struct mutex alloc_lock; /* protects slots, nr, cur */    (member)

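For context, an abridged sketch of the enclosing structure: each CPU caches a batch of preallocated swap entries, with alloc_lock guarding the allocation side and a second lock guarding the batched-free side (field set simplified from the header):

struct swap_slots_cache_sketch {
        struct mutex    alloc_lock;     /* protects slots, nr, cur */
        swp_entry_t     *slots;         /* preallocated swap entries */
        int             nr;             /* entries still available */
        int             cur;            /* index of the next entry to hand out */
        spinlock_t      free_lock;      /* protects the batched-free side */
        swp_entry_t     *slots_ret;     /* entries queued for batched free */
        int             n_ret;
};
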
sched.h
    1132  spinlock_t alloc_lock;    (member)

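In struct task_struct the field carries its own one-line summary of what it guards; paraphrasing the comment next to the declaration:

struct task_struct {
        /* ... */
        /* Protection against (de-)allocation: mm, files, fs, tty, keyrings,
         * mems_allowed, mempolicy (comment paraphrased from the header): */
        spinlock_t              alloc_lock;
        /* ... */
};
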
/linux-6.6.21/drivers/infiniband/hw/hfi1/ |
pio.c
    700   spin_lock_init(&sc->alloc_lock);                   in sc_alloc()
    887   spin_lock_irq(&sc->alloc_lock);                    in sc_disable()
    931   spin_unlock_irq(&sc->alloc_lock);                  in sc_disable()
    1261  spin_lock_irqsave(&sc->alloc_lock, flags);         in sc_enable()
    1332  spin_unlock_irqrestore(&sc->alloc_lock, flags);    in sc_enable()
    1387  spin_lock_irqsave(&sc->alloc_lock, flags);         in sc_stop()
    1391  spin_unlock_irqrestore(&sc->alloc_lock, flags);    in sc_stop()
    1420  spin_lock_irqsave(&sc->alloc_lock, flags);         in sc_buffer_alloc()
    1422  spin_unlock_irqrestore(&sc->alloc_lock, flags);    in sc_buffer_alloc()
    1431  spin_unlock_irqrestore(&sc->alloc_lock, flags);    in sc_buffer_alloc()
    [all …]

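The send-context lock serializes the PIO buffer carve-out in sc_buffer_alloc() against sc_disable()/sc_stop() changing the context's state. A sketch of that shape; the structure, flag name, and error codes below are illustrative, not hfi1's real layout:

#define SCF_ENABLED_SKETCH      0x1

struct send_context_sketch {
        spinlock_t alloc_lock;          /* ____cacheline_aligned_in_smp in the driver */
        unsigned int flags;
        u32 credits_available;
};

static int pio_carve_buffer(struct send_context_sketch *sc, u32 dwords)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&sc->alloc_lock, flags);
        if (!(sc->flags & SCF_ENABLED_SKETCH))
                ret = -ECOMM;           /* raced with sc_disable()/sc_stop() */
        else if (dwords > sc->credits_available)
                ret = -ENOSPC;          /* caller waits for credit return */
        else
                sc->credits_available -= dwords;
        spin_unlock_irqrestore(&sc->alloc_lock, flags);

        return ret;
}
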
pio.h
    73   spinlock_t alloc_lock ____cacheline_aligned_in_smp;

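____cacheline_aligned_in_smp (from <linux/cache.h>) starts the member on its own cache line on SMP builds, so contention on this hot lock does not false-share with neighbouring read-mostly fields. A minimal illustration (not the hfi1 layout):

#include <linux/cache.h>
#include <linux/spinlock.h>

struct hot_alloc_state {
        u64 config_bits;                                        /* read-mostly */
        spinlock_t alloc_lock ____cacheline_aligned_in_smp;     /* written on every allocation */
};
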
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/ |
cmd.c
    164   spin_lock_irqsave(&cmd->alloc_lock, flags);           in cmd_alloc_index()
    171   spin_unlock_irqrestore(&cmd->alloc_lock, flags);      in cmd_alloc_index()
    178   lockdep_assert_held(&cmd->alloc_lock);                in cmd_free_index()
    192   spin_lock_irqsave(&cmd->alloc_lock, flags);           in cmd_ent_put()
    203   spin_unlock_irqrestore(&cmd->alloc_lock, flags);      in cmd_ent_put()
    1000  spin_lock_irqsave(&cmd->alloc_lock, flags);           in cmd_work_handler()
    1003  spin_unlock_irqrestore(&cmd->alloc_lock, flags);      in cmd_work_handler()
    1745  spin_lock_irqsave(&dev->cmd.alloc_lock, flags);       in mlx5_cmd_trigger_completions()
    1758  spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);  in mlx5_cmd_trigger_completions()
    1767  spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);  in mlx5_cmd_trigger_completions()
    [all …]

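The pattern implied by cmd_alloc_index()/cmd_free_index(): a bitmap of free command-queue entries guarded by alloc_lock, with the free side stating its locking contract via lockdep_assert_held(). A simplified sketch (structure and limits are illustrative):

struct cmd_index_sketch {
        spinlock_t alloc_lock;
        unsigned long bitmask;          /* bit set = command entry free */
        int max_entries;                /* <= BITS_PER_LONG in this sketch */
};

static int cmd_index_alloc(struct cmd_index_sketch *cmd)
{
        unsigned long flags;
        int idx;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        idx = find_first_bit(&cmd->bitmask, cmd->max_entries);
        if (idx < cmd->max_entries)
                clear_bit(idx, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);

        return idx < cmd->max_entries ? idx : -ENOMEM;
}

static void cmd_index_free(struct cmd_index_sketch *cmd, int idx)
{
        lockdep_assert_held(&cmd->alloc_lock);  /* caller already holds alloc_lock */
        set_bit(idx, &cmd->bitmask);
}
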
/linux-6.6.21/arch/powerpc/kvm/ |
book3s_64_vio.c
    218  mutex_lock(&stt->alloc_lock);      in kvm_spapr_get_tce_page()
    226  mutex_unlock(&stt->alloc_lock);    in kvm_spapr_get_tce_page()
    319  mutex_init(&stt->alloc_lock);      in kvm_vm_ioctl_create_spapr_tce()

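kvm_spapr_get_tce_page() allocates TCE table pages lazily; the mutex plus a re-check after acquiring it make concurrent first users converge on a single page. A sketch of that allocate-on-first-use pattern with an abridged structure:

#include <linux/gfp.h>
#include <linux/mutex.h>

struct tce_table_sketch {
        struct mutex alloc_lock;
        struct page **pages;            /* NULL until first use */
};

static struct page *tce_get_page(struct tce_table_sketch *stt, unsigned long idx)
{
        struct page *page = stt->pages[idx];

        if (page)
                return page;                    /* fast path: already populated */

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[idx];                 /* re-check: we may have lost the race */
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (page)
                        stt->pages[idx] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}
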
/linux-6.6.21/block/ |
ioprio.c
    155  lockdep_assert_held(&p->alloc_lock);    in __get_task_ioprio()

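Helpers such as __get_task_ioprio() expect the caller to already hold task_lock(p); lockdep_assert_held() turns that contract into a debug-time check. A minimal sketch of the shape (helper names and bodies are illustrative):

static int __task_attr_read(struct task_struct *p)
{
        lockdep_assert_held(&p->alloc_lock);    /* caller must hold task_lock(p) */
        /* safe to read fields that alloc_lock guards */
        return 0;
}

static int task_attr_read(struct task_struct *p)
{
        int ret;

        task_lock(p);
        ret = __task_attr_read(p);
        task_unlock(p);

        return ret;
}
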
/linux-6.6.21/security/yama/ |
yama_lsm.c
    80   assert_spin_locked(&target->alloc_lock); /* for target->comm */    in report_access()

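task->comm is one of the fields task_lock() guards against concurrent rewrites (e.g. via prctl(PR_SET_NAME)); Yama asserts the lock because it reads target->comm in place. A sketch of the copying alternative, get_task_comm(), which takes task_lock() internally:

#include <linux/sched.h>

static void log_task_name(struct task_struct *p)
{
        char comm[TASK_COMM_LEN];

        get_task_comm(comm, p);         /* snapshots ->comm under task_lock() */
        pr_info("access by task %s (pid %d)\n", comm, p->pid);
}
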
/linux-6.6.21/arch/powerpc/include/asm/ |
kvm_host.h
    199  struct mutex alloc_lock;    (member)

/linux-6.6.21/drivers/net/ethernet/mellanox/mlx4/ |
resource_tracker.c
    321  spin_lock(&res_alloc->alloc_lock);         in mlx4_grant_resource()
    374  spin_unlock(&res_alloc->alloc_lock);       in mlx4_grant_resource()
    390  spin_lock(&res_alloc->alloc_lock);         in mlx4_release_resource()
    419  spin_unlock(&res_alloc->alloc_lock);       in mlx4_release_resource()
    551  spin_lock_init(&res_alloc->alloc_lock);    in mlx4_init_resource_tracker()

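mlx4_grant_resource()/mlx4_release_resource() enforce per-function quotas for SR-IOV resources; alloc_lock keeps the counters consistent while a grant is checked and applied. A simplified sketch (field names and the error code are illustrative):

struct resource_allocator_sketch {
        spinlock_t alloc_lock;          /* protect quotas */
        int quota;                      /* per-function limit */
        int allocated;                  /* currently granted */
};

static int grant_resource(struct resource_allocator_sketch *res_alloc, int count)
{
        int err = 0;

        spin_lock(&res_alloc->alloc_lock);
        if (res_alloc->allocated + count > res_alloc->quota)
                err = -EDQUOT;          /* over quota: refuse the grant */
        else
                res_alloc->allocated += count;
        spin_unlock(&res_alloc->alloc_lock);

        return err;
}

static void release_resource(struct resource_allocator_sketch *res_alloc, int count)
{
        spin_lock(&res_alloc->alloc_lock);
        res_alloc->allocated -= count;
        spin_unlock(&res_alloc->alloc_lock);
}
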
mlx4.h
    543  spinlock_t alloc_lock; /* protect quotas */    (member)

/linux-6.6.21/include/linux/mlx5/ |
driver.h
    311  spinlock_t alloc_lock;    (member)

/linux-6.6.21/kernel/ |
fork.c
    2389  spin_lock_init(&p->alloc_lock);                                in copy_process()
    2442  seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);  in copy_process()

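copy_process() gives every new task a freshly initialized alloc_lock and ties the cpuset mems_allowed seqcount to it, so lockdep knows that seqcount's writers are serialized by alloc_lock. Condensed from the two references above (surrounding code omitted):

        spin_lock_init(&p->alloc_lock);
        /* ... */
        seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
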
/linux-6.6.21/Documentation/admin-guide/cgroup-v1/ |
cgroups.rst
    535  - while holding the task's alloc_lock (via task_lock())