/linux-6.1.9/drivers/net/wireless/realtek/rtlwifi/ |
D | ps.h | 14 void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block); 15 void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block);
|
D | ps.c | 656 void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block) in rtl_lps_enter() argument 660 if (may_block) in rtl_lps_enter() 667 void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block) in rtl_lps_leave() argument 671 if (may_block) in rtl_lps_leave()
|
/linux-6.1.9/virt/kvm/ |
D | kvm_mm.h | 34 bool may_block); 39 bool may_block) in gfn_to_pfn_cache_invalidate_start() argument
|
D | pfncache.c | 26 unsigned long end, bool may_block) in gfn_to_pfn_cache_invalidate_start() argument 70 if (!may_block) in gfn_to_pfn_cache_invalidate_start() 75 WARN_ON_ONCE(called && !may_block); in gfn_to_pfn_cache_invalidate_start()
|
D | kvm_main.c | 556 bool may_block; member 617 gfn_range.may_block = range->may_block; in __kvm_handle_hva_range() 669 .may_block = false, in kvm_handle_hva_range() 689 .may_block = false, in kvm_handle_hva_range_no_flush() 759 .may_block = mmu_notifier_range_blockable(range), in kvm_mmu_notifier_invalidate_range_start() 787 hva_range.may_block); in kvm_mmu_notifier_invalidate_range_start() 824 .may_block = mmu_notifier_range_blockable(range), in kvm_mmu_notifier_invalidate_range_end()
|
/linux-6.1.9/include/linux/ |
D | dm-region-hash.h | 64 int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block); 66 enum dm_rh_region_states state, int may_block);
|
D | nfs_fs.h | 534 u32 *mask, bool may_block);
|
D | kvm_host.h | 253 bool may_block; member
|
/linux-6.1.9/arch/riscv/kvm/ |
D | mmu.c | 270 gpa_t size, bool may_block) in gstage_unmap_range() argument 300 if (may_block && addr < end) in gstage_unmap_range() 557 range->may_block); in kvm_unmap_gfn_range()
|
/linux-6.1.9/drivers/md/ |
D | dm-region-hash.c | 338 int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block) in dm_rh_get_state() argument 354 r = rh->log->type->in_sync(rh->log, region, may_block); in dm_rh_get_state()
|
D | dm-raid1.c | 551 int may_block) in region_in_sync() argument 553 int state = dm_rh_get_state(ms->rh, region, may_block); in region_in_sync()
|
/linux-6.1.9/arch/arm64/kvm/ |
D | mmu.c | 212 bool may_block) in __unmap_stage2_range() argument 220 may_block)); in __unmap_stage2_range() 1521 range->may_block); in kvm_unmap_gfn_range()
|
/linux-6.1.9/fs/nfs/ |
D | dir.c | 2951 nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block) in nfs_access_get_cached_locked() argument 2972 if (!may_block) in nfs_access_get_cached_locked() 3023 u32 *mask, bool may_block) in nfs_access_get_cached() argument 3030 may_block); in nfs_access_get_cached() 3149 bool may_block = (mask & MAY_NOT_BLOCK) == 0; in nfs_do_access() local 3155 status = nfs_access_get_cached(inode, cred, &cache.mask, may_block); in nfs_do_access() 3160 if (!may_block) in nfs_do_access()
|
/linux-6.1.9/fs/fuse/ |
D | fuse_i.h | 263 bool may_block:1; member
|
D | virtio_fs.c | 638 if (req->args->may_block) { in virtio_fs_requests_done_work()
|
D | file.c | 771 ia->ap.args.may_block = io->should_dirty; in fuse_async_req_send()
|
/linux-6.1.9/arch/x86/kvm/mmu/ |
D | tdp_mmu.c | 1234 range->end, range->may_block, flush); in kvm_tdp_mmu_unmap_gfn_range()
|