/linux-6.1.9/mm/

D | page_alloc.c
  2537  unsigned int alloc_flags)  in prep_new_page() argument
  2550  if (alloc_flags & ALLOC_NO_WATERMARKS)  in prep_new_page()
  2764  unsigned int alloc_flags, int start_type, bool whole_block)  in steal_suitable_fallback() argument
  2790  if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))  in steal_suitable_fallback()
  3006  unsigned int alloc_flags)  in __rmqueue_fallback() argument
  3020  if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)  in __rmqueue_fallback()
  3072  steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,  in __rmqueue_fallback()
  3088  unsigned int alloc_flags)  in __rmqueue() argument
  3098  if (alloc_flags & ALLOC_CMA &&  in __rmqueue()
  3109  if (alloc_flags & ALLOC_CMA)  in __rmqueue()
  [all …]

D | compaction.c
  2170  unsigned int alloc_flags,  in __compaction_suitable() argument
  2179  watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);  in __compaction_suitable()
  2185  alloc_flags))  in __compaction_suitable()
  2220  unsigned int alloc_flags,  in compaction_suitable() argument
  2226  ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,  in compaction_suitable()
  2258  int alloc_flags)  in compaction_zonelist_suitable() argument
  2280  compact_result = __compaction_suitable(zone, order, alloc_flags,  in compaction_zonelist_suitable()
  2312  ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,  in compact_zone()
  2501  unsigned int alloc_flags, int highest_zoneidx,  in compact_zone_order() argument
  2512  .alloc_flags = alloc_flags,  in compact_zone_order()
  [all …]

D | internal.h
  415  const unsigned int alloc_flags; /* alloc flags of a direct compactor */  member
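
Note: the mm/ hits above all follow one pattern — alloc_flags is a bitmask threaded from the allocator entry points down through __rmqueue() and into compaction, with each policy tested via `&` (skip watermarks, allow CMA pageblocks, select a watermark level through ALLOC_WMARK_MASK). A minimal userspace C sketch of that pattern; the flag values are stand-ins, not the kernel's real definitions:

/* Sketch of the alloc_flags bitmask pattern in mm/page_alloc.c and
 * mm/compaction.c; flag values are illustrative stand-ins. */
#include <stdio.h>

#define ALLOC_WMARK_MIN      0x00u  /* check against the min watermark */
#define ALLOC_WMARK_LOW      0x01u
#define ALLOC_WMARK_HIGH     0x02u
#define ALLOC_NO_WATERMARKS  0x04u  /* skip watermark checks entirely */
#define ALLOC_WMARK_MASK     (ALLOC_NO_WATERMARKS - 1)
#define ALLOC_CMA            0x80u  /* may dip into CMA pageblocks */

static const char *pick_watermark(unsigned int alloc_flags)
{
    if (alloc_flags & ALLOC_NO_WATERMARKS)
        return "none";
    switch (alloc_flags & ALLOC_WMARK_MASK) {
    case ALLOC_WMARK_HIGH: return "high";
    case ALLOC_WMARK_LOW:  return "low";
    default:               return "min";
    }
}

int main(void)
{
    unsigned int flags = ALLOC_WMARK_LOW | ALLOC_CMA;

    printf("watermark: %s, CMA allowed: %s\n",
           pick_watermark(flags),
           (flags & ALLOC_CMA) ? "yes" : "no");
    return 0;
}

Keeping the watermark selector in the low bits is what lets compaction.c index the zone watermark array directly with wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK).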

/linux-6.1.9/include/linux/

D | compaction.h
  95   unsigned int order, unsigned int alloc_flags,
  100  unsigned int alloc_flags, int highest_zoneidx);
  178  int alloc_flags);
  190  int alloc_flags, int highest_zoneidx)  in compaction_suitable() argument

D | mmzone.h
  1251  int highest_zoneidx, unsigned int alloc_flags,
  1255  unsigned int alloc_flags);

/linux-6.1.9/lib/

D | stackdepot.c
  422  gfp_t alloc_flags, bool can_alloc)  in __stack_depot_save() argument
  471  alloc_flags &= ~GFP_ZONEMASK;  in __stack_depot_save()
  472  alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);  in __stack_depot_save()
  473  alloc_flags |= __GFP_NOWARN;  in __stack_depot_save()
  474  page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);  in __stack_depot_save()
  531  gfp_t alloc_flags)  in stack_depot_save() argument
  533  return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);  in stack_depot_save()
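
Lines 471-473 are a gfp sanitization idiom: stack depot can be called from nearly any context, so it strips the caller's zone modifiers, keeps only bits that also appear in GFP_ATOMIC or GFP_KERNEL, and lets the allocation fail quietly. A userspace sketch with stand-in GFP values:

/* Sketch of __stack_depot_save()'s gfp sanitization (lib/stackdepot.c);
 * the GFP_* values here are illustrative stand-ins. */
#include <stdio.h>

typedef unsigned int gfp_t;

#define GFP_ZONEMASK  0x0fu   /* zone selector bits to strip */
#define GFP_ATOMIC    0x10u
#define GFP_KERNEL    0x20u
#define __GFP_NOWARN  0x100u

static gfp_t sanitize_depot_gfp(gfp_t alloc_flags)
{
    alloc_flags &= ~GFP_ZONEMASK;              /* no zone modifiers */
    alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);  /* only these contexts */
    alloc_flags |= __GFP_NOWARN;               /* failure is tolerated */
    return alloc_flags;
}

int main(void)
{
    gfp_t in = GFP_KERNEL | 0x3u;  /* caller passed zone bits too */

    printf("0x%x -> 0x%x\n", in, sanitize_depot_gfp(in));
    return 0;
}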

/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_amdkfd_gpuvm.c
  233  u32 alloc_flags = bo->kfd_bo->alloc_flags;  in amdgpu_amdkfd_release_notify() local
  236  amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags);  in amdgpu_amdkfd_release_notify()
  407  bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;  in get_pte_flags()
  408  bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;  in get_pte_flags()
  414  if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)  in get_pte_flags()
  416  if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)  in get_pte_flags()
  422  if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {  in get_pte_flags()
  455  if (!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))  in get_pte_flags()
  502  mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?  in kfd_mem_dmamap_userptr()
  601  mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);  in kfd_mem_dmamap_sg_bo()
  [all …]

D | amdgpu_amdkfd.h
  76  uint32_t alloc_flags;  member
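
get_pte_flags() shows the other common meaning of the name: here alloc_flags is not a GFP mask but the KFD buffer-object flags (KFD_IOC_ALLOC_MEM_FLAGS_*), translated into GPU page-table permission bits. A simplified userspace sketch of just the writable/executable translation; the flag values and PTE bits are stand-ins, and the real function also handles coherent, uncached, and VRAM memory types:

/* Sketch of the flag translation in get_pte_flags()
 * (amdgpu_amdkfd_gpuvm.c); all values are stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE   (1u << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1u << 1)

#define AMDGPU_PTE_READABLE   (1u << 0)
#define AMDGPU_PTE_WRITEABLE  (1u << 1)
#define AMDGPU_PTE_EXECUTABLE (1u << 2)

static uint64_t pte_flags_from_alloc_flags(uint32_t alloc_flags)
{
    uint64_t pte = AMDGPU_PTE_READABLE;  /* mappings are always readable */

    if (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
        pte |= AMDGPU_PTE_WRITEABLE;
    if (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
        pte |= AMDGPU_PTE_EXECUTABLE;
    return pte;
}

int main(void)
{
    printf("pte=0x%llx\n", (unsigned long long)
           pte_flags_from_alloc_flags(KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE));
    return 0;
}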

/linux-6.1.9/drivers/gpu/drm/amd/amdkfd/

D | kfd_chardev.c
  1832  bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;  in criu_checkpoint_bos()
  1835  if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {  in criu_checkpoint_bos()
  1843  if (bo_bucket->alloc_flags  in criu_checkpoint_bos()
  1846  bo_bucket->alloc_flags &  in criu_checkpoint_bos()
  1855  if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)  in criu_checkpoint_bos()
  1858  else if (bo_bucket->alloc_flags &  in criu_checkpoint_bos()
  1876  bo_bucket->alloc_flags,  in criu_checkpoint_bos()
  1900  if (bo_buckets[bo_index].alloc_flags  in criu_checkpoint_bos()
  2195  if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {  in criu_restore_memory_of_gpu()
  2202  } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {  in criu_restore_memory_of_gpu()
  [all …]
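
During CRIU checkpoint/restore the same KFD flags drive routing: doorbell and MMIO-remap BOs need their special offsets recreated on restore, while userptr and ordinary VRAM/GTT BOs are dumped as memory. A rough userspace sketch of that classification, with stand-in flag values:

/* Sketch of the alloc_flags routing in criu_checkpoint_bos() /
 * criu_restore_memory_of_gpu() (kfd_chardev.c); values are stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM       (1u << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR    (1u << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL   (1u << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1u << 3)

static const char *classify_bo(uint32_t alloc_flags)
{
    /* Doorbell and MMIO BOs are re-created from offsets on restore;
     * the rest are dumped as regular memory contents. */
    if (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
        return "doorbell";
    if (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
        return "mmio";
    if (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
        return "userptr";
    return (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) ? "vram" : "gtt";
}

int main(void)
{
    printf("%s\n", classify_bo(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL));
    return 0;
}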

/linux-6.1.9/drivers/md/

D | dm-zoned-reclaim.c
  286  int alloc_flags = DMZ_ALLOC_SEQ;  in dmz_reclaim_rnd_data() local
  292  alloc_flags | DMZ_ALLOC_RECLAIM);  in dmz_reclaim_rnd_data()
  293  if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {  in dmz_reclaim_rnd_data()
  294  alloc_flags = DMZ_ALLOC_RND;  in dmz_reclaim_rnd_data()

D | dm-zoned-metadata.c
  2057  int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;  in dmz_get_chunk_mapping() local
  2072  dzone = dmz_alloc_zone(zmd, 0, alloc_flags);  in dmz_get_chunk_mapping()
  2169  int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;  in dmz_get_chunk_buffer() local
  2178  bzone = dmz_alloc_zone(zmd, 0, alloc_flags);  in dmz_get_chunk_buffer()
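
dmz_reclaim_rnd_data() uses alloc_flags for a retry policy: prefer a sequential zone as the reclaim target, and fall back to a random zone if that fails while cache zones still exist. A userspace sketch; dmz_alloc_zone() here is a hypothetical stand-in with a simplified signature (the real one also takes the metadata set and a device index):

/* Sketch of the zone-allocation fallback in dmz_reclaim_rnd_data()
 * (dm-zoned-reclaim.c); helpers and values are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define DMZ_ALLOC_SEQ     (1 << 0)
#define DMZ_ALLOC_RND     (1 << 1)
#define DMZ_ALLOC_RECLAIM (1 << 2)

struct zone { int id; };

/* Hypothetical stand-in: pretend only random-zone allocation succeeds. */
static struct zone *dmz_alloc_zone(int flags)
{
    static struct zone rnd_zone = { 1 };

    return (flags & DMZ_ALLOC_RND) ? &rnd_zone : NULL;
}

static bool have_cache_zones(void) { return true; }

int main(void)
{
    int alloc_flags = DMZ_ALLOC_SEQ;
    struct zone *szone = dmz_alloc_zone(alloc_flags | DMZ_ALLOC_RECLAIM);

    if (!szone && alloc_flags == DMZ_ALLOC_SEQ && have_cache_zones()) {
        alloc_flags = DMZ_ALLOC_RND;   /* fall back to a random zone */
        szone = dmz_alloc_zone(alloc_flags | DMZ_ALLOC_RECLAIM);
    }
    printf("zone %sallocated\n", szone ? "" : "not ");
    return 0;
}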

/linux-6.1.9/fs/btrfs/

D | block-group.c
  2559  u64 alloc_flags;  in btrfs_inc_block_group_ro() local
  2607  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);  in btrfs_inc_block_group_ro()
  2608  if (alloc_flags != cache->flags) {  in btrfs_inc_block_group_ro()
  2609  ret = btrfs_chunk_alloc(trans, alloc_flags,  in btrfs_inc_block_group_ro()
  2627  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);  in btrfs_inc_block_group_ro()
  2628  ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);  in btrfs_inc_block_group_ro()
  2644  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);  in btrfs_inc_block_group_ro()
  2646  check_system_chunk(trans, alloc_flags);  in btrfs_inc_block_group_ro()
  3465  u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);  in btrfs_force_chunk_alloc() local
  3467  return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);  in btrfs_force_chunk_alloc()
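
In btrfs, alloc_flags is a block-group profile (type and RAID-level bits), not a GFP mask: btrfs_inc_block_group_ro() recomputes the target profile and allocates a chunk when it no longer matches the cached flags. A userspace sketch of that check; get_alloc_profile() and all values are stand-ins for btrfs_get_alloc_profile() and the real profile bits:

/* Sketch of the profile-mismatch check in btrfs_inc_block_group_ro()
 * (fs/btrfs/block-group.c); everything here is a stand-in. */
#include <stdint.h>
#include <stdio.h>

#define BTRFS_BLOCK_GROUP_RAID1 (1ull << 4)  /* stand-in profile bit */
#define CHUNK_ALLOC_FORCE 2

/* Hypothetical stand-in for btrfs_get_alloc_profile(). */
static uint64_t get_alloc_profile(uint64_t flags)
{
    return flags | BTRFS_BLOCK_GROUP_RAID1;
}

static int chunk_alloc(uint64_t alloc_flags, int force)
{
    printf("alloc chunk: profile=0x%llx force=%d\n",
           (unsigned long long)alloc_flags, force);
    return 0;
}

int main(void)
{
    uint64_t cache_flags = 0x1;  /* stand-in for cache->flags */
    uint64_t alloc_flags = get_alloc_profile(cache_flags);

    if (alloc_flags != cache_flags)  /* target profile changed */
        return chunk_alloc(alloc_flags, CHUNK_ALLOC_FORCE);
    return 0;
}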

/linux-6.1.9/drivers/base/regmap/

D | internal.h
  65  gfp_t alloc_flags;  member

D | regmap.c
  809   map->alloc_flags = GFP_ATOMIC;  in __regmap_init()
  811   map->alloc_flags = GFP_KERNEL;  in __regmap_init()
  2440  wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);  in regmap_bulk_write()
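
regmap decides its allocation mode once at init: maps guarded by a spinlock (fast_io) must not sleep, so map->alloc_flags becomes GFP_ATOMIC, and later allocations such as the kmemdup() in regmap_bulk_write() inherit that choice. A sketch with stand-in GFP values:

/* Sketch of __regmap_init()'s one-time GFP policy (drivers/base/regmap);
 * GFP values are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_ATOMIC 0x10u  /* safe in atomic (non-sleeping) context */
#define GFP_KERNEL 0x20u  /* may sleep */

struct regmap { gfp_t alloc_flags; };

static void regmap_init_alloc_flags(struct regmap *map, bool fast_io)
{
    /* Spinlock-guarded maps cannot sleep in later allocations. */
    map->alloc_flags = fast_io ? GFP_ATOMIC : GFP_KERNEL;
}

int main(void)
{
    struct regmap map;

    regmap_init_alloc_flags(&map, true);
    printf("alloc_flags=0x%x\n", map.alloc_flags);
    return 0;
}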

/linux-6.1.9/include/uapi/linux/

D | kfd_ioctl.h
  590  __u32 alloc_flags;  member

/linux-6.1.9/drivers/iommu/

D | dma-iommu.c
  762  gfp_t alloc_flags = gfp;  in __iommu_dma_alloc_pages() local
  766  alloc_flags |= __GFP_NORETRY;  in __iommu_dma_alloc_pages()
  767  page = alloc_pages_node(nid, alloc_flags, order);  in __iommu_dma_alloc_pages()
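
__iommu_dma_alloc_pages() treats high-order allocations as opportunistic: it ORs __GFP_NORETRY into alloc_flags for orders above zero and drops to a smaller order on failure rather than pressuring reclaim. A userspace sketch of that loop; alloc_order() is a hypothetical stand-in for alloc_pages_node():

/* Sketch of the order-fallback loop around the dma-iommu.c hits;
 * values and helpers are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL    0x20u
#define __GFP_NORETRY 0x40u  /* give up early instead of retrying hard */

/* Hypothetical: pretend only order-0 allocations succeed. */
static bool alloc_order(gfp_t flags, unsigned int order)
{
    (void)flags;
    return order == 0;
}

int main(void)
{
    for (unsigned int order = 4; ; order--) {
        gfp_t alloc_flags = GFP_KERNEL;

        if (order)  /* don't try hard for high-order blocks */
            alloc_flags |= __GFP_NORETRY;
        if (alloc_order(alloc_flags, order)) {
            printf("got order-%u block\n", order);
            break;
        }
        if (!order)
            break;
    }
    return 0;
}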

/linux-6.1.9/drivers/gpu/drm/i915/gem/

D | i915_gem_object.h
  60  unsigned alloc_flags);

/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/

D | cmd.c
  79  gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;  in cmd_alloc_ent() local
  82  ent = kzalloc(sizeof(*ent), alloc_flags);  in cmd_alloc_ent()
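
cmd_alloc_ent() is the classic context-dependent GFP choice: a completion callback means the command may be issued from atomic context, so GFP_ATOMIC; synchronous callers can sleep with GFP_KERNEL. A userspace sketch with stand-in values and calloc() standing in for kzalloc():

/* Sketch of the callback-driven GFP choice in cmd_alloc_ent()
 * (mlx5/core/cmd.c); values are stand-ins. */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_ATOMIC 0x10u
#define GFP_KERNEL 0x20u

typedef void (*cmd_cbk_t)(int status);

struct cmd_ent { gfp_t gfp_used; };

static struct cmd_ent *cmd_alloc_ent(cmd_cbk_t cbk)
{
    gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
    struct cmd_ent *ent = calloc(1, sizeof(*ent));  /* kzalloc stand-in */

    if (ent)
        ent->gfp_used = alloc_flags;
    return ent;
}

int main(void)
{
    struct cmd_ent *ent = cmd_alloc_ent(NULL);

    if (ent) {
        printf("gfp=0x%x\n", ent->gfp_used);
        free(ent);
    }
    return 0;
}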

/linux-6.1.9/arch/s390/kvm/

D | kvm-s390.c
  3127  gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;  in kvm_arch_init_vm() local
  3150  alloc_flags |= GFP_DMA;  in kvm_arch_init_vm()
  3153  kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);  in kvm_arch_init_vm()
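
kvm_arch_init_vm() starts from GFP_KERNEL_ACCOUNT (charged to the caller's cgroup) and ORs in GFP_DMA only when the basic SCA control block must sit in low, 31-bit addressable memory. A sketch of that gfp shaping; the trigger condition and values are stand-ins:

/* Sketch of the conditional GFP_DMA in kvm_arch_init_vm()
 * (arch/s390/kvm/kvm-s390.c); values are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL_ACCOUNT 0x20u  /* accounted to the allocating cgroup */
#define GFP_DMA            0x01u  /* restrict to low (DMA-able) memory */

static gfp_t sca_gfp(bool needs_low_memory)
{
    gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;

    if (needs_low_memory)  /* e.g. basic SCA without ESCA support */
        alloc_flags |= GFP_DMA;
    return alloc_flags;
}

int main(void)
{
    printf("gfp=0x%x\n", sca_gfp(true));
    return 0;
}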