/linux-6.6.21/include/linux/
gfp.h
    213: static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
    215:         gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
    223:         pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
    232: __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
    235:         warn_if_node_offline(nid, gfp_mask);
    237:         return __alloc_pages(gfp_mask, order, nid, NULL);
    254: static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
    260:         return __alloc_pages_node(nid, gfp_mask, order);
    269: static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
    271:         return alloc_pages_node(numa_node_id(), gfp_mask, order);
    [all …]
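These are the core physical-page allocation entry points: alloc_pages() targets the current NUMA node, alloc_pages_node() an explicit one. A minimal usage sketch, not from the listing above; the helper names are invented, and order 2 means four contiguous pages:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *buf_alloc_example(void)
{
        /* order-2 allocation: 4 contiguous zeroed pages; GFP_KERNEL may sleep */
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);

        if (!page)
                return NULL;
        return page_address(page);      /* kernel virtual address of the run */
}

static void buf_free_example(void *addr)
{
        if (addr)
                __free_pages(virt_to_page(addr), 2);
}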
mempool.h
     13: typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
     41:         gfp_t gfp_mask, int node_id);
     49:         gfp_t gfp_mask, int nid);
     53: extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
     61: void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
     82: void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
    101: void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
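mempool_alloc() draws from a pool whose minimum element count is reserved up front, so with a blocking gfp mask it is designed to wait for a returned element rather than fail. A small usage sketch with the stock kmalloc backend; the pool size and element size are invented for illustration:

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *req_pool;

static int pool_setup_example(void)
{
        /* reserve 16 pre-allocated 256-byte elements for low-memory progress */
        req_pool = mempool_create_kmalloc_pool(16, 256);
        return req_pool ? 0 : -ENOMEM;
}

static void *req_get_example(void)
{
        /* GFP_NOIO allows direct reclaim, so this waits instead of failing */
        return mempool_alloc(req_pool, GFP_NOIO);
}

static void req_put_example(void *elem)
{
        mempool_free(elem, req_pool);
}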
cpuset.h
     85: extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
     87: static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
     89:         return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
     92: static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
     95:         return __cpuset_zone_allowed(z, gfp_mask);
    223: static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
    228: static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
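These helpers are how the page allocator honors the calling task's cpuset while walking a zonelist (see the __cpuset_zone_allowed() call at page_alloc.c:3071 later in this listing). A simplified sketch of that pattern; first_allowed_zone() is a hypothetical helper, and the real code also checks ALLOC_CPUSET and watermarks:

#include <linux/cpuset.h>
#include <linux/mmzone.h>

static struct zone *first_allowed_zone(struct zonelist *zonelist,
                                       enum zone_type highest_zoneidx,
                                       gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;

        for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
                /* skip zones on nodes the task's cpuset forbids */
                if (cpusets_enabled() &&
                    !__cpuset_zone_allowed(zone, gfp_mask))
                        continue;
                return zone;
        }
        return NULL;
}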
blk-crypto.h
     84:         gfp_t gfp_mask);
    115: int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
    128:         gfp_t gfp_mask)
    131:         return __bio_crypt_clone(dst, src, gfp_mask);
page_owner.h
     13:         unsigned short order, gfp_t gfp_mask);
     28:         unsigned short order, gfp_t gfp_mask)
     31:         __set_page_owner(page, order, gfp_mask);
     59:         unsigned int order, gfp_t gfp_mask)
/linux-6.6.21/mm/
mempool.c
    187:         gfp_t gfp_mask, int node_id)
    197:         gfp_mask, node_id);
    207:         element = pool->alloc(gfp_mask, pool->pool_data);
    268:         gfp_t gfp_mask, int node_id)
    272:         pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
    277:         gfp_mask, node_id)) {
    380: void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
    387:         VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
    388:         might_alloc(gfp_mask);
    390:         gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
    [all …]
page_alloc.c
    2798: noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
    2800:         return __should_fail_alloc_page(gfp_mask, order);
    2922:         unsigned int alloc_flags, gfp_t gfp_mask)
    3001: alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
    3009:         alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
    3033: static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
    3037:         if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
    3048: get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
    3071:         !__cpuset_zone_allowed(zone, gfp_mask))
    3121:         gfp_mask)) {
    [all …]
fail_page_alloc.c
     24: bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
     30:         if (gfp_mask & __GFP_NOFAIL)
     32:         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
     35:             (gfp_mask & __GFP_DIRECT_RECLAIM))
     39:         if (gfp_mask & __GFP_NOWARN)
page_owner.c
     27:         gfp_t gfp_mask;
    162:         unsigned short order, gfp_t gfp_mask)
    172:         page_owner->gfp_mask = gfp_mask;
    187:         gfp_t gfp_mask)
    192:         handle = save_stack(gfp_mask);
    197:         __set_page_owner_handle(page_ext, handle, order, gfp_mask);
    250:         new_page_owner->gfp_mask = old_page_owner->gfp_mask;
    334:         page_mt = gfp_migratetype(page_owner->gfp_mask);
    412:         page_owner->order, page_owner->gfp_mask,
    413:         &page_owner->gfp_mask, page_owner->pid,
    [all …]
swap.h
     47: struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
     51: struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
     84:         gfp_t gfp_mask, struct vm_fault *vmf)
     89: static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
    128:         gfp_t gfp_mask, void **shadowp)
swap_state.c
    412: struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
    456:         folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
    488:         if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
    492:         if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
    527: struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
    532:         struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
    620: struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
    652:         gfp_mask, vma, addr, &page_allocated);
    670:         return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
    780: static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
    [all …]
/linux-6.6.21/block/
blk-lib.c
     39:         sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
     67:         bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
     98:         sector_t nr_sects, gfp_t gfp_mask)
    105:         ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
    119:         sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
    135:         bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
    169:         sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
    181:         REQ_OP_WRITE, gfp_mask);
    219:         sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
    229:         ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
    [all …]
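blkdev_issue_discard() is the exported synchronous wrapper around __blkdev_issue_discard(); the gfp mask covers the bio allocations it performs (see the blk_next_bio() call at line 67). A usage sketch; trim_extent_example() is a hypothetical caller:

#include <linux/blkdev.h>
#include <linux/sizes.h>

/* discard a 1 MiB extent; this path may sleep, so a blocking mask is fine */
static int trim_extent_example(struct block_device *bdev, sector_t start)
{
        sector_t nr_sects = SZ_1M >> SECTOR_SHIFT;

        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}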
blk-map.c
     22:         gfp_t gfp_mask)
     29:         bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
    132:         struct iov_iter *iter, gfp_t gfp_mask)
    142:         bmd = bio_alloc_map_data(iter, gfp_mask);
    157:         bio = bio_kmalloc(nr_pages, gfp_mask);
    185:         page = alloc_page(GFP_NOIO | gfp_mask);
    254:         unsigned int nr_vecs, gfp_t gfp_mask)
    259:         bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
    264:         bio = bio_kmalloc(nr_vecs, gfp_mask);
    273:         gfp_t gfp_mask)
    [all …]
blk-crypto.c
     92:         const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
    100:         WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));
    102:         bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
    116: int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
    118:         dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
    304:         gfp_t gfp_mask)
    307:         rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
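Note the WARN_ON_ONCE at line 100: bio_crypt_set_ctx() insists on a mask that allows direct reclaim, because the backing mempool can only guarantee an element when the caller may block. A sketch of a caller honoring that; the helper name and the single-u64 DUN scheme are invented:

#include <linux/bio.h>
#include <linux/blk-crypto.h>

static void set_bio_crypto_example(struct bio *bio,
                                   const struct blk_crypto_key *key,
                                   u64 lblk_num)
{
        /* data-unit number: first slot carries the logical block number */
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };

        /* GFP_NOIO rather than GFP_NOWAIT: direct reclaim must be allowed */
        bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
}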
/linux-6.6.21/fs/nfs/blocklayout/
dev.c
    231:         struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
    236:         struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    242:         dev = bl_resolve_deviceid(server, v, gfp_mask);
    329:         struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    384:         struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    389:         ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
    400:         struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    407:         sizeof(struct pnfs_block_dev), gfp_mask);
    413:         volumes, v->concat.volumes[i], gfp_mask);
    429:         struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    [all …]
/linux-6.6.21/fs/btrfs/
ulist.h
     48: struct ulist *ulist_alloc(gfp_t gfp_mask);
     50: int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
     52:         u64 *old_aux, gfp_t gfp_mask);
     57:         void **old_aux, gfp_t gfp_mask)
     61:         int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
     65:         return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
ulist.c
     97: struct ulist *ulist_alloc(gfp_t gfp_mask)
     99:         struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
    193: int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
    195:         return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
    199:         u64 *old_aux, gfp_t gfp_mask)
    210:         node = kmalloc(sizeof(*node), gfp_mask);
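Taken together with ulist.h above, the API is: allocate a list, add u64 values with a u64 aux payload (duplicates merge), iterate, free. A usage sketch; the caller and the values are invented for illustration:

#include "ulist.h"      /* btrfs-internal header */

static int ulist_demo(void)
{
        struct ulist *roots;
        struct ulist_iterator uiter;
        struct ulist_node *node;
        int ret;

        roots = ulist_alloc(GFP_KERNEL);
        if (!roots)
                return -ENOMEM;

        ret = ulist_add(roots, 256, 0, GFP_KERNEL);     /* 1: newly added */
        if (ret >= 0)
                ret = ulist_add(roots, 256, 0, GFP_KERNEL); /* 0: merged */

        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(roots, &uiter)))
                pr_info("val %llu aux %llu\n", node->val, node->aux);

        ulist_free(roots);
        return ret < 0 ? ret : 0;
}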
/linux-6.6.21/lib/
generic-radix-tree.c
     79: static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
     83:         node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
     90:         kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
    105:         gfp_t gfp_mask)
    122:         new_node = genradix_alloc_node(gfp_mask);
    145:         new_node = genradix_alloc_node(gfp_mask);
    229:         gfp_t gfp_mask)
    234:         if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
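__genradix_ptr_alloc() is normally reached through the genradix_ptr_alloc() macro from include/linux/generic-radix-tree.h, which grows the tree on demand with the caller's gfp mask. A small sketch; the counter array use case is hypothetical:

#include <linux/generic-radix-tree.h>

/* a sparse, on-demand array of u64 counters */
static GENRADIX(u64) counters;

static void counters_setup_example(void)
{
        genradix_init(&counters);
}

static int counter_bump_example(size_t idx, gfp_t gfp_mask)
{
        /* allocates any missing interior nodes with gfp_mask */
        u64 *ctr = genradix_ptr_alloc(&counters, idx, gfp_mask);

        if (!ctr)
                return -ENOMEM;
        (*ctr)++;
        return 0;
}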
/linux-6.6.21/include/linux/sched/
mm.h
    258: extern void fs_reclaim_acquire(gfp_t gfp_mask);
    259: extern void fs_reclaim_release(gfp_t gfp_mask);
    263: static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
    264: static inline void fs_reclaim_release(gfp_t gfp_mask) { }
    301: static inline void might_alloc(gfp_t gfp_mask)
    303:         fs_reclaim_acquire(gfp_mask);
    304:         fs_reclaim_release(gfp_mask);
    306:         might_sleep_if(gfpflags_allow_blocking(gfp_mask));
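might_alloc() lets an allocation wrapper assert its gfp contract on every call, even when a fast path never reaches the page allocator; mempool_alloc() does exactly this at mempool.c:388 above. A sketch of the idiom; struct obj_cache, its locking, and the fast path are invented:

#include <linux/sched/mm.h>
#include <linux/slab.h>

struct obj_cache {              /* hypothetical one-slot cache */
        void *cached;
        size_t obj_size;
};

static void *cache_get_example(struct obj_cache *c, gfp_t gfp_mask)
{
        void *obj;

        might_alloc(gfp_mask);  /* lockdep/might_sleep fire on every call */

        obj = c->cached;        /* fast path; cache locking is assumed */
        if (obj) {
                c->cached = NULL;
                return obj;
        }
        return kmalloc(c->obj_size, gfp_mask);
}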
/linux-6.6.21/net/sunrpc/auth_gss/
gss_krb5_keys.c
    152:         const struct xdr_netobj *in_constant, gfp_t gfp_mask)
    174:         inblockdata = kmalloc(blocksize, gfp_mask);
    178:         outblockdata = kmalloc(blocksize, gfp_mask);
    271:         gfp_t gfp_mask)
    277:         inblock.data = kmalloc(inblock.len, gfp_mask);
    281:         ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);
    372:         gfp_t gfp_mask)
    401:         step.data = kzalloc(step.len, gfp_mask);
    406:         DR.data = kmalloc(DR.len, gfp_mask);
    504:         gfp_t gfp_mask)
    [all …]
gss_krb5_mech.c
    297: gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask)
    306:         keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask);
    313:         KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
    329:         KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
    346:         KEY_USAGE_SEED_CHECKSUM, gfp_mask))
    354:         KEY_USAGE_SEED_CHECKSUM, gfp_mask))
    363:         KEY_USAGE_SEED_INTEGRITY, gfp_mask))
    371:         KEY_USAGE_SEED_INTEGRITY, gfp_mask))
    396:         gfp_t gfp_mask)
    446:         gss_kerberos_mech.gm_oid.len, gfp_mask);
    [all …]
gss_krb5_internal.h
     40:         gfp_t gfp_mask);
    110:         gfp_t gfp_mask);
    116:         gfp_t gfp_mask);
    122:         gfp_t gfp_mask);
    141:         u32 usage, u8 seed, gfp_t gfp_mask)
    153:         return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask);
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx4/
icm.c
     99:         gfp_t gfp_mask, int node)
    103:         page = alloc_pages_node(node, gfp_mask, order);
    105:         page = alloc_pages(gfp_mask, order);
    115:         int order, gfp_t gfp_mask)
    118:         &buf->dma_addr, gfp_mask);
    133:         gfp_t gfp_mask, int coherent)
    142:         BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
    145:         gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
    149:         gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
    162:         gfp_mask & ~(__GFP_HIGHMEM |
    [all …]
/linux-6.6.21/fs/ntfs/
malloc.h
     28: static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
     33:         return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
     37:         return __vmalloc(size, gfp_mask);
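For requests that fit in one page this helper kmalloc()s a whole page, stripping __GFP_HIGHMEM since slab memory cannot honor it; larger requests go through __vmalloc(). Outside ntfs, the generic kvmalloc() family expresses the same kmalloc-then-vmalloc fallback; a usage sketch with invented helper names:

#include <linux/mm.h>
#include <linux/slab.h>

static void *big_table_alloc_example(size_t entries)
{
        /* tries kmalloc first, transparently falls back to vmalloc */
        return kvcalloc(entries, sizeof(u64), GFP_KERNEL);
}

static void big_table_free_example(void *table)
{
        kvfree(table);          /* correct for either backing allocator */
}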
/linux-6.6.21/drivers/connector/
connector.c
     62:         gfp_t gfp_mask,
     98:         skb = nlmsg_new(size, gfp_mask);
    116:         gfp_mask, filter,
    119:         !gfpflags_allow_blocking(gfp_mask));
    125:         gfp_t gfp_mask)
    127:         return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask,
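cn_netlink_send() broadcasts a connector message to a netlink group, and the gfp mask is what allocates the skb (line 98), so atomic callers must pass GFP_ATOMIC. A hedged sketch; the cb_id values are placeholders, not a registered connector index:

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int send_event_example(u32 group, const void *data, u16 len)
{
        struct cn_msg *msg;
        int ret;

        msg = kzalloc(sizeof(*msg) + len, GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;

        msg->id.idx = 0x1234;   /* placeholder index/value pair */
        msg->id.val = 0x1;
        msg->len = len;
        memcpy(msg->data, data, len);

        /* GFP_ATOMIC: safe from timers/softirq, at the cost of可 failing */
        ret = cn_netlink_send(msg, 0, group, GFP_ATOMIC);
        kfree(msg);
        return ret;
}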