
Searched refs:gfp_mask (Results 1 – 25 of 226) sorted by relevance


/linux-6.1.9/include/linux/
gfp.h
213 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) in warn_if_node_offline() argument
215 gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN); in warn_if_node_offline()
223 pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node); in warn_if_node_offline()
232 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
235 warn_if_node_offline(nid, gfp_mask); in __alloc_pages_node()
237 return __alloc_pages(gfp_mask, order, nid, NULL); in __alloc_pages_node()
254 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node() argument
260 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node()
269 static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) in alloc_pages() argument
271 return alloc_pages_node(numa_node_id(), gfp_mask, order); in alloc_pages()
[all …]
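As a hedged illustration of the gfp.h allocation helpers matched above (not part of the search output), the following sketch allocates and frees a single page; GFP_KERNEL is assumed to be acceptable in the calling context:

```c
/*
 * Minimal sketch, assuming process context where sleeping is allowed.
 * alloc_pages(gfp_mask, order) returns a struct page or NULL; order 0
 * means one page. alloc_pages_node() would pin the allocation to a
 * specific NUMA node, as the helpers above show.
 */
#include <linux/gfp.h>

static int example_alloc_one_page(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return -ENOMEM;

	/* ... use page_address(page) ... */

	__free_pages(page, 0);
	return 0;
}
```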
cpuset.h
83 extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
85 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
88 return __cpuset_node_allowed(node, gfp_mask); in cpuset_node_allowed()
92 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
94 return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); in __cpuset_zone_allowed()
97 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
100 return __cpuset_zone_allowed(z, gfp_mask); in cpuset_zone_allowed()
226 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
231 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
236 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
mempool.h
13 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
36 gfp_t gfp_mask, int node_id);
44 gfp_t gfp_mask, int nid);
48 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
56 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
77 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
96 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
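A hedged sketch of how the mempool API above is typically used; the pool name and element sizes here are illustrative, not from the kernel:

```c
/*
 * Sketch only: a mempool backed by kmalloc'd 256-byte elements. The
 * pre-allocated elements (16 here) guarantee forward progress when the
 * underlying allocator fails; gfp_mask is passed through mempool_alloc().
 */
#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *example_pool;	/* hypothetical pool */

static int example_pool_init(void)
{
	example_pool = mempool_create_kmalloc_pool(16, 256);
	return example_pool ? 0 : -ENOMEM;
}

static void example_pool_use(void)
{
	void *elem = mempool_alloc(example_pool, GFP_KERNEL);

	if (elem) {
		/* ... use elem ... */
		mempool_free(elem, example_pool);
	}
}
```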
blk-crypto.h
86 gfp_t gfp_mask);
115 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
128 gfp_t gfp_mask) in bio_crypt_clone() argument
131 return __bio_crypt_clone(dst, src, gfp_mask); in bio_crypt_clone()
page_owner.h
13 unsigned short order, gfp_t gfp_mask);
28 unsigned short order, gfp_t gfp_mask) in set_page_owner() argument
31 __set_page_owner(page, order, gfp_mask); in set_page_owner()
59 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
/linux-6.1.9/mm/
mempool.c
181 gfp_t gfp_mask, int node_id) in mempool_init_node() argument
191 gfp_mask, node_id); in mempool_init_node()
201 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
262 gfp_t gfp_mask, int node_id) in mempool_create_node() argument
266 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node()
271 gfp_mask, node_id)) { in mempool_create_node()
374 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument
381 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc()
382 might_alloc(gfp_mask); in mempool_alloc()
384 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc()
[all …]
page_owner.c
27 gfp_t gfp_mask; member
161 unsigned short order, gfp_t gfp_mask) in __set_page_owner_handle() argument
170 page_owner->gfp_mask = gfp_mask; in __set_page_owner_handle()
185 gfp_t gfp_mask) in __set_page_owner() argument
190 handle = save_stack(gfp_mask); in __set_page_owner()
195 __set_page_owner_handle(page_ext, handle, order, gfp_mask); in __set_page_owner()
248 new_page_owner->gfp_mask = old_page_owner->gfp_mask; in __folio_copy_owner()
332 page_mt = gfp_migratetype(page_owner->gfp_mask); in pagetypeinfo_showmixedcount_print()
410 page_owner->order, page_owner->gfp_mask, in print_page_owner()
411 &page_owner->gfp_mask, page_owner->pid, in print_page_owner()
[all …]
page_alloc.c
3888 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3894 if (gfp_mask & __GFP_NOFAIL) in __should_fail_alloc_page()
3896 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in __should_fail_alloc_page()
3899 (gfp_mask & __GFP_DIRECT_RECLAIM)) in __should_fail_alloc_page()
3903 if (gfp_mask & __GFP_NOWARN) in __should_fail_alloc_page()
3934 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3941 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3943 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
4049 unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast() argument
4081 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
[all …]
swap.h
46 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
51 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
85 gfp_t gfp_mask, struct vm_fault *vmf) in swap_cluster_readahead() argument
90 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead() argument
124 gfp_t gfp_mask, void **shadowp) in add_to_swap_cache() argument
swap_state.c
412 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async() argument
454 folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false); in __read_swap_cache_async()
486 if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry)) in __read_swap_cache_async()
490 if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) in __read_swap_cache_async()
516 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
522 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async()
610 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead() argument
643 gfp_mask, vma, addr, &page_allocated); in swap_cluster_readahead()
661 return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL); in swap_cluster_readahead()
788 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, in swap_vma_readahead() argument
[all …]
vmalloc.c
1560 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) in preload_this_cpu_lock() argument
1574 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in preload_this_cpu_lock()
1589 int node, gfp_t gfp_mask) in alloc_vmap_area() argument
1605 gfp_mask = gfp_mask & GFP_RECLAIM_MASK; in alloc_vmap_area()
1607 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in alloc_vmap_area()
1615 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
1618 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); in alloc_vmap_area()
1665 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) in alloc_vmap_area()
1946 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument
1958 gfp_mask & GFP_RECLAIM_MASK, node); in new_vmap_block()
[all …]
/linux-6.1.9/block/
blk-lib.c
39 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) in __blkdev_issue_discard() argument
67 bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask); in __blkdev_issue_discard()
98 sector_t nr_sects, gfp_t gfp_mask) in blkdev_issue_discard() argument
105 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio); in blkdev_issue_discard()
119 sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_write_zeroes() argument
135 bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); in __blkdev_issue_write_zeroes()
169 sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_zero_pages() argument
181 REQ_OP_WRITE, gfp_mask); in __blkdev_issue_zero_pages()
219 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, in __blkdev_issue_zeroout() argument
229 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, in __blkdev_issue_zeroout()
[all …]
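For context, a hedged sketch of calling the discard helper matched above; the block device and sector range are assumed to come from the caller:

```c
/*
 * Sketch only: synchronously discard a sector range. blkdev_issue_discard()
 * may sleep (it allocates bios with the given gfp_mask and waits for
 * completion), so GFP_KERNEL is appropriate only in process context.
 */
#include <linux/blkdev.h>

static int example_discard_range(struct block_device *bdev,
				 sector_t start, sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}
```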
blk-map.c
22 gfp_t gfp_mask) in bio_alloc_map_data() argument
29 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); in bio_alloc_map_data()
130 struct iov_iter *iter, gfp_t gfp_mask) in bio_copy_user_iov() argument
140 bmd = bio_alloc_map_data(iter, gfp_mask); in bio_copy_user_iov()
155 bio = bio_kmalloc(nr_pages, gfp_mask); in bio_copy_user_iov()
183 page = alloc_page(GFP_NOIO | gfp_mask); in bio_copy_user_iov()
245 unsigned int nr_vecs, gfp_t gfp_mask) in blk_rq_map_bio_alloc() argument
252 bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask, in blk_rq_map_bio_alloc()
257 bio = bio_kmalloc(nr_vecs, gfp_mask); in blk_rq_map_bio_alloc()
266 gfp_t gfp_mask) in bio_map_user_iov() argument
[all …]
blk-crypto.c
85 const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask) in bio_crypt_set_ctx() argument
93 WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM)); in bio_crypt_set_ctx()
95 bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in bio_crypt_set_ctx()
109 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) in __bio_crypt_clone() argument
111 dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __bio_crypt_clone()
299 gfp_t gfp_mask) in __blk_crypto_rq_bio_prep() argument
302 rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __blk_crypto_rq_bio_prep()
/linux-6.1.9/fs/btrfs/
ulist.h
48 struct ulist *ulist_alloc(gfp_t gfp_mask);
50 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
52 u64 *old_aux, gfp_t gfp_mask);
57 void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument
61 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr()
65 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
ulist.c
92 struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument
94 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc()
186 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument
188 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add()
192 u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument
203 node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
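A hedged sketch of the btrfs ulist API matched above; GFP_NOFS is assumed because btrfs commonly avoids recursing into the filesystem from reclaim:

```c
/*
 * Sketch only: collect a couple of u64 values in a ulist and release it.
 * ulist_add() returns a negative errno if the node allocation fails.
 */
#include "ulist.h"

static int example_collect_refs(void)
{
	struct ulist *ul = ulist_alloc(GFP_NOFS);
	int ret;

	if (!ul)
		return -ENOMEM;

	ret = ulist_add(ul, 4096, 0, GFP_NOFS);
	if (ret >= 0)
		ret = ulist_add(ul, 8192, 0, GFP_NOFS);

	ulist_free(ul);
	return ret < 0 ? ret : 0;
}
```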
/linux-6.1.9/fs/nfs/blocklayout/
dev.c
231 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
236 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument
242 dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple()
327 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi() argument
382 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument
387 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice()
398 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument
411 volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat()
427 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument
440 volumes, v->stripe.volumes[i], gfp_mask); in bl_parse_stripe()
[all …]
/linux-6.1.9/lib/
generic-radix-tree.c
79 static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) in genradix_alloc_node() argument
83 node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO); in genradix_alloc_node()
90 kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask); in genradix_alloc_node()
105 gfp_t gfp_mask) in __genradix_ptr_alloc() argument
122 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
145 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
218 gfp_t gfp_mask) in __genradix_prealloc() argument
223 if (!__genradix_ptr_alloc(radix, offset, gfp_mask)) in __genradix_prealloc()
scatterlist.c
149 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) in sg_kmalloc() argument
161 void *ptr = (void *) __get_free_page(gfp_mask); in sg_kmalloc()
162 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); in sg_kmalloc()
166 gfp_mask); in sg_kmalloc()
284 unsigned int nents_first_chunk, gfp_t gfp_mask, in __sg_alloc_table() argument
318 sg = alloc_fn(alloc_size, gfp_mask); in __sg_alloc_table()
371 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) in sg_alloc_table() argument
376 NULL, 0, gfp_mask, sg_kmalloc); in sg_alloc_table()
386 gfp_t gfp_mask) in get_next_sg() argument
399 new_sg = sg_kmalloc(alloc_size, gfp_mask); in get_next_sg()
[all …]
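A hedged sketch of sg_alloc_table() as matched above; the entry count is arbitrary, and the gfp_mask is forwarded to the internal sg_kmalloc() chunk allocator shown in the results:

```c
/*
 * Sketch only: allocate an 8-entry scatterlist table and free it again.
 * sg_alloc_table() returns 0 on success or a negative errno.
 */
#include <linux/scatterlist.h>

static int example_sg_table(void)
{
	struct sg_table table;
	int ret;

	ret = sg_alloc_table(&table, 8, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... fill entries with sg_set_page()/sg_set_buf() and map them ... */

	sg_free_table(&table);
	return 0;
}
```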
/linux-6.1.9/include/linux/sched/
mm.h
226 extern void fs_reclaim_acquire(gfp_t gfp_mask);
227 extern void fs_reclaim_release(gfp_t gfp_mask);
231 static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } in fs_reclaim_acquire() argument
232 static inline void fs_reclaim_release(gfp_t gfp_mask) { } in fs_reclaim_release() argument
269 static inline void might_alloc(gfp_t gfp_mask) in might_alloc() argument
271 fs_reclaim_acquire(gfp_mask); in might_alloc()
272 fs_reclaim_release(gfp_mask); in might_alloc()
274 might_sleep_if(gfpflags_allow_blocking(gfp_mask)); in might_alloc()
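A hedged sketch of might_alloc() from the hits above; the point is that the annotation fires even when a fast path never actually allocates, catching sleep-in-atomic bugs early. The wrapper name is hypothetical:

```c
/*
 * Sketch only: might_alloc() combines the fs_reclaim lockdep annotations
 * with might_sleep_if(), so passing a blocking gfp_mask from atomic
 * context warns immediately, even if the allocation below never runs.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_obj_alloc(gfp_t gfp_mask)
{
	might_alloc(gfp_mask);

	/* a real implementation might try a lockless per-cpu cache first */
	return kmalloc(64, gfp_mask);
}
```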
/linux-6.1.9/net/sunrpc/auth_gss/
gss_krb5_mech.c
312 context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_des3() argument
339 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_des3()
357 context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_new() argument
373 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
388 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
403 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
413 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
423 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
433 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
469 gfp_t gfp_mask) in gss_import_v2_context() argument
[all …]
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx4/
icm.c
99 gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages() argument
103 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages()
105 page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages()
115 int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument
118 &buf->dma_addr, gfp_mask); in mlx4_alloc_icm_coherent()
133 gfp_t gfp_mask, int coherent) in mlx4_alloc_icm() argument
142 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mlx4_alloc_icm()
145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), in mlx4_alloc_icm()
149 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mlx4_alloc_icm()
162 gfp_mask & ~(__GFP_HIGHMEM | in mlx4_alloc_icm()
[all …]
/linux-6.1.9/fs/ntfs/
malloc.h
28 static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) in __ntfs_malloc() argument
33 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); in __ntfs_malloc()
37 return __vmalloc(size, gfp_mask); in __ntfs_malloc()
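The __ntfs_malloc() hit above falls back from kmalloc to __vmalloc for larger sizes; a hedged sketch of the same small-vs-large pattern, expressed with kvmalloc() as newer code typically does (the wrapper names are illustrative):

```c
/*
 * Sketch only: kvmalloc() tries kmalloc() first and falls back to
 * vmalloc() when the request is large or contiguous pages are scarce;
 * kvfree() releases either kind of allocation.
 */
#include <linux/mm.h>
#include <linux/slab.h>

static void *example_big_buffer(unsigned long size)
{
	return kvmalloc(size, GFP_KERNEL);
}

static void example_big_buffer_free(void *buf)
{
	kvfree(buf);
}
```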
/linux-6.1.9/drivers/connector/
connector.c
62 gfp_t gfp_mask) in cn_netlink_send_mult() argument
96 skb = nlmsg_new(size, gfp_mask); in cn_netlink_send_mult()
114 gfp_mask); in cn_netlink_send_mult()
116 !gfpflags_allow_blocking(gfp_mask)); in cn_netlink_send_mult()
122 gfp_t gfp_mask) in cn_netlink_send() argument
124 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); in cn_netlink_send()
/linux-6.1.9/kernel/power/
snapshot.c
189 static void *get_image_page(gfp_t gfp_mask, int safe_needed) in get_image_page() argument
193 res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
199 res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
208 static void *__get_safe_page(gfp_t gfp_mask) in __get_safe_page() argument
217 return get_image_page(gfp_mask, PG_SAFE); in __get_safe_page()
220 unsigned long get_safe_page(gfp_t gfp_mask) in get_safe_page() argument
222 return (unsigned long)__get_safe_page(gfp_mask); in get_safe_page()
225 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page() argument
229 page = alloc_page(gfp_mask); in alloc_image_page()
295 gfp_t gfp_mask; /* mask for allocating pages */ member
[all …]
