Home
last modified time | relevance | path

Searched refs:gfp_t (Results 1 – 25 of 1331) sorted by relevance

Pages: 1 2 3 4 5 6 7 8 9 10 >> … 54

/linux-6.6.21/include/linux/
gfp_types.h:16  typedef unsigned int __bitwise gfp_t;
70 #define __GFP_DMA ((__force gfp_t)___GFP_DMA)
71 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
72 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
73 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
103 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
104 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
105 #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
106 #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
107 #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
[all …]
gfp.h:16  static inline int gfp_migratetype(const gfp_t gfp_flags) in gfp_migratetype()
34 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) in gfpflags_allow_blocking()
129 static inline enum zone_type gfp_zone(gfp_t flags) in gfp_zone()
147 static inline int gfp_zonelist(gfp_t flags) in gfp_zonelist()
165 static inline struct zonelist *node_zonelist(int nid, gfp_t flags) in node_zonelist()
177 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
179 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
182 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
187 unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
193 alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) in alloc_pages_bulk_list()
[all …]
slab.h:226  void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
400 static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller) in kmalloc_type()
487 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
499 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
501 gfp_t gfpflags) __assume_slab_alignment __malloc;
512 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
519 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
521 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
524 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
527 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
[all …]
kmemleak.h:19  gfp_t gfp) __ref;
21 gfp_t gfp) __ref;
23 gfp_t gfp) __ref;
30 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
33 gfp_t gfp) __ref;
39 gfp_t gfp) in kmemleak_alloc_recursive()
62 gfp_t gfp) in kmemleak_alloc()
67 gfp_t gfp) in kmemleak_alloc_recursive()
71 gfp_t gfp) in kmemleak_alloc_percpu()
75 gfp_t gfp) in kmemleak_vmalloc()
[all …]
xarray.h:267  #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
268 #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
269 #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
270 #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
271 #define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U)
272 #define XA_FLAGS_ACCOUNT ((__force gfp_t)32U)
273 #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
299 gfp_t xa_flags;
352 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
355 void *entry, gfp_t);
[all …]
kasan.h:183  void *object, gfp_t flags, bool init);
185 struct kmem_cache *s, void *object, gfp_t flags, bool init) in kasan_slab_alloc()
193 size_t size, gfp_t flags);
195 const void *object, size_t size, gfp_t flags) in kasan_kmalloc()
203 size_t size, gfp_t flags);
205 size_t size, gfp_t flags) in kasan_kmalloc_large()
213 size_t new_size, gfp_t flags);
215 size_t new_size, gfp_t flags) in kasan_krealloc()
261 gfp_t flags, bool init) in kasan_slab_alloc()
266 size_t size, gfp_t flags) in kasan_kmalloc()
[all …]
mempool.h:13  typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
41 gfp_t gfp_mask, int node_id);
49 gfp_t gfp_mask, int nid);
53 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
61 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
82 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
101 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
fault-inject.h:94  bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
97 bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
99 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page()
105 int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
107 extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
109 static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) in __should_failslab()
devcoredump.h:56  gfp_t gfp);
59 void *data, size_t datalen, gfp_t gfp,
65 size_t datalen, gfp_t gfp);
68 size_t datalen, gfp_t gfp) in dev_coredumpv()
75 void *data, size_t datalen, gfp_t gfp, in dev_coredumpm()
84 size_t datalen, gfp_t gfp) in dev_coredumpsg()
idr.h:32  #define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
112 void idr_preload(gfp_t gfp_mask);
114 int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
116 unsigned long max, gfp_t);
117 int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
257 int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
273 static inline int ida_alloc(struct ida *ida, gfp_t gfp) in ida_alloc()
291 static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) in ida_alloc_min()
309 static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) in ida_alloc_max()
kmsan.h:69  void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
100 void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
119 void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
258 gfp_t flags) in kmsan_alloc_page()
271 gfp_t flags) in kmsan_slab_alloc()
280 gfp_t flags) in kmsan_kmalloc_large()
vmpressure.h:33  extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
35 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
47 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure()
49 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure_prio()
/linux-6.6.21/include/net/sctp/
ulpevent.h:81  gfp_t gfp);
90 gfp_t gfp);
96 gfp_t gfp);
103 gfp_t gfp);
108 gfp_t gfp);
113 __u32 flags, gfp_t gfp);
116 const struct sctp_association *asoc, gfp_t gfp);
120 gfp_t gfp);
124 __u32 indication, gfp_t gfp);
127 const struct sctp_association *asoc, gfp_t gfp);
[all …]
stream_interleave.h:25  int len, __u8 flags, gfp_t gfp);
29 struct sctp_chunk *chunk, gfp_t gfp);
33 struct sctp_chunk *chunk, gfp_t gfp);
34 void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
35 void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
auth.h:71  struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
73 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
79 gfp_t gfp);
80 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
94 struct sctp_shared_key *ep_key, gfp_t gfp);
110 int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp);
ulpqueue.h:43  int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
49 void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
52 void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t);
55 void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
/linux-6.6.21/net/wireless/
nl80211.h:60  const u8 *buf, size_t len, gfp_t gfp);
67 bool reconnect, gfp_t gfp);
71 bool reconnect, gfp_t gfp);
74 const u8 *addr, gfp_t gfp);
77 const u8 *addr, gfp_t gfp);
81 gfp_t gfp);
84 struct cfg80211_roam_info *info, gfp_t gfp);
96 int key_id, const u8 *tsc, gfp_t gfp);
105 gfp_t gfp);
109 struct cfg80211_rx_info *info, gfp_t gfp);
[all …]
/linux-6.6.21/mm/
swap.h:35  gfp_t gfp, void **shadowp);
47 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
51 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
55 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
57 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
84 gfp_t gfp_mask, struct vm_fault *vmf) in swap_cluster_readahead()
89 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead()
128 gfp_t gfp_mask, void **shadowp) in add_to_swap_cache()
slab.h:285  struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
287 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
292 gfp_t kmalloc_fix_flags(gfp_t flags);
452 gfp_t gfp, bool new_slab);
477 size_t objects, gfp_t flags) in memcg_slab_pre_alloc_hook()
515 gfp_t flags, size_t size, in memcg_slab_post_alloc_hook()
590 struct kmem_cache *s, gfp_t gfp, in memcg_alloc_slab_cgroups()
603 size_t objects, gfp_t flags) in memcg_slab_pre_alloc_hook()
610 gfp_t flags, size_t size, in memcg_slab_post_alloc_hook()
633 struct kmem_cache *s, gfp_t gfp) in account_slab()
[all …]
/linux-6.6.21/include/net/
handshake.h:36  int tls_client_hello_anon(const struct tls_handshake_args *args, gfp_t flags);
37 int tls_client_hello_x509(const struct tls_handshake_args *args, gfp_t flags);
38 int tls_client_hello_psk(const struct tls_handshake_args *args, gfp_t flags);
39 int tls_server_hello_x509(const struct tls_handshake_args *args, gfp_t flags);
40 int tls_server_hello_psk(const struct tls_handshake_args *args, gfp_t flags);
/linux-6.6.21/tools/virtio/linux/
kernel.h:56  static inline void *kmalloc(size_t s, gfp_t gfp) in kmalloc()
62 static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp) in kmalloc_array()
67 static inline void *kzalloc(size_t s, gfp_t gfp) in kzalloc()
75 static inline void *alloc_pages_exact(size_t s, gfp_t gfp) in alloc_pages_exact()
92 static inline void *krealloc(void *p, size_t s, gfp_t gfp) in krealloc()
98 static inline unsigned long __get_free_page(gfp_t gfp) in __get_free_page()
118 static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t gfp) in krealloc_array()
/linux-6.6.21/security/apparmor/include/
label.h:60  gfp_t gfp);
280 bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
281 struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
301 gfp_t gfp);
304 bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);
314 int flags, gfp_t gfp);
316 struct aa_label *label, int flags, gfp_t gfp);
318 struct aa_label *label, int flags, gfp_t gfp);
320 struct aa_label *label, int flags, gfp_t gfp);
322 gfp_t gfp);
[all …]
/linux-6.6.21/include/drm/
drm_managed.h:48  void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
60 static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp) in drmm_kzalloc()
77 size_t n, size_t size, gfp_t flags) in drmm_kmalloc_array()
99 size_t n, size_t size, gfp_t flags) in drmm_kcalloc()
104 char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
/linux-6.6.21/include/linux/sched/
mm.h:235  static inline gfp_t current_gfp_context(gfp_t flags) in current_gfp_context()
258 extern void fs_reclaim_acquire(gfp_t gfp_mask);
259 extern void fs_reclaim_release(gfp_t gfp_mask);
263 static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } in fs_reclaim_acquire()
264 static inline void fs_reclaim_release(gfp_t gfp_mask) { } in fs_reclaim_release()
274 static inline void memalloc_retry_wait(gfp_t gfp_flags) in memalloc_retry_wait()
301 static inline void might_alloc(gfp_t gfp_mask) in might_alloc()
/linux-6.6.21/arch/powerpc/include/asm/
pgalloc.h:8  static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) in pgtable_gfp_flags()
15 static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) in pgtable_gfp_flags()

Pages: 1 2 3 4 5 6 7 8 9 10 >> … 54