Searched refs:__GFP_FS (Results 1 – 25 of 43) sorted by relevance
209 #define __GFP_FS ((__force gfp_t)___GFP_FS) macro
326 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
331 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
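For context, the restricted allocation masks in the same header are built by leaving these bits out rather than clearing them afterwards; a sketch of the neighbouring defines, assuming the layout of include/linux/gfp_types.h:

    /* GFP_NOIO may not start any I/O; GFP_NOFS may start I/O but must not
     * call back into the filesystem. Both are GFP_KERNEL minus the
     * corresponding bits. */
    #define GFP_NOIO	(__GFP_RECLAIM)
    #define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)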
343 return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS); in gfp_has_io_fs()
29 The traditional way to avoid this deadlock problem is to clear __GFP_FS and __GFP_IO from the gfp mask when calling the allocator
41 critical section. Any allocation from that scope will inherently drop __GFP_FS and __GFP_IO from the given mask, so
38 lflags &= ~__GFP_FS; in kmem_flags_convert()
513 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM)) in xfs_qm_shrink_scan()
1299 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
245 flags &= ~(__GFP_IO | __GFP_FS); in current_gfp_context()
247 flags &= ~__GFP_FS; in current_gfp_context()
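These two hits are the core of the scoped-mask mechanism: current_gfp_context() strips bits from any allocation mask according to the calling task's flags. A simplified sketch of that logic, based on include/linux/sched/mm.h (the real function also handles PF_MEMALLOC and PF_MEMALLOC_PIN):

    /* Simplified: a task inside a memalloc_noio/nofs scope has the
     * corresponding PF_ flag set, and every allocation it makes is
     * filtered through this before reaching the allocator. */
    static inline gfp_t sketch_current_gfp_context(gfp_t flags)
    {
    	unsigned int pflags = READ_ONCE(current->flags);

    	if (pflags & PF_MEMALLOC_NOIO)
    		flags &= ~(__GFP_IO | __GFP_FS);	/* line 245 above */
    	else if (pflags & PF_MEMALLOC_NOFS)
    		flags &= ~__GFP_FS;			/* line 247 above */
    	return flags;
    }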
19 The traditional way to avoid this deadlock problem is to clear __GFP_FS
35 scope will inherently drop __GFP_FS respectively __GFP_IO from the given
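Those documentation hits describe the scope API that sets the flags consumed above. A minimal usage sketch, assuming a hypothetical filesystem path that must not recurse into filesystem reclaim (my_fs_critical_alloc() is an invented helper name):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *my_fs_critical_alloc(size_t size)	/* hypothetical */
    {
    	unsigned int nofs_flags;
    	void *p;

    	/* Everything allocated inside this scope behaves as GFP_NOFS,
    	 * even a plain GFP_KERNEL request, because current_gfp_context()
    	 * drops __GFP_FS for us. */
    	nofs_flags = memalloc_nofs_save();
    	p = kmalloc(size, GFP_KERNEL);
    	memalloc_nofs_restore(nofs_flags);
    	return p;
    }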
105 if (current_is_kswapd() || !(gfp & __GFP_FS)) in nfs_fscache_release_folio()
35 gfpflag_string(__GFP_FS), \
268 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
24 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
30 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
1133 if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
3176 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) in __vmalloc_area_node()
3178 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) in __vmalloc_area_node()
3188 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) in __vmalloc_area_node()
3190 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) in __vmalloc_area_node()
808 if (cc->gfp_mask & __GFP_FS) { in too_many_isolated()
1076 if (!(cc->gfp_mask & __GFP_FS) && mapping) in isolate_migratepages_block()
121 if (current_is_kswapd() || !(gfp & __GFP_FS)) in v9fs_release_folio()
499 return mapping_gfp_constraint(mapping, ~__GFP_FS); in btrfs_alloc_write_mask()
745 folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS), in btrfs_read_merkle_tree_page()
380 ~__GFP_FS)); in add_ra_bio_pages()
340 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in nilfs_new_inode()
527 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in __nilfs_read_inode()
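The btrfs and nilfs hits above (and two ceph hits below) funnel through mapping_gfp_constraint(), which simply intersects the mapping's default mask with a restriction; its definition in include/linux/pagemap.h amounts to:

    static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
    		gfp_t gfp_mask)
    {
    	/* e.g. gfp_mask = ~__GFP_FS clears __GFP_FS from whatever mask
    	 * the mapping would normally allocate with. */
    	return mapping_gfp_mask(mapping) & gfp_mask;
    }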
500 if (current_is_kswapd() || !(gfp & __GFP_FS)) in afs_release_folio()
178 if (current_is_kswapd() || !(gfp & __GFP_FS)) in ceph_release_folio()
1642 mapping_gfp_constraint(mapping, ~__GFP_FS)); in ceph_filemap_fault()
1791 ~__GFP_FS)); in ceph_fill_inline_data()
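The nfs, 9p, afs, and ceph hits share one ->release_folio guard: if reclaim entered without __GFP_FS, or came from kswapd, the filesystem may not block on its own I/O to free private state. A minimal sketch of the pattern, with a hypothetical my_release_folio():

    #include <linux/pagemap.h>
    #include <linux/swap.h>

    static bool my_release_folio(struct folio *folio, gfp_t gfp)	/* hypothetical */
    {
    	if (folio_test_private(folio)) {
    		/* Cannot wait for write-back or cache I/O here without
    		 * __GFP_FS, and kswapd must never block on us. */
    		if (current_is_kswapd() || !(gfp & __GFP_FS))
    			return false;
    		folio_detach_private(folio);	/* safe to tear down now */
    	}
    	return true;
    }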
49 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); in pm_restrict_gfp_mask()
635 const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS; in alloc_private_pages()
610 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
1075 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_configure()