Searched refs:__GFP_FS (Results 1 – 24 of 24) sorted by relevance
72 #define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ macro
104 #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
105 #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
107 #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
108 #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
110 #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
113 #define GFP_IOFS (__GFP_IO | __GFP_FS)
128 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
133 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
52 lflags &= ~__GFP_FS; in kmem_flags_convert()
121 return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)); in kmem_shake_allow()
1039 if (!(gfp_mask & __GFP_FS)) in xfs_reclaim_inode_shrink()
678 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) in throttle_vm_writeout()
592 int may_enter_fs = gfp_mask & __GFP_FS; in try_to_compact_pages()
1001 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); in grab_cache_page_nowait()
2287 gfp_notmask = __GFP_FS; in grab_cache_page_write_begin()
747 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
2124 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { in __alloc_pages_slowpath()
309 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); in nilfs_new_inode()
387 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); in nilfs_set_inode_flags()
49 (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
1189 if (!(gfp_mask & __GFP_FS)) in shrink_zcache_memory()
650 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in do_loop_switch()
899 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_set_fd()
475 ~__GFP_FS); in add_ra_bio_pages()
85 inode->i_mapping->flags &= ~__GFP_FS; in lookup_free_space_inode()
1650 fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS; in open_ctree()
60 BUG_ON(mapping_gfp_mask(mapping) & __GFP_FS); in get_mapping_page()
2462 if (!(gfp_mask & __GFP_FS)) in __lockdep_trace_alloc()
2539 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) { in mark_irqflags()
762 if (!(gfp_mask & __GFP_FS)) in shrink_icache_memory()
1236 if (!(gfp_mask & __GFP_FS)) in shrink_dcache_memory()
995 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); in grow_dev_page()
3408 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS)); in page_symlink()
143 if ((gfp_mask & __GFP_FS) == 0) { in start_this_handle()
1361 if (!(gfp_mask & __GFP_FS)) in gfs2_shrink_glock_memory()
88 if (!(gfp_mask & __GFP_FS)) in gfs2_shrink_qd_memory()
3929 mapping_gfp_mask(mapping) & ~__GFP_FS); in ext4_block_truncate_page()