/linux-6.6.21/arch/arm/mm/ |
D | fault-armv.c | 152 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in make_coherent()
|
D | flush.c | 254 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) { in __flush_dcache_aliases()
|
/linux-6.6.21/fs/hugetlbfs/ |
D | inode.c | 498 struct rb_root_cached *root = &mapping->i_mmap; in hugetlb_unmap_file_folio()
  |         | 740 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) in hugetlb_vmtruncate()
  |         | 741 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0, in hugetlb_vmtruncate()
  |         | 801 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) in hugetlbfs_punch_hole()
  |         | 802 hugetlb_vmdelete_list(&mapping->i_mmap, in hugetlbfs_punch_hole()
|
/linux-6.6.21/mm/ |
D | nommu.c | 557 vma_interval_tree_insert(vma, &mapping->i_mmap); in setup_vma_to_mm()
  |         | 573 vma_interval_tree_remove(vma, &mapping->i_mmap); in cleanup_vma_from_mm()
  |         | 1753 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
  |         | 1769 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
|
D | mmap.c | 114 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
  |        | 391 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
  |        | 494 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); in vma_prepare()
  |        | 497 &vp->mapping->i_mmap); in vma_prepare()
  |        | 516 &vp->mapping->i_mmap); in vma_complete()
  |        | 517 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); in vma_complete()
  |        | 2846 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); in mmap_region()
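These mmap.c hits are the update side of the structure: whenever a file-backed VMA is linked, adjusted, or torn down, it is inserted into or removed from the owning file's i_mmap interval tree. A minimal sketch of that pattern, assuming only the vma_interval_tree_insert()/_remove() calls shown above plus the i_mmap_lock_write()/i_mmap_unlock_write() helpers that guard the tree; the real __vma_link_file() and __remove_shared_vm_struct() also maintain writecounts and other mapping state.

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Sketch: link a file-backed VMA into its file's i_mmap interval tree. */
    static void link_vma_to_file(struct vm_area_struct *vma)
    {
            struct file *file = vma->vm_file;

            if (!file)
                    return;         /* anonymous VMAs live in no i_mmap tree */

            i_mmap_lock_write(file->f_mapping);     /* takes i_mmap_rwsem */
            vma_interval_tree_insert(vma, &file->f_mapping->i_mmap);
            i_mmap_unlock_write(file->f_mapping);
    }

    /* Sketch: the matching removal when the VMA goes away. */
    static void unlink_vma_from_file(struct vm_area_struct *vma)
    {
            struct file *file = vma->vm_file;

            if (!file)
                    return;

            i_mmap_lock_write(file->f_mapping);
            vma_interval_tree_remove(vma, &file->f_mapping->i_mmap);
            i_mmap_unlock_write(file->f_mapping);
    }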
|
D | pagewalk.c | 647 vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index, in walk_page_mapping()
|
D | memory-failure.c | 651 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, in collect_procs_file()
  |                  | 693 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in collect_procs_fsdax()
|
D | khugepaged.c | 1684 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in retract_page_tables()
  |              | 2100 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) { in collapse_file()
|
D | memory.c | 3511 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) in unmap_mapping_folio()
  |          | 3512 unmap_mapping_range_tree(&mapping->i_mmap, first_index, in unmap_mapping_folio()
  |          | 3541 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) in unmap_mapping_pages()
  |          | 3542 unmap_mapping_range_tree(&mapping->i_mmap, first_index, in unmap_mapping_pages()
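unmap_mapping_folio() and unmap_mapping_pages() share a fast path: the interval tree is only walked when RB_EMPTY_ROOT() reports at least one mapping, since most files are never mmap'd (the hugetlbfs truncate and hole-punch hits earlier in this listing use the same check). A hedged sketch of that check-then-walk shape; the per-VMA zapping is reduced to a pr_debug() because it is not part of what the listing shows.

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    /* Walk every VMA mapping page offsets [first_index, last_index] of the file. */
    static void unmap_range_tree_sketch(struct rb_root_cached *root,
                                        pgoff_t first_index, pgoff_t last_index)
    {
            struct vm_area_struct *vma;

            vma_interval_tree_foreach(vma, root, first_index, last_index) {
                    pr_debug("would zap vma %p for file offsets %lu..%lu\n",
                             vma, first_index, last_index);
            }
    }

    static void unmap_file_range_sketch(struct address_space *mapping,
                                        pgoff_t first_index, pgoff_t last_index)
    {
            i_mmap_lock_read(mapping);
            /* Fast path: most files are never mmap'd, so the tree is empty. */
            if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
                    unmap_range_tree_sketch(&mapping->i_mmap, first_index,
                                            last_index);
            i_mmap_unlock_read(mapping);
    }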
|
D | rmap.c | 2493 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
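The rmap.c hit is the reverse-mapping walk itself: given a file folio, rmap_walk_file() queries i_mmap with the folio's page-offset range to visit every VMA, in every process, that may map it. A hedged sketch of that walk, assuming the i_mmap_lock_read()/i_mmap_unlock_read() helpers and using pr_debug() in place of the real per-VMA work.

    #include <linux/fs.h>
    #include <linux/minmax.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    /* Sketch: visit every VMA, in every process, mapping pages [first, last] of a file. */
    static void walk_file_mappers(struct address_space *mapping,
                                  pgoff_t first, pgoff_t last)
    {
            struct vm_area_struct *vma;

            i_mmap_lock_read(mapping);
            vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
                    /* Translate the file offset back into this VMA's virtual address. */
                    pgoff_t pgoff = max(first, (pgoff_t)vma->vm_pgoff);
                    unsigned long addr = vma->vm_start +
                            ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

                    pr_debug("vma %p maps file offset %lu at %#lx\n",
                             vma, pgoff, addr);
            }
            i_mmap_unlock_read(mapping);
    }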
|
D | hugetlb.c | 5534 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { in unmap_ref_private()
  |           | 7009 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { in huge_pmd_share()
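Both hugetlb.c hits are degenerate range queries: passing the same offset as start and last yields exactly the VMAs that cover that single (huge)page of the file, which is how unmap_ref_private() finds other private mappers and huge_pmd_share() looks for a VMA to share a PMD page with. A small sketch of such a point query; count_other_mappers() is an illustrative name, not a kernel function.

    #include <linux/fs.h>
    #include <linux/mm.h>

    /*
     * Sketch of a point query: with start == last, the walk yields exactly
     * the VMAs covering one particular page offset of the file.
     */
    static int count_other_mappers(struct address_space *mapping, pgoff_t pgoff,
                                   struct vm_area_struct *self)
    {
            struct vm_area_struct *vma;
            int count = 0;

            i_mmap_lock_read(mapping);
            vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                    if (vma != self)
                            count++;
            }
            i_mmap_unlock_read(mapping);
            return count;
    }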
|
/linux-6.6.21/arch/nios2/mm/ |
D | cacheflush.c | 85 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) { in flush_aliases()
|
/linux-6.6.21/Documentation/translations/zh_CN/core-api/ |
D | cachetlb.rst | 272 The idea is that, first, at flush_dcache_page() time, if page->mapping->i_mmap
|
/linux-6.6.21/arch/parisc/kernel/ |
D | cache.c | 442 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) { in flush_dcache_folio()
|
/linux-6.6.21/include/linux/ |
D | fs.h | 482 struct rb_root_cached i_mmap; member
  |      | 557 return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root); in mapping_mapped()
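The two fs.h hits are the field itself and the classic "is this file mapped anywhere?" test. An abridged sketch of the surrounding declarations; all other address_space members are omitted, and i_mmap_rwsem is shown only because it is the lock that guards the tree.

    #include <linux/rbtree.h>
    #include <linux/rwsem.h>

    struct address_space {
            /* ... */
            struct rb_root_cached   i_mmap;         /* interval tree of VMAs mapping this file */
            /* ... */
            struct rw_semaphore     i_mmap_rwsem;   /* guards i_mmap and its walks */
            /* ... */
    };

    /* A file is "mapped" somewhere iff its VMA interval tree is non-empty. */
    static inline int mapping_mapped(struct address_space *mapping)
    {
            return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
    }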
|
/linux-6.6.21/fs/ |
D | inode.c | 403 mapping->i_mmap = RB_ROOT_CACHED; in __address_space_init_once()
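__address_space_init_once() is where every address_space gets its (initially empty) tree. A minimal sketch of the i_mmap-related part of that setup; only the RB_ROOT_CACHED assignment is confirmed by the hit above, and the init_rwsem() line is an assumption about where the guarding lock gets initialised.

    #include <linux/fs.h>
    #include <linux/rbtree.h>
    #include <linux/rwsem.h>

    /* Sketch: the i_mmap-related part of one-time address_space initialisation. */
    static void i_mmap_init_sketch(struct address_space *mapping)
    {
            mapping->i_mmap = RB_ROOT_CACHED;       /* empty leftmost-cached rbtree */
            init_rwsem(&mapping->i_mmap_rwsem);     /* assumed: the lock is set up alongside */
    }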
|
D | dax.c | 993 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) { in dax_writeback_one()
|
/linux-6.6.21/kernel/events/ |
D | uprobes.c | 970 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in build_map_info()
|
/linux-6.6.21/kernel/ |
D | fork.c | 741 &mapping->i_mmap); in dup_mmap()
|