/linux-6.1.9/mm/
page_vma_mapped.c
    in not_found():
       10  static inline bool not_found(struct page_vma_mapped_walk *pvmw)
       12          page_vma_mapped_walk_done(pvmw);
    in map_pte():
       16  static bool map_pte(struct page_vma_mapped_walk *pvmw)
       18          pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
       19          if (!(pvmw->flags & PVMW_SYNC)) {
       20                  if (pvmw->flags & PVMW_MIGRATION) {
       21                          if (!is_swap_pte(*pvmw->pte))
       39                          if (is_swap_pte(*pvmw->pte)) {
       43                                  entry = pte_to_swp_entry(*pvmw->pte);
       47                          } else if (!pte_present(*pvmw->pte))
    [all …]

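Every entry below drives the same loop: seed a struct page_vma_mapped_walk for one folio in one VMA, then call page_vma_mapped_walk() until it returns false. A minimal, hypothetical caller, assuming only the API visible in this listing (the function name and loop body are illustrative):

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Hypothetical helper: count how often @folio is mapped inside @vma. */
    static int count_folio_mappings(struct folio *folio,
                                    struct vm_area_struct *vma,
                                    unsigned long address)
    {
            /* Walk state: pfn, nr_pages and pgoff are taken from the folio. */
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
            int mapcount = 0;

            /*
             * Each successful step returns with the page table lock held in
             * pvmw.ptl, and with pvmw.pte set for a PTE-level mapping or
             * left NULL when a huge PMD (pvmw.pmd) maps the folio.
             */
            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte)
                            mapcount++;                         /* one base page */
                    else
                            mapcount += folio_nr_pages(folio);  /* huge PMD */
            }
            /*
             * A false return means the walk already dropped its own locks;
             * page_vma_mapped_walk_done() is only needed when breaking early.
             */
            return mapcount;
    }
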
rmap.c
    in folio_referenced_one():
      810          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
      813          while (page_vma_mapped_walk(&pvmw)) {
      814                  address = pvmw.address;
      817                      (!folio_test_large(folio) || !pvmw.pte)) {
      819                          mlock_vma_folio(folio, vma, !pvmw.pte);
      820                          page_vma_mapped_walk_done(&pvmw);
      825                  if (pvmw.pte) {
      826                          if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
      828                                  lru_gen_look_around(&pvmw);
      833                                                  pvmw.pte)) {
    [all …]

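A hedged sketch of the referenced test folio_referenced_one() performs inside this loop. The helper name is made up; the VM_LOCKED early exit and the ptep_clear_flush_young_notify() call mirror the lines above, and the lru_gen look-around path is left out:

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/mmu_notifier.h>

    /* Illustrative only: count young PTEs mapping @folio in @vma. */
    static int folio_young_in_vma(struct folio *folio,
                                  struct vm_area_struct *vma,
                                  unsigned long address)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
            int referenced = 0;

            while (page_vma_mapped_walk(&pvmw)) {
                    /* An mlocked VMA pins the folio; stop and report nothing. */
                    if (vma->vm_flags & VM_LOCKED) {
                            page_vma_mapped_walk_done(&pvmw);
                            return 0;
                    }
                    /*
                     * Clear the hardware accessed bit and flush the TLB entry;
                     * a set bit means the mapping was used since the last scan.
                     */
                    if (pvmw.pte &&
                        ptep_clear_flush_young_notify(vma, pvmw.address, pvmw.pte))
                            referenced++;
            }
            return referenced;
    }
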
page_idle.c
    in page_idle_clear_pte_refs_one():
       53          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
       56          while (page_vma_mapped_walk(&pvmw)) {
       57                  addr = pvmw.address;
       58                  if (pvmw.pte) {
       63                          if (ptep_clear_young_notify(vma, addr, pvmw.pte))
       66                          if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))

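For a PMD-mapped THP the walk reports pvmw.pte == NULL and leaves pvmw.pmd pointing at the huge entry, which is why page_idle dispatches on pvmw.pte. A minimal sketch of that dispatch (helper name hypothetical):

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/mmu_notifier.h>

    /* Illustrative: clear the accessed bit at whichever level maps the folio. */
    static bool folio_clear_refs_in_vma(struct folio *folio,
                                        struct vm_area_struct *vma,
                                        unsigned long addr)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
            bool referenced = false;

            while (page_vma_mapped_walk(&pvmw)) {
                    addr = pvmw.address;
                    if (pvmw.pte) {
                            /* Base-page mapping: clear the PTE accessed bit. */
                            if (ptep_clear_young_notify(vma, addr, pvmw.pte))
                                    referenced = true;
                    } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                            /* PMD-mapped THP: clear the bit on the huge PMD. */
                            if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
                                    referenced = true;
                    }
            }
            return referenced;
    }
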
migrate.c
    in remove_migration_pte():
      177          DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
      179          while (page_vma_mapped_walk(&pvmw)) {
      188                  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
      193                  if (!pvmw.pte) {
      196                          remove_migration_pmd(&pvmw, new);
      203                  if (pte_swp_soft_dirty(*pvmw.pte))
      209                  entry = pte_to_swp_entry(*pvmw.pte);
      216                  else if (pte_swp_uffd_wp(*pvmw.pte))
      230                          if (pte_swp_soft_dirty(*pvmw.pte))
      232                          if (pte_swp_uffd_wp(*pvmw.pte))
    [all …]

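remove_migration_pte() rebuilds a present PTE from the migration swap entry, carrying soft-dirty and uffd-wp bits across. A simplified, hedged sketch of that conversion for the PTE-mapped case (huge pages, device-private entries, rmap accounting and error handling omitted):

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/swap.h>
    #include <linux/swapops.h>

    /* Illustrative: rebuild a present PTE for @new at the slot @pvmw found. */
    static void restore_migration_pte(struct page_vma_mapped_walk *pvmw,
                                      struct page *new)
    {
            struct vm_area_struct *vma = pvmw->vma;
            swp_entry_t entry = pte_to_swp_entry(*pvmw->pte);
            pte_t pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));

            /* Writable migration entries come back as writable PTEs. */
            if (is_writable_migration_entry(entry))
                    pte = maybe_mkwrite(pte, vma);
            /* Preserve bits that were parked in the swap-format PTE. */
            if (pte_swp_soft_dirty(*pvmw->pte))
                    pte = pte_mksoft_dirty(pte);
            if (pte_swp_uffd_wp(*pvmw->pte))
                    pte = pte_mkuffd_wp(pte);

            set_pte_at(vma->vm_mm, pvmw->address, pvmw->pte, pte);
            update_mmu_cache(vma, pvmw->address, pvmw->pte);
    }
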
ksm.c
    in write_protect_page():
     1020          DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
     1026          pvmw.address = page_address_in_vma(page, vma);
     1027          if (pvmw.address == -EFAULT)
     1033                                  pvmw.address,
     1034                                  pvmw.address + PAGE_SIZE);
     1037          if (!page_vma_mapped_walk(&pvmw))
     1039          if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
     1043          if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
     1044              (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
     1049                  flush_cache_page(vma, pvmw.address, page_to_pfn(page));
    [all …]

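write_protect_page() downgrades a writable mapping to read-only before KSM merges the page. A compressed sketch of the PTE manipulation once the walk has found the entry (the kernel brackets this with mmu_notifier calls and uses set_pte_at_notify(); both are omitted here):

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/cacheflush.h>

    /* Illustrative: make the single PTE found by @pvmw read-only. */
    static int wrprotect_found_pte(struct page_vma_mapped_walk *pvmw,
                                   struct page *page)
    {
            struct vm_area_struct *vma = pvmw->vma;
            pte_t entry;

            if (WARN_ONCE(!pvmw->pte, "Unexpected PMD mapping?"))
                    return -EFAULT;

            /*
             * Flush caches for the old mapping, then atomically clear the PTE
             * so concurrent hardware dirty/accessed updates are not lost.
             */
            flush_cache_page(vma, pvmw->address, page_to_pfn(page));
            entry = ptep_clear_flush(vma, pvmw->address, pvmw->pte);

            if (pte_dirty(entry))
                    set_page_dirty(page);

            /* Re-install the mapping without write permission. */
            entry = pte_mkclean(pte_wrprotect(entry));
            set_pte_at(vma->vm_mm, pvmw->address, pvmw->pte, entry);
            return 0;
    }
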
internal.h
    in vma_address_end():
      587  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
      589          struct vm_area_struct *vma = pvmw->vma;
      594          if (pvmw->nr_pages == 1)
      595                  return pvmw->address + PAGE_SIZE;
      597          pgoff = pvmw->pgoff + pvmw->nr_pages;

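The elided middle of vma_address_end() converts the folio's end page offset back into a user virtual address and clamps it to the VMA. Roughly, as a standalone helper (the clamping step is paraphrased from the surrounding kernel code, not shown in the excerpt above):

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Illustrative: first address past the range the folio can occupy here. */
    static inline unsigned long folio_range_end(struct page_vma_mapped_walk *pvmw)
    {
            struct vm_area_struct *vma = pvmw->vma;
            pgoff_t pgoff;
            unsigned long address;

            if (pvmw->nr_pages == 1)
                    return pvmw->address + PAGE_SIZE;

            pgoff = pvmw->pgoff + pvmw->nr_pages;   /* one past the folio */
            /* File offset back to a user virtual address in this mapping. */
            address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
            /* Never walk past the VMA itself. */
            if (address < vma->vm_start || address > vma->vm_end)
                    address = vma->vm_end;
            return address;
    }
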
huge_memory.c
    in set_pmd_migration_entry():
     3192  int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
     3195          struct vm_area_struct *vma = pvmw->vma;
     3197          unsigned long address = pvmw->address;
     3203          if (!(pvmw->pmd && !pvmw->pte))
     3207          pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
     3212                  set_pmd_at(mm, address, pvmw->pmd, pmdval);
     3231          set_pmd_at(mm, address, pvmw->pmd, pmdswp);
    in remove_migration_pmd():
     3239  void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
     3241          struct vm_area_struct *vma = pvmw->vma;
     3243          unsigned long address = pvmw->address;
    [all …]

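set_pmd_migration_entry() freezes a PMD-mapped THP by replacing the huge PMD with a migration swap entry, and remove_migration_pmd() later reverses it. A heavily trimmed, hedged sketch of the forward direction, assuming CONFIG_ARCH_ENABLE_THP_MIGRATION (anon-exclusive handling, young/uffd-wp carry-over and mmu_notifier calls omitted):

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/swap.h>
    #include <linux/swapops.h>
    #include <linux/huge_mm.h>

    /* Illustrative: swap the huge PMD found by @pvmw for a migration entry. */
    static int freeze_pmd_for_migration(struct page_vma_mapped_walk *pvmw,
                                        struct page *page)
    {
            struct vm_area_struct *vma = pvmw->vma;
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address = pvmw->address;
            pmd_t pmdval, pmdswp;
            swp_entry_t entry;

            /* Only meaningful when the walk stopped at a PMD, not a PTE. */
            if (!(pvmw->pmd && !pvmw->pte))
                    return 0;

            flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
            /* Clear the PMD so hardware stops updating it while we convert. */
            pmdval = pmdp_invalidate(vma, address, pvmw->pmd);

            if (pmd_dirty(pmdval))
                    set_page_dirty(page);
            entry = pmd_write(pmdval) ?
                    make_writable_migration_entry(page_to_pfn(page)) :
                    make_readable_migration_entry(page_to_pfn(page));
            pmdswp = swp_entry_to_pmd(entry);
            if (pmd_soft_dirty(pmdval))
                    pmdswp = pmd_swp_mksoft_dirty(pmdswp);

            set_pmd_at(mm, address, pvmw->pmd, pmdswp);
            page_remove_rmap(page, vma, true);
            put_page(page);
            return 0;
    }
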
vmscan.c
    in lru_gen_look_around():
     4589  void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
     4599          struct folio *folio = pfn_folio(pvmw->pfn);
     4606          lockdep_assert_held(pvmw->ptl);
     4609          if (spin_is_contended(pvmw->ptl))
     4615          start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
     4616          end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
     4619                  if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
     4621                  else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
     4624                          start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
     4625                          end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
    [all …]

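The start/end computation above limits the look-around window to the current PMD range and the VMA, then caps it at MIN_LRU_BATCH pages biased around the faulting address. The same arithmetic as a standalone helper (LOOKAROUND_BATCH is a stand-in for MIN_LRU_BATCH, which is internal to mm/vmscan.c; 64 is only an example value):

    #include <linux/mm.h>
    #include <linux/minmax.h>

    #define LOOKAROUND_BATCH 64     /* stand-in for the kernel's MIN_LRU_BATCH */

    /* Illustrative: clamp [start, end) around @addr for the look-around scan. */
    static void clamp_lookaround(unsigned long addr, struct vm_area_struct *vma,
                                 unsigned long *startp, unsigned long *endp)
    {
            unsigned long start = max(addr & PMD_MASK, vma->vm_start);
            unsigned long end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;

            if (end - start > LOOKAROUND_BATCH * PAGE_SIZE) {
                    if (addr - start < LOOKAROUND_BATCH * PAGE_SIZE / 2)
                            end = start + LOOKAROUND_BATCH * PAGE_SIZE;
                    else if (end - addr < LOOKAROUND_BATCH * PAGE_SIZE / 2)
                            start = end - LOOKAROUND_BATCH * PAGE_SIZE;
                    else {
                            start = addr - LOOKAROUND_BATCH * PAGE_SIZE / 2;
                            end = addr + LOOKAROUND_BATCH * PAGE_SIZE / 2;
                    }
            }
            *startp = start;
            *endp = end;
    }
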
/linux-6.1.9/mm/damon/
paddr.c
    in __damon_pa_mkold():
       22          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
       24          while (page_vma_mapped_walk(&pvmw)) {
       25                  addr = pvmw.address;
       26                  if (pvmw.pte)
       27                          damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
       29                          damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
    in __damon_pa_young():
       93          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
       97          while (page_vma_mapped_walk(&pvmw)) {
       98                  addr = pvmw.address;
       99                  if (pvmw.pte) {
    [all …]

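__damon_pa_mkold() is not called directly: DAMON's physical-address backend registers it as the rmap_one callback of an rmap_walk(), which then invokes it once per VMA that maps the folio. A hedged sketch of that wiring (folio locking and DAMON's region bookkeeping are omitted; the "ops-common.h" include for damon_ptep_mkold()/damon_pmdp_mkold() is an assumption about the DAMON-internal header):

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include "ops-common.h" /* assumed home of damon_ptep_mkold()/damon_pmdp_mkold() */

    /* Illustrative rmap_one() callback in the style of __damon_pa_mkold(). */
    static bool pa_mkold_one(struct folio *folio, struct vm_area_struct *vma,
                             unsigned long addr, void *arg)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

            while (page_vma_mapped_walk(&pvmw)) {
                    addr = pvmw.address;
                    if (pvmw.pte)
                            damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
                    else
                            damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
            }
            return true;    /* keep visiting the folio's other VMAs */
    }

    /* Wiring sketch: drive the callback over every mapping of @folio. */
    static void pa_mkold(struct folio *folio)
    {
            struct rmap_walk_control rwc = {
                    .rmap_one = pa_mkold_one,
                    .anon_lock = folio_lock_anon_vma_read,
            };

            rmap_walk(folio, &rwc);
    }
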
/linux-6.1.9/include/linux/
rmap.h
    in page_vma_mapped_walk_done():
      404  static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
      407          if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
      408                  pte_unmap(pvmw->pte);
      409          if (pvmw->ptl)
      410                  spin_unlock(pvmw->ptl);
    declaration:
      413  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);

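page_vma_mapped_walk_done() exists for early exits: a caller that breaks out of the loop still holds the mapped PTE and the page table lock taken by the last successful step, and the body above releases exactly those. A sketch of the required pattern (helper name hypothetical):

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Illustrative: stop at the first mapping of @folio found in @vma. */
    static bool folio_mapped_in_vma_once(struct folio *folio,
                                         struct vm_area_struct *vma,
                                         unsigned long address)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

            while (page_vma_mapped_walk(&pvmw)) {
                    /* Found one mapping; release pte map + ptl before leaving. */
                    page_vma_mapped_walk_done(&pvmw);
                    return true;
            }
            /* A false return from the walk has already cleaned up after itself. */
            return false;
    }
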
swapops.h
    declarations:
      521  extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
      524  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
    in set_pmd_migration_entry():
      554  static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
    in remove_migration_pmd():
      560  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,

mmzone.h
    declaration:
      479  void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
    in lru_gen_look_around():
      492  static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)

/linux-6.1.9/kernel/events/
uprobes.c
    in __replace_page():
      160          DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
      179          if (!page_vma_mapped_walk(&pvmw))
      181          VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
      196          flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
      197          ptep_clear_flush_notify(vma, addr, pvmw.pte);
      199          set_pte_at_notify(mm, addr, pvmw.pte,
      205          page_vma_mapped_walk_done(&pvmw);

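__replace_page() uses the walk to locate the single PTE mapping the old page, then swaps it for a mapping of the new page under the same page table lock. A trimmed, hedged sketch of the PTE swap (rmap and LRU accounting omitted; plain ptep_clear_flush()/set_pte_at() stand in for the _notify variants shown above):

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/cacheflush.h>

    /* Illustrative: point the PTE found by @pvmw at @new_page instead. */
    static void swap_mapped_page(struct page_vma_mapped_walk *pvmw,
                                 struct page *new_page)
    {
            struct vm_area_struct *vma = pvmw->vma;
            unsigned long addr = pvmw->address;

            /* Remove the old translation before installing the new one. */
            flush_cache_page(vma, addr, pte_pfn(*pvmw->pte));
            ptep_clear_flush(vma, addr, pvmw->pte);

            set_pte_at(vma->vm_mm, addr, pvmw->pte,
                       mk_pte(new_page, vma->vm_page_prot));
    }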