Occurrences of the identifier "ptent" in linux-6.6.21, grouped by directory and file. The number before each fragment is the line in that source file; "local" / "argument" notes how ptent is declared in the enclosing function.

/linux-6.6.21/mm/
page_vma_mapped.c
    18   pte_t ptent;  in map_pte() local
    40   ptent = ptep_get(pvmw->pte);  in map_pte()
    43   if (!is_swap_pte(ptent))  in map_pte()
    45   } else if (is_swap_pte(ptent)) {  in map_pte()
    63   entry = pte_to_swp_entry(ptent);  in map_pte()
    67   } else if (!pte_present(ptent)) {  in map_pte()
    99   pte_t ptent = ptep_get(pvmw->pte);  in check_pte() local
    103  if (!is_swap_pte(ptent))  in check_pte()
    105  entry = pte_to_swp_entry(ptent);  in check_pte()
    112  } else if (is_swap_pte(ptent)) {  in check_pte()
    [all …]
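The map_pte()/check_pte() fragments read the entry once with ptep_get() and then classify it as none, present, or a software swap-style entry. A minimal sketch of that classification, assuming the caller holds the PTE lock; the helper name is made up for illustration.

#include <linux/mm.h>
#include <linux/swapops.h>

/* Hypothetical helper: does this PTE hold a migration entry? */
static bool pte_holds_migration_entry(pte_t *ptep)
{
        pte_t ptent = ptep_get(ptep);   /* single, tear-free read of the entry */
        swp_entry_t entry;

        /* is_swap_pte() == !pte_none() && !pte_present() */
        if (!is_swap_pte(ptent))
                return false;

        entry = pte_to_swp_entry(ptent);        /* decode the software entry */
        return is_migration_entry(entry);
}

page_vma_mapped.c cares about this distinction because a folio can be mapped by a present PTE, a migration entry, or a device-private entry, and the walk has to match all of them.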
mapping_dirty_helpers.c
    38   pte_t ptent = ptep_get(pte);  in wp_pte() local
    40   if (pte_write(ptent)) {  in wp_pte()
    43   ptent = pte_wrprotect(old_pte);  in wp_pte()
    44   ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in wp_pte()
    94   pte_t ptent = ptep_get(pte);  in clean_record_pte() local
    96   if (pte_dirty(ptent)) {  in clean_record_pte()
    101  ptent = pte_mkclean(old_pte);  in clean_record_pte()
    102  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in clean_record_pte()
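wp_pte() and clean_record_pte() both use the ptep_modify_prot_start()/ptep_modify_prot_commit() transaction to rewrite a live PTE. A hedged sketch of the write-protect half, assuming the PTE lock is held and that the caller flushes the TLB for the range afterwards.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: write-protect one present, writable PTE (PTE lock assumed held). */
static void wrprotect_one_pte(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *pte)
{
        pte_t ptent = ptep_get(pte);

        if (pte_write(ptent)) {
                /* Start a prot-update transaction: atomically fetch the old PTE. */
                pte_t old_pte = ptep_modify_prot_start(vma, addr, pte);

                ptent = pte_wrprotect(old_pte);
                /* Commit the new value; the architecture may optimize the update. */
                ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
                /* Caller is expected to flush the TLB for the range later. */
        }
}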
madvise.c
    351  pte_t *start_pte, *pte, ptent;  in madvise_cold_or_pageout_pte_range() local
    443  ptent = ptep_get(pte);  in madvise_cold_or_pageout_pte_range()
    445  if (pte_none(ptent))  in madvise_cold_or_pageout_pte_range()
    448  if (!pte_present(ptent))  in madvise_cold_or_pageout_pte_range()
    451  folio = vm_normal_folio(vma, addr, ptent);  in madvise_cold_or_pageout_pte_range()
    499  if (pte_young(ptent)) {  in madvise_cold_or_pageout_pte_range()
    500  ptent = ptep_get_and_clear_full(mm, addr, pte,  in madvise_cold_or_pageout_pte_range()
    502  ptent = pte_mkold(ptent);  in madvise_cold_or_pageout_pte_range()
    503  set_pte_at(mm, addr, pte, ptent);  in madvise_cold_or_pageout_pte_range()
    633  pte_t *start_pte, *pte, ptent;  in madvise_free_pte_range() local
    [all …]
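madvise_cold_or_pageout_pte_range() ages a mapping by clearing the accessed bit: it atomically clears the PTE, drops the young bit, and writes the entry back so a hardware update cannot race with the rewrite. A reduced sketch of just that step; the real code batches the work under an mmu_gather and passes tlb->fullmm, which is simplified to 0 here.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: mark one present PTE "old" so reclaim treats the page as cold. */
static void pte_clear_young_one(struct mm_struct *mm, unsigned long addr,
                                pte_t *pte)
{
        pte_t ptent = ptep_get(pte);

        if (!pte_present(ptent) || !pte_young(ptent))
                return;

        /* Clear-and-rewrite so a concurrent hardware Accessed update is not lost. */
        ptent = ptep_get_and_clear_full(mm, addr, pte, /* full */ 0);
        ptent = pte_mkold(ptent);
        set_pte_at(mm, addr, pte, ptent);
}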
mprotect.c
    110  pte_t ptent;  in change_pte_range() local
    165  ptent = pte_modify(oldpte, newprot);  in change_pte_range()
    168  ptent = pte_mkuffd_wp(ptent);  in change_pte_range()
    170  ptent = pte_clear_uffd_wp(ptent);  in change_pte_range()
    186  !pte_write(ptent) &&  in change_pte_range()
    187  can_change_pte_writable(vma, addr, ptent))  in change_pte_range()
    188  ptent = pte_mkwrite(ptent, vma);  in change_pte_range()
    190  ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);  in change_pte_range()
    191  if (pte_needs_flush(oldpte, ptent))  in change_pte_range()
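change_pte_range() computes the new PTE with pte_modify(), adjusts the userfaultfd write-protect bit, optionally restores write permission when that is provably safe, and commits the result. A condensed sketch, assuming it is compiled inside mm/ (can_change_pte_writable() is declared in mm/internal.h) and with an immediate flush_tlb_page() standing in for the batched flushing the real loop does.

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"   /* can_change_pte_writable() */

/* Sketch: apply a new protection to one present PTE (PTE lock assumed held). */
static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
                           pte_t *pte, pgprot_t newprot, bool uffd_wp)
{
        pte_t oldpte = ptep_modify_prot_start(vma, addr, pte);
        pte_t ptent = pte_modify(oldpte, newprot);

        if (uffd_wp)
                ptent = pte_mkuffd_wp(ptent);
        else
                ptent = pte_clear_uffd_wp(ptent);

        /* Re-grant write access eagerly when it is known to be safe. */
        if (!pte_write(ptent) && can_change_pte_writable(vma, addr, ptent))
                ptent = pte_mkwrite(ptent, vma);

        ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
        if (pte_needs_flush(oldpte, ptent))
                flush_tlb_page(vma, addr);      /* the real code batches this */
}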
highmem.c
    194  pte_t ptent;  in flush_all_zero_pkmaps() local
    207  ptent = ptep_get(&pkmap_page_table[i]);  in flush_all_zero_pkmaps()
    208  BUG_ON(pte_none(ptent));  in flush_all_zero_pkmaps()
    217  page = pte_page(ptent);  in flush_all_zero_pkmaps()
mlock.c
    315  pte_t ptent;  in mlock_pte_range() local
    338  ptent = ptep_get(pte);  in mlock_pte_range()
    339  if (!pte_present(ptent))  in mlock_pte_range()
    341  folio = vm_normal_folio(vma, addr, ptent);  in mlock_pte_range()
khugepaged.c
    1547  pte_t ptent = ptep_get(pte);  in collapse_pte_mapped_thp() local
    1550  if (pte_none(ptent))  in collapse_pte_mapped_thp()
    1554  if (!pte_present(ptent)) {  in collapse_pte_mapped_thp()
    1559  page = vm_normal_page(vma, addr, ptent);  in collapse_pte_mapped_thp()
    1599  pte_t ptent = ptep_get(pte);  in collapse_pte_mapped_thp() local
    1601  if (pte_none(ptent))  in collapse_pte_mapped_thp()
    1609  if (!pte_present(ptent)) {  in collapse_pte_mapped_thp()
    1613  page = vm_normal_page(vma, addr, ptent);  in collapse_pte_mapped_thp()
memory.c
    1009  pte_t ptent;  in copy_pte_range() local
    1055  ptent = ptep_get(src_pte);  in copy_pte_range()
    1056  if (pte_none(ptent)) {  in copy_pte_range()
    1060  if (unlikely(!pte_present(ptent))) {  in copy_pte_range()
    1416  pte_t ptent = ptep_get(pte);  in zap_pte_range() local
    1419  if (pte_none(ptent))  in zap_pte_range()
    1425  if (pte_present(ptent)) {  in zap_pte_range()
    1428  page = vm_normal_page(vma, addr, ptent);  in zap_pte_range()
    1431  ptent = ptep_get_and_clear_full(mm, addr, pte,  in zap_pte_range()
    1433  arch_check_zapped_pte(vma, ptent);  in zap_pte_range()
    [all …]
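The zap_pte_range() fragments show the teardown order: snapshot the PTE, resolve the backing page, atomically clear the entry, then let the architecture sanity-check what was cleared. A hedged sketch; the rmap, folio-dirtying and mmu_gather/TLB accounting of the real function are reduced to a comment.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: unmap one present PTE (PTE lock assumed held, TLB flush deferred). */
static struct page *zap_one_pte(struct vm_area_struct *vma,
                                unsigned long addr, pte_t *pte)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t ptent = ptep_get(pte);
        struct page *page;

        if (pte_none(ptent) || !pte_present(ptent))
                return NULL;

        page = vm_normal_page(vma, addr, ptent);  /* NULL for special mappings */
        ptent = ptep_get_and_clear_full(mm, addr, pte, /* full */ 0);
        arch_check_zapped_pte(vma, ptent);        /* e.g. shadow-stack checks */

        /* The real code updates rmap, dirties the folio and batches the TLB flush. */
        return page;
}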
memcontrol.c
    5672  unsigned long addr, pte_t ptent)  in mc_handle_present_pte() argument
    5674  struct page *page = vm_normal_page(vma, addr, ptent);  in mc_handle_present_pte()
    5692  pte_t ptent, swp_entry_t *entry)  in mc_handle_swap_pte() argument
    5695  swp_entry_t ent = pte_to_swp_entry(ptent);  in mc_handle_swap_pte()
    5725  pte_t ptent, swp_entry_t *entry)  in mc_handle_swap_pte() argument
    5732  unsigned long addr, pte_t ptent)  in mc_handle_file_pte() argument
    5894  unsigned long addr, pte_t ptent, union mc_target *target)  in get_mctgt_type() argument
    5900  if (pte_present(ptent))  in get_mctgt_type()
    5901  page = mc_handle_present_pte(vma, addr, ptent);  in get_mctgt_type()
    5902  else if (pte_none_mostly(ptent))  in get_mctgt_type()
    [all …]
rmap.c
    2194  pte_t ptent;  in page_make_device_exclusive_one() local
    2206  ptent = ptep_get(pvmw.pte);  in page_make_device_exclusive_one()
    2207  if (!pte_present(ptent)) {  in page_make_device_exclusive_one()
    2214  pte_pfn(ptent) - folio_pfn(folio));  in page_make_device_exclusive_one()
    2218  flush_cache_page(vma, address, pte_pfn(ptent));  in page_make_device_exclusive_one()
ksm.c
    438  pte_t ptent;  in break_ksm_pmd_entry() local
    444  ptent = ptep_get(pte);  in break_ksm_pmd_entry()
    445  if (pte_present(ptent)) {  in break_ksm_pmd_entry()
    446  page = vm_normal_page(walk->vma, addr, ptent);  in break_ksm_pmd_entry()
    447  } else if (!pte_none(ptent)) {  in break_ksm_pmd_entry()
    448  swp_entry_t entry = pte_to_swp_entry(ptent);  in break_ksm_pmd_entry()
memory-failure.c
    385  pte_t ptent;  in dev_pagemap_mapping_shift() local
    407  ptent = ptep_get(pte);  in dev_pagemap_mapping_shift()
    408  if (pte_present(ptent) && pte_devmap(ptent))  in dev_pagemap_mapping_shift()
vmalloc.c
    328   pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);  in vunmap_pte_range() local
    329   WARN_ON(!pte_none(ptent) && !pte_present(ptent));  in vunmap_pte_range()
    2947  pte_t ptent;  in vmap_pfn_apply() local
    2952  ptent = pte_mkspecial(pfn_pte(pfn, data->prot));  in vmap_pfn_apply()
    2953  set_pte_at(&init_mm, addr, pte, ptent);  in vmap_pfn_apply()
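vmap_pfn_apply() installs "special" PTEs (no struct page behind them) into init_mm, running as an apply_to_page_range() callback; the real code also rejects PFNs that do have a struct page. A minimal sketch of such a callback with a made-up context structure.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical context for the callback (stand-in for the file's vmap_pfn_data). */
struct pfn_map_data {
        unsigned long pfn;      /* next PFN to map */
        pgprot_t prot;
};

/* Sketch: install one special (no struct page) PTE into the kernel page table. */
static int map_one_pfn(pte_t *pte, unsigned long addr, void *private)
{
        struct pfn_map_data *data = private;
        pte_t ptent = pte_mkspecial(pfn_pte(data->pfn++, data->prot));

        set_pte_at(&init_mm, addr, pte, ptent);
        return 0;
}

Such a callback would be driven by apply_to_page_range(&init_mm, start, size, map_one_pfn, &data) over a freshly reserved vmap area.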
mempolicy.c
    515  pte_t ptent;  in queue_folios_pte_range() local
    528  ptent = ptep_get(pte);  in queue_folios_pte_range()
    529  if (!pte_present(ptent))  in queue_folios_pte_range()
    531  folio = vm_normal_folio(vma, addr, ptent);  in queue_folios_pte_range()
vmscan.c
    4030  pte_t ptent = ptep_get(pte + i);  in walk_pte_range() local
    4035  pfn = get_pte_pfn(ptent, args->vma, addr);  in walk_pte_range()
    4039  if (!pte_young(ptent)) {  in walk_pte_range()
    4054  if (pte_dirty(ptent) && !folio_test_dirty(folio) &&  in walk_pte_range()
    4705  pte_t ptent = ptep_get(pte + i);  in lru_gen_look_around() local
    4707  pfn = get_pte_pfn(ptent, vma, addr);  in lru_gen_look_around()
    4711  if (!pte_young(ptent))  in lru_gen_look_around()
    4723  if (pte_dirty(ptent) && !folio_test_dirty(folio) &&  in lru_gen_look_around()
swapfile.c
    1845  pte_t ptent;  in unuse_pte_range() local
    1853  ptent = ptep_get_lockless(pte);  in unuse_pte_range()
    1855  if (!is_swap_pte(ptent))  in unuse_pte_range()
    1858  entry = pte_to_swp_entry(ptent);  in unuse_pte_range()
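unuse_pte_range() peeks at each PTE without taking the page-table lock (ptep_get_lockless()) and only later revalidates under the lock. A small sketch of the lockless classification step; the helper name is invented.

#include <linux/mm.h>
#include <linux/swapops.h>

/*
 * Sketch: peek at a PTE locklessly and report the swap slot it references.
 * Returns true and fills *entry only for a genuine swap PTE.
 */
static bool pte_peek_swap_entry(pte_t *pte, swp_entry_t *entry)
{
        pte_t ptent = ptep_get_lockless(pte);

        if (!is_swap_pte(ptent))
                return false;

        *entry = pte_to_swp_entry(ptent);
        /* Caller must re-check under the PTE lock before modifying the PTE. */
        return !non_swap_entry(*entry);
}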
/linux-6.6.21/fs/proc/
task_mmu.c
    530  pte_t ptent = ptep_get(pte);  in smaps_pte_entry() local
    532  if (pte_present(ptent)) {  in smaps_pte_entry()
    533  page = vm_normal_page(vma, addr, ptent);  in smaps_pte_entry()
    534  young = pte_young(ptent);  in smaps_pte_entry()
    535  dirty = pte_dirty(ptent);  in smaps_pte_entry()
    536  } else if (is_swap_pte(ptent)) {  in smaps_pte_entry()
    537  swp_entry_t swpent = pte_to_swp_entry(ptent);  in smaps_pte_entry()
    727  pte_t ptent = ptep_get(pte);  in smaps_hugetlb_range() local
    729  if (pte_present(ptent)) {  in smaps_hugetlb_range()
    730  page = vm_normal_page(vma, addr, ptent);  in smaps_hugetlb_range()
    [all …]
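smaps_pte_entry() branches on present vs. swap PTEs to decide what to account. A reduced sketch of that classification, with the real mem_size_stats bookkeeping replaced by a hypothetical counter struct.

#include <linux/mm.h>
#include <linux/swapops.h>

/* Hypothetical accounting struct standing in for mem_size_stats. */
struct pte_stats {
        unsigned long present;
        unsigned long swapped;
        unsigned long young;
        unsigned long dirty;
};

/* Sketch: classify one user PTE the way the smaps fragments above do. */
static void account_one_pte(struct vm_area_struct *vma, unsigned long addr,
                            pte_t *pte, struct pte_stats *stats)
{
        pte_t ptent = ptep_get(pte);

        if (pte_present(ptent)) {
                struct page *page = vm_normal_page(vma, addr, ptent);

                if (page)
                        stats->present++;
                if (pte_young(ptent))
                        stats->young++;
                if (pte_dirty(ptent))
                        stats->dirty++;
        } else if (is_swap_pte(ptent)) {
                swp_entry_t swpent = pte_to_swp_entry(ptent);

                if (!non_swap_entry(swpent))
                        stats->swapped++;
        }
}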
/linux-6.6.21/mm/damon/
vaddr.c
    441  pte_t ptent;  in damon_young_pmd_entry() local
    484  ptent = ptep_get(pte);  in damon_young_pmd_entry()
    485  if (!pte_present(ptent))  in damon_young_pmd_entry()
    487  folio = damon_get_folio(pte_pfn(ptent));  in damon_young_pmd_entry()
    490  if (pte_young(ptent) || !folio_test_idle(folio) ||  in damon_young_pmd_entry()
/linux-6.6.21/mm/kasan/
init.c
    356  pte_t ptent;  in kasan_remove_pte_table() local
    363  ptent = ptep_get(pte);  in kasan_remove_pte_table()
    365  if (!pte_present(ptent))  in kasan_remove_pte_table()
    368  if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))  in kasan_remove_pte_table()
/linux-6.6.21/drivers/gpu/drm/i915/gem/selftests/
i915_gem_mman.c
    1684  pte_t ptent = ptep_get(pte);  in check_present_pte() local
    1686  if (!pte_present(ptent) || pte_none(ptent)) {  in check_present_pte()
    1697  pte_t ptent = ptep_get(pte);  in check_absent_pte() local
    1699  if (pte_present(ptent) && !pte_none(ptent)) {  in check_absent_pte()
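check_present_pte() and check_absent_pte() are per-PTE callbacks the selftest runs over a mapping to assert it is (or is not) populated. A simplified re-sketch of the "present" check, with the selftest's error reporting replaced by pr_err() and an error slot passed through the data pointer.

#include <linux/mm.h>
#include <linux/printk.h>

/* Sketch: per-PTE callback asserting that a user mapping is populated. */
static int assert_pte_present(pte_t *pte, unsigned long addr, void *data)
{
        pte_t ptent = ptep_get(pte);

        if (!pte_present(ptent) || pte_none(ptent)) {
                pr_err("missing PTE for vaddr %lx\n", addr);
                *(int *)data = -EINVAL;
                return -EINVAL;         /* aborts apply_to_page_range() early */
        }

        return 0;
}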
/linux-6.6.21/fs/
userfaultfd.c
    335  pte_t ptent;  in userfaultfd_must_wait() local
    375  ptent = ptep_get(pte);  in userfaultfd_must_wait()
    376  if (pte_none_mostly(ptent))  in userfaultfd_must_wait()
    378  if (!pte_write(ptent) && (reason & VM_UFFD_WP))  in userfaultfd_must_wait()
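userfaultfd_must_wait() decides whether the faulting thread has to sleep by inspecting the PTE it found. A simplified sketch of just that decision; the page-table walk, locking and rechecks of the real function are assumed to have happened already, and the header providing pte_none_mostly() is an assumption.

#include <linux/mm.h>
#include <linux/swapops.h>      /* assumed home of pte_none_mostly() */

/*
 * Sketch: should a fault on this PTE wait for userspace to resolve it?
 * True when the entry is none (or only a PTE marker), or when it is
 * write-protected while a uffd-wp fault is being handled.
 */
static bool uffd_pte_must_wait(pte_t *pte, unsigned long reason)
{
        pte_t ptent = ptep_get(pte);

        if (pte_none_mostly(ptent))
                return true;

        if (!pte_write(ptent) && (reason & VM_UFFD_WP))
                return true;

        return false;
}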