/linux-6.6.21/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | vmmnv44.c |
    28   dma_addr_t *list, u32 ptei, u32 ptes)  in nv44_vmm_pgt_fill() argument
    38   while (ptes--) {  in nv44_vmm_pgt_fill()
    74   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv44_vmm_pgt_pte() argument
    79   const u32 pten = min(ptes, 4 - (ptei & 3));  in nv44_vmm_pgt_pte()
    84   ptes -= pten;  in nv44_vmm_pgt_pte()
    87   while (ptes >= 4) {  in nv44_vmm_pgt_pte()
    94   ptes -= 4;  in nv44_vmm_pgt_pte()
    97   if (ptes) {  in nv44_vmm_pgt_pte()
    98   for (i = 0; i < ptes; i++, addr += 0x1000)  in nv44_vmm_pgt_pte()
    100  nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);  in nv44_vmm_pgt_pte()
    [all …]
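The nv44 hits above outline a pattern that recurs through these nouveau files: the chip packs four PTEs per naturally aligned group, so a run of ptes is split into an unaligned head (filled read-modify-write via nv44_vmm_pgt_fill(), cf. line 79), an aligned body written four at a time (line 87), and a partial tail (lines 97-100). A minimal standalone C model of that split follows; write_partial() and write_group4() are hypothetical stand-ins for the hardware writes.

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* Hypothetical stand-ins for the MMIO page-table writes. */
static void write_partial(unsigned int ptei, unsigned int n)
{
    printf("rmw fill : %u PTE(s) at index %u\n", n, ptei);
}

static void write_group4(unsigned int ptei)
{
    printf("fast path: 4 PTEs at index %u\n", ptei);
}

static void map_ptes(unsigned int ptei, unsigned int ptes)
{
    /* Head: fill up to the next group-of-4 boundary (cf. line 79). */
    unsigned int pten = min_u32(ptes, 4 - (ptei & 3));

    if (pten && pten < 4) {
        write_partial(ptei, pten);
        ptei += pten;
        ptes -= pten;
    }

    /* Body: whole, naturally aligned groups of four (cf. line 87). */
    while (ptes >= 4) {
        write_group4(ptei);
        ptei += 4;
        ptes -= 4;
    }

    /* Tail: leftovers share a group with entries we must not clobber. */
    if (ptes)
        write_partial(ptei, ptes);
}

int main(void)
{
    map_ptes(2, 11);    /* head of 2, two full groups, tail of 1 */
    return 0;
}

The partial cases go through the slower fill path because, presumably, the other PTEs in the same group belong to unrelated mappings and must be preserved.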
|
D | vmmgp100.c |
    35   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gp100_vmm_pfn_unmap() argument
    41   while (ptes--) {  in gp100_vmm_pfn_unmap()
    56   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gp100_vmm_pfn_clear() argument
    60   while (ptes--) {  in gp100_vmm_pfn_clear()
    76   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gp100_vmm_pgt_pfn() argument
    82   for (; ptes; ptes--, map->pfn++) {  in gp100_vmm_pgt_pfn()
    116  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in gp100_vmm_pgt_pte() argument
    120  map->type += ptes * map->ctag;  in gp100_vmm_pgt_pte()
    122  while (ptes--) {  in gp100_vmm_pgt_pte()
    130  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gp100_vmm_pgt_sgl() argument
    [all …]
|
D | vmmnv41.c |
    28   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv41_vmm_pgt_pte() argument
    31   while (ptes--) {  in nv41_vmm_pgt_pte()
    39   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv41_vmm_pgt_sgl() argument
    41   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);  in nv41_vmm_pgt_sgl()
    46   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv41_vmm_pgt_dma() argument
    50   while (ptes--) {  in nv41_vmm_pgt_dma()
    56   VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);  in nv41_vmm_pgt_dma()
    62   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in nv41_vmm_pgt_unmap() argument
    64   VMM_FO032(pt, vmm, ptei * 4, 0, ptes);  in nv41_vmm_pgt_unmap()
|
D | vmmnv04.c |
    29   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv04_vmm_pgt_pte() argument
    32   while (ptes--) {  in nv04_vmm_pgt_pte()
    40   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv04_vmm_pgt_sgl() argument
    42   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);  in nv04_vmm_pgt_sgl()
    47   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv04_vmm_pgt_dma() argument
    51   while (ptes--)  in nv04_vmm_pgt_dma()
    55   VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);  in nv04_vmm_pgt_dma()
    61   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in nv04_vmm_pgt_unmap() argument
    63   VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);  in nv04_vmm_pgt_unmap()
|
D | vmmnv50.c |
    33   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv50_vmm_pgt_pte() argument
    39   map->type += ptes * map->ctag;  in nv50_vmm_pgt_pte()
    41   while (ptes) {  in nv50_vmm_pgt_pte()
    44   if (ptes >= pten && IS_ALIGNED(ptei, pten))  in nv50_vmm_pgt_pte()
    50   ptes -= pten;  in nv50_vmm_pgt_pte()
    59   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv50_vmm_pgt_sgl() argument
    61   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);  in nv50_vmm_pgt_sgl()
    66   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv50_vmm_pgt_dma() argument
    69   VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);  in nv50_vmm_pgt_dma()
    71   while (ptes--) {  in nv50_vmm_pgt_dma()
    [all …]
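nv50_vmm_pgt_pte() generalizes the head/body/tail split: line 44 shows it probing whether the next naturally aligned power-of-two block of PTEs fits the remaining run, so large mappings can be written (and compression-tagged, cf. map->ctag on line 39) in the biggest blocks alignment allows. A hedged sketch of just that block-selection step, with hypothetical names and an assumed maximum block of 128 PTEs:

#include <stdio.h>

/* Pick the largest power-of-two run (assumed max 128 PTEs) that both
 * fits the remaining count and is naturally aligned at ptei; a model
 * of the test visible at line 44. */
static unsigned int pick_block(unsigned int ptei, unsigned int ptes)
{
    int log2blk;

    for (log2blk = 7; log2blk > 0; log2blk--) {
        unsigned int pten = 1u << log2blk;

        if (ptes >= pten && (ptei & (pten - 1)) == 0)
            return pten;
    }
    return 1;    /* fall back to a single PTE */
}

int main(void)
{
    unsigned int ptei = 3, ptes = 300;

    while (ptes) {
        unsigned int pten = pick_block(ptei, ptes);

        printf("write %3u PTE(s) at index %u\n", pten, ptei);
        ptei += pten;
        ptes -= pten;
    }
    return 0;
}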
|
D | vmmgf100.c |
    33   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in gf100_vmm_pgt_pte() argument
    39   while (ptes--) {  in gf100_vmm_pgt_pte()
    48   map->type += ptes * map->ctag;  in gf100_vmm_pgt_pte()
    50   while (ptes--) {  in gf100_vmm_pgt_pte()
    59   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gf100_vmm_pgt_sgl() argument
    61   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);  in gf100_vmm_pgt_sgl()
    66   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gf100_vmm_pgt_dma() argument
    69   VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);  in gf100_vmm_pgt_dma()
    71   while (ptes--) {  in gf100_vmm_pgt_dma()
    80   VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);  in gf100_vmm_pgt_dma()
    [all …]
|
D | vmm.c |
    198  const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)  in nvkm_vmm_unref_sptes() argument
    209  for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {  in nvkm_vmm_unref_sptes()
    210  const u32 pten = min(sptn - spti, ptes);  in nvkm_vmm_unref_sptes()
    212  ptes -= pten;  in nvkm_vmm_unref_sptes()
    222  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {  in nvkm_vmm_unref_sptes()
    236  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {  in nvkm_vmm_unref_sptes()
    243  TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);  in nvkm_vmm_unref_sptes()
    244  pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);  in nvkm_vmm_unref_sptes()
    251  TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);  in nvkm_vmm_unref_sptes()
    252  pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);  in nvkm_vmm_unref_sptes()
    [all …]
|
D | vmmgk104.c |
    26   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gk104_vmm_lpt_invalid() argument
    29   VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);  in gk104_vmm_lpt_invalid()
|
D | vmmgm200.c |
    29   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gm200_vmm_pgt_sparse() argument
    32   VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);  in gm200_vmm_pgt_sparse()
|
D | vmm.h |
    54   struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
    58   u32 ptei, u32 ptes, struct nvkm_vmm_map *);
    72   bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
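vmm.h ties the per-file implementations above together: each GPU generation fills a table of PTE callbacks keyed by (ptei, ptes), and the core iterator dispatches through it. A stripped-down sketch of that dispatch-table shape; all names here are hypothetical, not the actual layout of the vmm.h hooks:

#include <stdio.h>

struct pt;    /* opaque page-table handle in this sketch */

/* One slot per PTE operation; every "generation" supplies its own
 * implementations, and core code only ever calls through the table. */
struct pte_ops {
    void (*unmap)(struct pt *pt, unsigned int ptei, unsigned int ptes);
    void (*sparse)(struct pt *pt, unsigned int ptei, unsigned int ptes);
};

static void gen_a_unmap(struct pt *pt, unsigned int ptei, unsigned int ptes)
{
    (void)pt;
    printf("gen-a: clear %u PTE(s) at %u\n", ptes, ptei);
}

static void gen_a_sparse(struct pt *pt, unsigned int ptei, unsigned int ptes)
{
    (void)pt;
    printf("gen-a: mark %u PTE(s) sparse at %u\n", ptes, ptei);
}

static const struct pte_ops gen_a_ops = {
    .unmap  = gen_a_unmap,
    .sparse = gen_a_sparse,
};

int main(void)
{
    gen_a_ops.unmap(NULL, 16, 8);
    gen_a_ops.sparse(NULL, 24, 4);
    return 0;
}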
|
/linux-6.6.21/arch/x86/xen/ |
D | grant-table.c |
    27   pte_t **ptes;  member
    45   set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],  in arch_gnttab_map_shared()
    67   set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],  in arch_gnttab_map_status()
    77   pte_t **ptes;  in arch_gnttab_unmap() local
    82   ptes = gnttab_status_vm_area.ptes;  in arch_gnttab_unmap()
    84   ptes = gnttab_shared_vm_area.ptes;  in arch_gnttab_unmap()
    89   set_pte_at(&init_mm, addr, ptes[i], __pte(0));  in arch_gnttab_unmap()
    98   area->ptes[area->idx++] = pte;  in gnttab_apply()
    104  area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL);  in arch_gnttab_valloc()
    105  if (area->ptes == NULL)  in arch_gnttab_valloc()
    [all …]
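The Xen grant-table code caches PTE locations rather than re-walking page tables: gnttab_apply() (line 98) runs once per page during an apply_to_page_range() walk of a reserved vmalloc area and records each pte_t pointer in the kmalloc_array()'d ptes[] (line 104); the map and unmap paths then poke those cached pointers directly with set_pte_at(). A self-contained userspace model of the collect-then-reuse idea, with all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define NR_FRAMES 4

/* Userspace stand-in for a page table: one entry per frame. */
static unsigned long fake_pt[NR_FRAMES];

struct gnttab_area {
    unsigned long **ptes;    /* cached entry pointers, cf. the line-27 member */
    unsigned int idx;
};

/* Walk callback: remember where each entry lives (cf. gnttab_apply()). */
static void apply(struct gnttab_area *area, unsigned long *pte)
{
    area->ptes[area->idx++] = pte;
}

int main(void)
{
    struct gnttab_area area = { 0 };
    unsigned int i;

    area.ptes = malloc(NR_FRAMES * sizeof(*area.ptes));
    if (!area.ptes)
        return 1;

    /* One-time walk, standing in for apply_to_page_range(). */
    for (i = 0; i < NR_FRAMES; i++)
        apply(&area, &fake_pt[i]);

    /* Later "map": write through the cached pointers, no re-walk. */
    for (i = 0; i < NR_FRAMES; i++)
        *area.ptes[i] = (0x1000ul * (i + 1)) | 1;    /* modeled present bit */

    for (i = 0; i < NR_FRAMES; i++)
        printf("frame %u -> pte %#lx\n", i, fake_pt[i]);

    free(area.ptes);
    return 0;
}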
|
/linux-6.6.21/block/partitions/ |
D | efi.c |
    336  gpt_header **gpt, gpt_entry **ptes)  in is_gpt_valid() argument
    341  if (!ptes)  in is_gpt_valid()
    430  if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))  in is_gpt_valid()
    434  crc = efi_crc32((const unsigned char *) (*ptes), pt_size);  in is_gpt_valid()
    445  kfree(*ptes);  in is_gpt_valid()
    446  *ptes = NULL;  in is_gpt_valid()
    582  gpt_entry **ptes)  in find_valid_gpt() argument
    593  if (!ptes)  in find_valid_gpt()
    643  *ptes = pptes;  in find_valid_gpt()
    652  *ptes = aptes;  in find_valid_gpt()
    [all …]
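In efi.c the ptes are GPT partition table entries, not page table entries: is_gpt_valid() reads the entry array the header points at (line 430), checksums it with efi_crc32() (line 434), and on mismatch frees the array and clears the caller's pointer (lines 445-446) so find_valid_gpt() can fall back to the alternate copy. A hedged standalone model of that check; the kernel's efi_crc32() is plain IEEE CRC-32 seeded and finalized with ~0, which the helper below reimplements bitwise:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Bitwise IEEE CRC-32, equivalent to the kernel's
 * efi_crc32(buf, len) == crc32(~0, buf, len) ^ ~0. */
static uint32_t crc32_ieee(const void *buf, size_t len)
{
    const uint8_t *p = buf;
    uint32_t crc = ~0u;

    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320u : 0);
    }
    return ~crc;
}

/* Minimal stand-in for the on-disk header; the real gpt_header has
 * many more fields (signature, LBAs, its own header CRC, ...). */
struct gpt_header {
    uint32_t num_partition_entries;
    uint32_t sizeof_partition_entry;
    uint32_t partition_entry_array_crc32;
};

/* Model of the line 430..446 logic: reject and free on CRC mismatch. */
static void *read_and_check_ptes(const struct gpt_header *gpt,
                                 const void *disk_entries)
{
    size_t pt_size = (size_t)gpt->num_partition_entries *
                     gpt->sizeof_partition_entry;
    void *ptes = malloc(pt_size);

    if (!ptes)
        return NULL;
    memcpy(ptes, disk_entries, pt_size);    /* stands in for the disk read */

    if (crc32_ieee(ptes, pt_size) != gpt->partition_entry_array_crc32) {
        free(ptes);
        return NULL;    /* caller falls back to the alternate GPT */
    }
    return ptes;
}

int main(void)
{
    uint8_t entries[128] = { 0xaa, 0xbb };
    struct gpt_header gpt = {
        .num_partition_entries = 1,
        .sizeof_partition_entry = sizeof(entries),
        .partition_entry_array_crc32 = crc32_ieee(entries, sizeof(entries)),
    };
    void *ptes = read_and_check_ptes(&gpt, entries);

    printf("entry array %s\n", ptes ? "valid" : "rejected");
    free(ptes);
    return 0;
}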
|
/linux-6.6.21/arch/alpha/kernel/ |
D | pci_iommu.c |
    78   arena->ptes = memblock_alloc(mem_size, align);  in iommu_arena_new_node()
    79   if (!arena->ptes)  in iommu_arena_new_node()
    108  unsigned long *ptes;  in iommu_arena_find_pages() local
    118  ptes = arena->ptes;  in iommu_arena_find_pages()
    130  if (ptes[p+i]) {  in iommu_arena_find_pages()
    164  unsigned long *ptes;  in iommu_arena_alloc() local
    170  ptes = arena->ptes;  in iommu_arena_alloc()
    183  ptes[p+i] = IOMMU_INVALID_PTE;  in iommu_arena_alloc()
    197  p = arena->ptes + ofs;  in iommu_arena_free()
    295  arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);  in pci_map_single_1()
    [all …]
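The Alpha scatter-gather arena is a flat array of PTEs, and allocation is a first-fit scan: iommu_arena_find_pages() walks forward looking for n consecutive zero slots, restarting the window just past any live entry (line 130), and iommu_arena_alloc() stamps the winners with IOMMU_INVALID_PTE to reserve them (line 183) before real mappings are written (line 295). A minimal model of that scan:

#include <stdio.h>

#define ARENA_SIZE     32
#define IOMMU_RESERVED 1ul    /* models IOMMU_INVALID_PTE */

static unsigned long ptes[ARENA_SIZE];

/* First-fit scan for n consecutive free slots, restarting just past
 * any used entry (cf. the line-130 test). */
static long arena_find_pages(unsigned long n)
{
    unsigned long p = 0, i = 0;

    while (i < n && p + n <= ARENA_SIZE) {
        if (ptes[p + i]) {
            p = p + i + 1;    /* conflict: restart after it */
            i = 0;
        } else {
            i++;
        }
    }
    return i == n ? (long)p : -1;
}

/* Reserve the slots so a later mapping can fill in real PTEs
 * (cf. line 183). */
static long arena_alloc(unsigned long n)
{
    long p = arena_find_pages(n);
    unsigned long i;

    if (p >= 0)
        for (i = 0; i < n; i++)
            ptes[p + i] = IOMMU_RESERVED;
    return p;
}

int main(void)
{
    ptes[3] = IOMMU_RESERVED;    /* a pre-existing mapping */
    printf("alloc 4 -> slot %ld\n", arena_alloc(4));    /* skips to 4 */
    printf("alloc 2 -> slot %ld\n", arena_alloc(2));    /* fits at 0 */
    return 0;
}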
|
D | core_titan.c |
    328  port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);  in titan_init_one_pachip_port()
    336  port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);  in titan_init_one_pachip_port()
    463  unsigned long *ptes;  in titan_ioremap() local
    518  ptes = hose->sg_pci->ptes;  in titan_ioremap()
    522  pfn = ptes[baddr >> PAGE_SHIFT];  in titan_ioremap()
    711  pte = aper->arena->ptes[baddr >> PAGE_SHIFT];  in titan_agp_translate()
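core_titan.c and core_marvel.c (next entry) both reverse-translate a bus address that falls in a scatter-gather window by indexing the arena's ptes[] with baddr >> PAGE_SHIFT (lines 522 and 749). In this PTE format, as mk_iommu_pte() on pci_iommu.c line 295 suggests, the pfn is stored shifted left by one with bit 0 as the valid bit, so the lookup strips that bit before use. A small hedged model:

#include <stdio.h>

#define PAGE_SHIFT 13    /* Alpha uses 8 KiB pages */

/* Alpha SG PTE model: pfn shifted left by one, bit 0 = valid. */
static unsigned long long mk_iommu_pte(unsigned long long paddr)
{
    return (paddr >> (PAGE_SHIFT - 1)) | 1;
}

/* Reverse lookup as in titan_ioremap()/marvel_ioremap():
 * bus address -> arena slot -> physical address.
 * Returns 0 when the slot is not mapped (model convention). */
static unsigned long long baddr_to_paddr(const unsigned long long *ptes,
                                         unsigned long long baddr)
{
    unsigned long long pte = ptes[baddr >> PAGE_SHIFT];

    if (!(pte & 1))
        return 0;    /* slot not mapped */
    return (pte >> 1) << PAGE_SHIFT;
}

int main(void)
{
    unsigned long long ptes[4] = { 0 };

    ptes[2] = mk_iommu_pte(0x123456000ull);    /* page-aligned paddr */
    printf("paddr = %#llx\n", baddr_to_paddr(ptes, 2ull << PAGE_SHIFT));
    return 0;
}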
|
D | core_marvel.c |
    295  csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);  in io7_init_hose()
    312  csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);  in io7_init_hose()
    690  unsigned long *ptes;  in marvel_ioremap() local
    745  ptes = hose->sg_pci->ptes;  in marvel_ioremap()
    749  pfn = ptes[baddr >> PAGE_SHIFT];  in marvel_ioremap()
    1003 pte = aper->arena->ptes[baddr >> PAGE_SHIFT];  in marvel_agp_translate()
|
D | core_cia.c |
    464  arena->ptes[4] = pte0;  in verify_tb_operation()
    488  arena->ptes[5] = pte0;  in verify_tb_operation()
    524  arena->ptes[4] = 0;  in verify_tb_operation()
    525  arena->ptes[5] = 0;  in verify_tb_operation()
    737  *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;  in do_init_arch()
|
D | pci_impl.h |
    139  unsigned long *ptes;  member
|
D | core_tsunami.c |
    337  pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);  in tsunami_init_one_pchip()
    341  pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes);  in tsunami_init_one_pchip()
|
/linux-6.6.21/arch/powerpc/include/asm/ |
D | plpar_wrappers.h |
    175  unsigned long *ptes)  in plpar_pte_read_4() argument
    183  memcpy(ptes, retbuf, 8*sizeof(unsigned long));  in plpar_pte_read_4()
    193  unsigned long *ptes)  in plpar_pte_read_4_raw() argument
    201  memcpy(ptes, retbuf, 8*sizeof(unsigned long));  in plpar_pte_read_4_raw()
    399  unsigned long *ptes)  in plpar_pte_read_4() argument
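plpar_pte_read_4() wraps the H_READ hypercall with the read-4 flag: one call returns four hash PTE V/R pairs, eight unsigned longs, which the wrapper memcpy()s from the hypercall return buffer into the caller's array (lines 183 and 201); the second plpar_pte_read_4() at line 399 is presumably the stub for configurations without the hypervisor. Callers such as hashpagetable.c and lpar.c below then view the buffer as an array of four { v, r } pairs and scan it. A hedged model of that consuming side (the HPTE_V_VALID value here is a placeholder, not the real bit layout):

#include <stdio.h>
#include <string.h>

#define HPTE_V_VALID 0x1ul    /* placeholder flag, not the real layout */

struct hash_pte {
    unsigned long v;
    unsigned long r;
};

/* Stand-in for plpar_pte_read_4(): the hypervisor fills a return
 * buffer of eight longs; callers view it as four V/R pairs. */
static long fake_pte_read_4(unsigned long ptex, unsigned long *out)
{
    unsigned long retbuf[8] = {
        0, 0,                                   /* slot ptex + 0: empty */
        (0xabcul << 4) | HPTE_V_VALID, 0x2000,  /* slot ptex + 1: valid */
        0, 0,
        0, 0,
    };

    (void)ptex;
    memcpy(out, retbuf, 8 * sizeof(unsigned long));
    return 0;    /* H_SUCCESS */
}

int main(void)
{
    struct hash_pte ptes[4];
    int j;

    /* Same cast trick the kernel callers use: the struct is exactly
     * two longs, so the pair array overlays the flat buffer. */
    fake_pte_read_4(0, (unsigned long *)ptes);

    for (j = 0; j < 4; j++) {    /* scan, as pseries_find() does */
        if (ptes[j].v & HPTE_V_VALID)
            printf("slot %d: v=%#lx r=%#lx\n", j, ptes[j].v, ptes[j].r);
    }
    return 0;
}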
|
/linux-6.6.21/arch/powerpc/mm/ptdump/ |
D | hashpagetable.c |
    244  } ptes[4];  in pseries_find() local
    262  lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);  in pseries_find()
    267  if (HPTE_V_COMPARE(ptes[j].v, want_v) &&  in pseries_find()
    268  (ptes[j].v & HPTE_V_VALID)) {  in pseries_find()
    270  *v = ptes[j].v;  in pseries_find()
    271  *r = ptes[j].r;  in pseries_find()
|
/linux-6.6.21/arch/powerpc/platforms/pseries/ |
D | lpar.c |
    854  } ptes[4];  in manual_hpte_clear_all() local
    863  lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);  in manual_hpte_clear_all()
    870  if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==  in manual_hpte_clear_all()
    873  if (ptes[j].pteh & HPTE_V_VALID)  in manual_hpte_clear_all()
    875  &(ptes[j].pteh), &(ptes[j].ptel));  in manual_hpte_clear_all()
    965  } ptes[4];  in __pSeries_lpar_hpte_find() local
    969  lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);  in __pSeries_lpar_hpte_find()
    977  if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&  in __pSeries_lpar_hpte_find()
    978  (ptes[j].pteh & HPTE_V_VALID))  in __pSeries_lpar_hpte_find()
|
/linux-6.6.21/arch/x86/kvm/mmu/ |
D | paging_tmpl.h |
    84   pt_element_t ptes[PT_MAX_FULL_LEVELS];  member
    213  pte = orig_pte = walker->ptes[level - 1];  in FNAME()
    254  walker->ptes[level - 1] = pte;  in FNAME()
    425  walker->ptes[walker->level - 1] = pte;  in FNAME()
    581  return r || curr_pte != gw->ptes[level - 1];  in FNAME()
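The FNAME() annotations mean these functions are stamped out by a macro for each guest paging mode. The pattern itself: the guest page table walker records the PTE it saw at every level in walker->ptes[] (lines 254 and 425), and line 581 compares a freshly read guest PTE against that cache to detect that the guest changed a PTE between the walk and the shadow update. A toy self-contained model of a walk that caches per-level entries plus a staleness check; the table layout and bit values are invented:

#include <stdio.h>

#define MAX_LEVELS  4
#define ENTRIES     8          /* toy tables: 8 entries per level */
#define PTE_PRESENT 0x1ul

/* Toy multi-level "guest page table"; tables[0] is the root level. */
static unsigned long tables[MAX_LEVELS][ENTRIES];

struct walker {
    unsigned long ptes[MAX_LEVELS];    /* cf. walker->ptes[level - 1] */
    unsigned long pfn;
};

/* Walk from the root down, caching the PTE seen at each level. */
static int walk(struct walker *w, const unsigned int idx[MAX_LEVELS])
{
    int level;

    for (level = MAX_LEVELS; level >= 1; level--) {
        unsigned long pte = tables[MAX_LEVELS - level][idx[MAX_LEVELS - level]];

        if (!(pte & PTE_PRESENT))
            return -1;                 /* not present: guest fault */
        w->ptes[level - 1] = pte;      /* cache what we saw */
    }
    w->pfn = w->ptes[0] >> 4;
    return 0;
}

/* Staleness check (cf. line 581): did the guest change this PTE
 * since the walk? (Toy: every level uses index 0.) */
static int pte_changed(const struct walker *w, int level)
{
    return tables[MAX_LEVELS - level][0] != w->ptes[level - 1];
}

int main(void)
{
    const unsigned int idx[MAX_LEVELS] = { 0, 0, 0, 0 };
    struct walker w = { { 0 }, 0 };
    int i;

    for (i = 0; i < MAX_LEVELS; i++)
        tables[i][0] = ((0x50ul + i) << 4) | PTE_PRESENT;

    if (walk(&w, idx) == 0)
        printf("mapped pfn %#lx\n", w.pfn);

    tables[MAX_LEVELS - 1][0] ^= PTE_PRESENT;    /* guest edits the leaf */
    printf("leaf changed: %d\n", pte_changed(&w, 1));
    return 0;
}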
|
/linux-6.6.21/arch/powerpc/mm/ |
D | hugetlbpage.c |
    258  void *ptes[];  member
    270  kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);  in hugepd_free_rcu_callback()
    293  (*batchp)->ptes[(*batchp)->index++] = hugepte;  in hugepd_free()
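hugetlbpage.c defers freeing of hugepd page-table fragments: hugepd_free() appends each fragment to a batch whose flexible ptes[] array is the line-258 member, and hugepd_free_rcu_callback() releases the whole batch only after an RCU grace period, so concurrent lockless walkers can never see a freed fragment. A userspace model of the accumulate-now, free-later shape; the batch-full trigger below stands in for the RCU hand-off:

#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 8

/* Batch of pointers awaiting release; models the freelist struct
 * with its void *ptes[] flexible member (line 258). */
struct free_batch {
    unsigned int index;
    void *ptes[BATCH_MAX];
};

static struct free_batch *batch;

/* Stand-in for hugepd_free_rcu_callback(): in the kernel this runs
 * only after an RCU grace period. */
static void batch_free_callback(struct free_batch *b)
{
    unsigned int i;

    for (i = 0; i < b->index; i++)
        free(b->ptes[i]);    /* kmem_cache_free() in the kernel */
    free(b);
}

/* Queue one fragment for deferred freeing (cf. line 293). */
static void defer_free(void *hugepte)
{
    if (!batch) {
        batch = calloc(1, sizeof(*batch));
        if (!batch)
            exit(1);
    }
    batch->ptes[batch->index++] = hugepte;

    if (batch->index == BATCH_MAX) {    /* full: hand off to "RCU" */
        batch_free_callback(batch);
        batch = NULL;
    }
}

int main(void)
{
    int i;

    for (i = 0; i < 10; i++)
        defer_free(malloc(64));

    if (batch) {    /* flush the partial batch */
        batch_free_callback(batch);
        batch = NULL;
    }
    puts("all fragments released");
    return 0;
}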
|
/linux-6.6.21/Documentation/virt/kvm/x86/ |
D | mmu.rst |
    131  Leaf ptes point at guest pages.
    133  The following table shows translations encoded by leaf ptes, with higher-level
    165  sptes. That means a guest page table contains more ptes than the host,
    174  Inherited guest access permissions from the parent ptes in the form uwx.
    232  The reverse mapping for the pte/ptes pointing at this page's spt. If
    240  changed but before the tlb entry is flushed. Accordingly, unsync ptes
    268  The mmu maintains a reverse mapping whereby all ptes mapping a page can be
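These mmu.rst lines describe KVM's reverse map: for each guest frame, a list of every shadow PTE mapping it, so operations like write-protection can find all mappings cheaply (lines 232 and 268). A toy model of such an rmap; the bucket layout and bit values are invented for illustration:

#include <stdio.h>

#define NR_GFNS  4
#define RMAP_MAX 4

/* Toy reverse map: for each guest frame number, every shadow PTE
 * slot that currently maps it. */
struct rmap_bucket {
    unsigned int count;
    unsigned long *sptes[RMAP_MAX];
};

static struct rmap_bucket rmap[NR_GFNS];

static void rmap_add(unsigned int gfn, unsigned long *spte)
{
    struct rmap_bucket *b = &rmap[gfn];

    if (b->count < RMAP_MAX)
        b->sptes[b->count++] = spte;
}

/* Write-protect one guest page: clear the writable bit in every
 * spte that maps it; the lookup the rmap exists to make cheap. */
static void rmap_write_protect(unsigned int gfn, unsigned long writable_bit)
{
    struct rmap_bucket *b = &rmap[gfn];
    unsigned int i;

    for (i = 0; i < b->count; i++)
        *b->sptes[i] &= ~writable_bit;
}

int main(void)
{
    unsigned long spte_a = 0x1003, spte_b = 0x1003;    /* bit 1 = "writable" */

    rmap_add(2, &spte_a);
    rmap_add(2, &spte_b);
    rmap_write_protect(2, 0x2);
    printf("sptes after write-protect: %#lx %#lx\n", spte_a, spte_b);
    return 0;
}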
|
/linux-6.6.21/Documentation/translations/zh_CN/mm/ |
D | page_migration.rst |
    110  16. If migration entries were inserted into the page table, then replace them with real ptes. Doing so will enable access for processes not already waiting for the page lock.
|