/linux-6.1.9/drivers/net/ethernet/mellanox/mlxsw/

D | spectrum_pgt.c |
      32  mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
      33  index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,  in mlxsw_sp_pgt_mid_alloc()
      34                    mlxsw_sp->pgt->end_index, GFP_KERNEL);  in mlxsw_sp_pgt_mid_alloc()
      42  mutex_unlock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
      46  mutex_unlock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
      52  mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_free()
      53  WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));  in mlxsw_sp_pgt_mid_free()
      54  mutex_unlock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_free()
      63  mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc_range()
      70  idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);  in mlxsw_sp_pgt_mid_alloc_range()
      [all …]
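The spectrum_pgt.c hits above show the stock kernel pattern of handing out table indexes from an IDR serialized by a mutex. A minimal sketch of that pattern follows; the struct and function names are illustrative, only idr_alloc(), idr_remove() and the mutex calls are real kernel APIs, and tbl->idr is assumed to have been idr_init()'d.

      #include <linux/idr.h>
      #include <linux/mutex.h>

      struct pgt_table {                 /* illustrative, not mlxsw's */
              struct idr idr;            /* index -> entry map */
              struct mutex lock;         /* serializes alloc/free */
              u16 end_index;             /* first index past the table */
      };

      static int pgt_index_alloc(struct pgt_table *tbl, u16 *p_index)
      {
              int index;

              mutex_lock(&tbl->lock);
              /* Reserve a free slot in [0, end_index); store NULL for now. */
              index = idr_alloc(&tbl->idr, NULL, 0, tbl->end_index, GFP_KERNEL);
              mutex_unlock(&tbl->lock);
              if (index < 0)
                      return index;

              *p_index = index;
              return 0;
      }

      static void pgt_index_free(struct pgt_table *tbl, u16 index)
      {
              mutex_lock(&tbl->lock);
              idr_remove(&tbl->idr, index);  /* returns the stored pointer */
              mutex_unlock(&tbl->lock);
      }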
D | spectrum.h |
      211  struct mlxsw_sp_pgt *pgt;  (member)
/linux-6.1.9/arch/arm64/kvm/hyp/

D | pgtable.c |
      53  struct kvm_pgtable *pgt;  (member)
      91  static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)  in __kvm_pgd_page_idx() (argument)
      93  u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */  in __kvm_pgd_page_idx()
      94  u64 mask = BIT(pgt->ia_bits) - 1;  in __kvm_pgd_page_idx()
      101 return __kvm_pgd_page_idx(data->pgt, data->addr);  in kvm_pgd_page_idx()
      106 struct kvm_pgtable pgt = {  in kvm_pgd_pages() (local)
      111 return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;  in kvm_pgd_pages()
      216 childp = kvm_pte_follow(pte, data->pgt->mm_ops);  in __kvm_pgtable_visit()
      257 struct kvm_pgtable *pgt = data->pgt;  in _kvm_pgtable_walk() (local)
      258 u64 limit = BIT(pgt->ia_bits);  in _kvm_pgtable_walk()
      [all …]
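The __kvm_pgd_page_idx() hits encode how many concatenated PGD pages a stage-2 table needs: mask the address to ia_bits, then shift by the granule shift of the level above start_level. A user-space sketch of the same arithmetic, assuming a 4 KiB granule; these are not the kernel's actual helpers.

      #include <stdint.h>
      #include <stdio.h>

      #define PAGE_SHIFT 12   /* 4 KiB granule assumed */

      /* Address bits translated below a table at 'level' (arm64 convention,
       * levels -1..3 for a 4 KiB granule). */
      static uint64_t granule_shift(int level)
      {
              return (uint64_t)(4 - level) * (PAGE_SHIFT - 3) + 3;
      }

      static uint32_t pgd_page_idx(uint32_t ia_bits, int start_level, uint64_t addr)
      {
              uint64_t shift = granule_shift(start_level - 1); /* parent level */
              uint64_t mask = (UINT64_C(1) << ia_bits) - 1;

              return (uint32_t)((addr & mask) >> shift);
      }

      int main(void)
      {
              /* 40-bit IPA starting at level 1: index of the last address + 1. */
              printf("pgd pages: %u\n", pgd_page_idx(40, 1, UINT64_MAX) + 1);
              return 0;   /* prints 2: two concatenated level-1 pages */
      }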
/linux-6.1.9/arch/arm64/include/asm/

D | kvm_pgtable.h |
      228  int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
      238  void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
      259  int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
      281  u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
      310  int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
      315  #define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \  (argument)
      316          __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
      325  void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
      355  int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
      376  int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
      [all …]
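Read together, these declarations form an init/map/unmap/destroy lifecycle. A hedged sketch of the hyp-side call order; error paths are trimmed, and my_mm_ops, the addresses and the 48-bit VA size are placeholders rather than values the header prescribes.

      /* Sketch: expected call order for the hyp page-table API above. */
      static int hyp_map_example(struct kvm_pgtable *pgt,
                                 struct kvm_pgtable_mm_ops *my_mm_ops,
                                 u64 va, u64 size, u64 phys,
                                 enum kvm_pgtable_prot prot)
      {
              int ret;

              ret = kvm_pgtable_hyp_init(pgt, 48 /* va_bits */, my_mm_ops);
              if (ret)
                      return ret;

              ret = kvm_pgtable_hyp_map(pgt, va, size, phys, prot);
              if (ret)
                      goto out_destroy;

              /* ... mapping in use; tear down when done ... */
              kvm_pgtable_hyp_unmap(pgt, va, size);
      out_destroy:
              kvm_pgtable_hyp_destroy(pgt);
              return ret;
      }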
D | kvm_host.h |
      94  struct kvm_pgtable *pgt;  (member)
/linux-6.1.9/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

D | vmm.c |
      30  struct nvkm_vmm_pt *pgt = *ppgt;  in nvkm_vmm_pt_del() (local)
      31  if (pgt) {  in nvkm_vmm_pt_del()
      32  kvfree(pgt->pde);  in nvkm_vmm_pt_del()
      33  kfree(pgt);  in nvkm_vmm_pt_del()
      44  struct nvkm_vmm_pt *pgt;  in nvkm_vmm_pt_new() (local)
      56  if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))  in nvkm_vmm_pt_new()
      58  pgt->page = page ? page->shift : 0;  in nvkm_vmm_pt_new()
      59  pgt->sparse = sparse;  in nvkm_vmm_pt_new()
      62  pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);  in nvkm_vmm_pt_new()
      63  if (!pgt->pde) {  in nvkm_vmm_pt_new()
      [all …]
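nvkm_vmm_pt_new()/nvkm_vmm_pt_del() pair a kzalloc() of a small header with a trailing byte array against a kvcalloc() of a large, possibly vmalloc-backed PDE array, unwound in reverse on failure. A generic sketch of that allocation shape; the struct and field names are illustrative, not nouveau's.

      #include <linux/slab.h>
      #include <linux/mm.h>

      struct pt_node {                   /* illustrative */
              void **pde;                /* child pointers, kvcalloc()'d */
              u8 sparse;
              u8 lpte[];                 /* trailing per-PTE flags */
      };

      static struct pt_node *pt_node_new(u32 nr_lpte, u32 nr_pde, bool sparse)
      {
              struct pt_node *node;

              node = kzalloc(struct_size(node, lpte, nr_lpte), GFP_KERNEL);
              if (!node)
                      return NULL;
              node->sparse = sparse;

              node->pde = kvcalloc(nr_pde, sizeof(*node->pde), GFP_KERNEL);
              if (!node->pde) {
                      kfree(node);       /* unwind in reverse order */
                      return NULL;
              }
              return node;
      }

      static void pt_node_del(struct pt_node *node)
      {
              if (node) {
                      kvfree(node->pde);
                      kfree(node);
              }
      }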
D | vmmgp100.c |
      238  struct nvkm_vmm_pt *pgt = pgd->pde[pdei];  in gp100_vmm_pd0_pde() (local)
      242  if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))  in gp100_vmm_pd0_pde()
      244  if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))  in gp100_vmm_pd0_pde()
      365  struct nvkm_vmm_pt *pgt = pgd->pde[pdei];  in gp100_vmm_pd1_pde() (local)
      369  if (!gp100_vmm_pde(pgt->pt[0], &data))  in gp100_vmm_pd1_pde()
D | vmmnv50.c |
      106  nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)  in nv50_vmm_pde() (argument)
      110  if (pgt && (pt = pgt->pt[0])) {  in nv50_vmm_pde()
      111  switch (pgt->page) {  in nv50_vmm_pde()
D | vmmgf100.c |
      108  struct nvkm_vmm_pt *pgt = pgd->pde[pdei];  in gf100_vmm_pgd_pde() (local)
      113  if ((pt = pgt->pt[0])) {  in gf100_vmm_pgd_pde()
      127  if ((pt = pgt->pt[1])) {  in gf100_vmm_pgd_pde()
/linux-6.1.9/arch/arm64/kvm/

D | mmu.c |
      58   struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;  in stage2_apply_range() (local)
      59   if (!pgt)  in stage2_apply_range()
      63   ret = fn(pgt, addr, next - addr);  in stage2_apply_range()
      642  struct kvm_pgtable pgt = {  in get_user_mapping_size() (local)
      653  ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);  in get_user_mapping_size()
      686  struct kvm_pgtable *pgt;  in kvm_init_stage2_mmu() (local)
      688  if (mmu->pgt != NULL) {  in kvm_init_stage2_mmu()
      693  pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);  in kvm_init_stage2_mmu()
      694  if (!pgt)  in kvm_init_stage2_mmu()
      698  err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);  in kvm_init_stage2_mmu()
      [all …]
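stage2_apply_range() shows the chunked-range idiom: bail out if the table is already gone, then apply fn over bounded pieces rather than the whole range at once. A sketch of that loop under an assumed fixed batch size; the kernel derives 'next' from its own stage2 range helper, and ALIGN()/min()/SZ_512M are the usual kernel macros.

      typedef int (*stage2_fn)(struct kvm_pgtable *pgt, u64 addr, u64 size);

      static int apply_range_sketch(struct kvm_pgtable *pgt, u64 addr, u64 end,
                                    stage2_fn fn)
      {
              const u64 chunk = SZ_512M;      /* illustrative batch size */
              int ret = 0;

              if (!pgt)                       /* table already torn down */
                      return -EINVAL;

              while (addr < end) {
                      /* Next chunk boundary above addr, capped at end. */
                      u64 next = min(end, ALIGN(addr + 1, chunk));

                      ret = fn(pgt, addr, next - addr);
                      if (ret)
                              break;
                      addr = next;
              }
              return ret;
      }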
/linux-6.1.9/drivers/gpu/drm/nouveau/nvkm/engine/dma/

D | usernv04.c |
      52  struct nvkm_memory *pgt =  in nv04_dmaobj_bind() (local)
      55  return nvkm_gpuobj_wrap(pgt, pgpuobj);  in nv04_dmaobj_bind()
      56  nvkm_kmap(pgt);  in nv04_dmaobj_bind()
      57  offset = nvkm_ro32(pgt, 8 + (offset >> 10));  in nv04_dmaobj_bind()
      59  nvkm_done(pgt);  in nv04_dmaobj_bind()
/linux-6.1.9/arch/arm64/kvm/hyp/nvhe/

D | mem_protect.c |
      133  ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,  in kvm_host_prepare_stage2()
      139  mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);  in kvm_host_prepare_stage2()
      140  mmu->pgt = &host_kvm.pgt;  in kvm_host_prepare_stage2()
      178  struct kvm_pgtable *pgt = &host_kvm.pgt;  in host_stage2_unmap_dev_all() (local)
      186  ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);  in host_stage2_unmap_dev_all()
      190  return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);  in host_stage2_unmap_dev_all()
      253  return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,  in __host_stage2_idmap()
      290  ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);  in host_stage2_adjust_range()
      322  return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,  in host_stage2_set_owner_locked()
      434  static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,  in check_page_state_range() (argument)
      [all …]
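host_stage2_unmap_dev_all() walks the gaps between memory regions: unmap everything below each region's base, skip the region itself, then unmap the tail up to BIT(pgt->ia_bits). A sketch of that gap walk with a placeholder region array, assuming the regions are sorted and non-overlapping (the kernel iterates its memblock list instead).

      struct mem_region { u64 base, size; };    /* placeholder */

      static int unmap_non_memory(struct kvm_pgtable *pgt,
                                  const struct mem_region *regs, int nr)
      {
              u64 addr = 0;
              int i, ret;

              for (i = 0; i < nr; i++) {
                      /* Unmap the hole below this region, then skip it. */
                      ret = kvm_pgtable_stage2_unmap(pgt, addr,
                                                     regs[i].base - addr);
                      if (ret)
                              return ret;
                      addr = regs[i].base + regs[i].size;
              }
              /* Tail: from the last region to the top of the IPA space. */
              return kvm_pgtable_stage2_unmap(pgt, addr,
                                              BIT(pgt->ia_bits) - addr);
      }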
/linux-6.1.9/arch/s390/kvm/

D | gaccess.c |
      1273  unsigned long *pgt, int *dat_protection,  in kvm_s390_shadow_tables() (argument)
      1327  *pgt = ptr + vaddr.rfx * 8;  in kvm_s390_shadow_tables()
      1354  *pgt = ptr + vaddr.rsx * 8;  in kvm_s390_shadow_tables()
      1382  *pgt = ptr + vaddr.rtx * 8;  in kvm_s390_shadow_tables()
      1419  *pgt = ptr + vaddr.sx * 8;  in kvm_s390_shadow_tables()
      1445  *pgt = ptr;  in kvm_s390_shadow_tables()
      1468  unsigned long pgt = 0;  in kvm_s390_shadow_fault() (local)
      1480  rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);  in kvm_s390_shadow_fault()
      1482  rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,  in kvm_s390_shadow_fault()
      1487  pte.val = pgt + vaddr.px * PAGE_SIZE;  in kvm_s390_shadow_fault()
      [all …]
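Each kvm_s390_shadow_tables() hit writes back ptr plus a per-level index times 8: s390 DAT table entries are 8 bytes wide, so the level's index field of the guest address selects the entry within the table whose origin the previous level supplied. A one-liner capturing that addressing (names illustrative):

      #include <stdint.h>

      /* Sketch: DAT entries are 8 bytes, so entry i of a table sits at
       * table_origin + i * 8, e.g. *pgt = dat_entry_addr(ptr, vaddr.rfx). */
      static inline uint64_t dat_entry_addr(uint64_t table_origin,
                                            unsigned int index)
      {
              return table_origin + (uint64_t)index * 8;
      }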
/linux-6.1.9/arch/s390/include/asm/

D | gmap.h |
      135  int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
      138  unsigned long *pgt, int *dat_protection, int *fake);
/linux-6.1.9/arch/s390/mm/

D | gmap.c |
      1320  unsigned long *pgt)  in __gmap_unshadow_pgt() (argument)
      1326  pgt[i] = _PAGE_INVALID;  in __gmap_unshadow_pgt()
      1338  unsigned long sto, *ste, *pgt;  in gmap_unshadow_pgt() (local)
      1348  pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);  in gmap_unshadow_pgt()
      1350  __gmap_unshadow_pgt(sg, raddr, pgt);  in gmap_unshadow_pgt()
      1352  page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);  in gmap_unshadow_pgt()
      1368  unsigned long *pgt;  in __gmap_unshadow_sgt() (local)
      1376  pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);  in __gmap_unshadow_sgt()
      1378  __gmap_unshadow_pgt(sg, raddr, pgt);  in __gmap_unshadow_sgt()
      1380  page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);  in __gmap_unshadow_sgt()
      [all …]
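__gmap_unshadow_pgt() wipes a shadow page table by storing _PAGE_INVALID into every slot, after which its callers recover the table's struct page from the kernel virtual address to free it. A generic sketch of that step; the entry count and invalid pattern are parameters here rather than the s390 constants.

      /* Sketch: invalidate every entry, then recover the backing page. */
      static void unshadow_table(unsigned long *table, unsigned long nr_entries,
                                 unsigned long invalid)
      {
              unsigned long i;

              for (i = 0; i < nr_entries; i++)
                      table[i] = invalid;        /* e.g. _PAGE_INVALID */

              /* Callers then locate the page backing the table with
               *   page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
               * before unlinking and freeing it. */
      }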
/linux-6.1.9/arch/powerpc/kvm/

D | book3s_64_mmu_radix.c |
      1298  pgd_t *pgt;  in debugfs_radix_read() (local)
      1336  pgt = NULL;  in debugfs_radix_read()
      1340  pgt = NULL;  in debugfs_radix_read()
      1350  if (!pgt) {  in debugfs_radix_read()
      1352  pgt = kvm->arch.pgtable;  in debugfs_radix_read()
      1359  pgt = nested->shadow_pgtable;  in debugfs_radix_read()
      1368  "pgdir: %lx\n", (unsigned long)pgt);  in debugfs_radix_read()
      1373  pgdp = pgt + pgd_index(gpa);  in debugfs_radix_read()
/linux-6.1.9/arch/arm64/kvm/hyp/include/nvhe/

D | mem_protect.h |
      48  struct kvm_pgtable pgt;  (member)
/linux-6.1.9/drivers/misc/habanalabs/common/mmu/

D | mmu.c |
      981  u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,  in hl_mmu_hr_pte_phys_to_virt() (argument)
      987  return pgt->virt_addr + pte_offset;  in hl_mmu_hr_pte_phys_to_virt()
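hl_mmu_hr_pte_phys_to_virt() rebases a PTE's physical address onto the host-resident copy of its table: keep the PTE's offset within the table, add it to the table's virtual address. A sketch of that translation, assuming the table is naturally aligned to its size; the names are illustrative, not habanalabs' fields.

      /* Sketch: physical PTE address -> host-virtual address. */
      static inline void *pte_phys_to_virt(void *table_virt_base,
                                           u64 phys_pte_addr, u64 table_size)
      {
              /* Low bits = offset within the naturally aligned table. */
              u64 pte_offset = phys_pte_addr & (table_size - 1);

              return (u8 *)table_virt_base + pte_offset;
      }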
/linux-6.1.9/arch/x86/events/intel/

D | uncore_nhmex.c |
      877  DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
/linux-6.1.9/drivers/net/ethernet/broadcom/bnx2x/

D | bnx2x_self_test.c |
      78   static int pgt(struct st_pred_args *args)  in pgt() (function)
      395  NA, 1, 0, pgt,
/linux-6.1.9/drivers/misc/habanalabs/common/

D | habanalabs.h |
      3649  u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, u64 phys_pte_addr,