/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/pgtable.h>

/*
 * PGDIR_SHIFT determines the size that a top-level page table entry can map
 * and depends on the number of levels in the page table. Compute the
 * PGDIR_SHIFT for a given number of levels.
 */
#define pt_levels_pgdir_shift(lvls)	ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))

/*
 * The hardware supports concatenation of up to 16 tables at the stage2 entry
 * level and we use the feature whenever possible, which means we resolve 4
 * additional bits of address at the entry level.
 *
 * This implies that the total number of page table levels required for
 * IPA_SHIFT at stage2 expected by the hardware can be calculated using
 * the same logic used for the (non-collapsible) stage1 page tables, but for
 * (IPA_SHIFT - 4).
 */
#define stage2_pgtable_levels(ipa)	ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define kvm_stage2_levels(kvm)		VTCR_EL2_LVLS(kvm->arch.vtcr)

/* stage2_pgdir_shift() is the size mapped by the top-level stage2 entry for the VM */
#define stage2_pgdir_shift(kvm)		pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
#define stage2_pgdir_size(kvm)		(1ULL << stage2_pgdir_shift(kvm))
#define stage2_pgdir_mask(kvm)		~(stage2_pgdir_size(kvm) - 1)

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at
 * VM creation.
 */
#define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)

static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);

	return (boundary - 1 < end - 1) ? boundary : end;
}

#endif	/* __ARM64_S2_PGTABLE_H_ */
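
/*
 * Illustrative standalone sketch (not part of the header above): a worked
 * example of the level/shift arithmetic, assuming 4K pages (PAGE_SHIFT = 12)
 * and the generic arm64 helpers ARM64_HW_PGTABLE_LEVELS() and
 * ARM64_HW_PGTABLE_LEVEL_SHIFT() from asm/pgtable-hwdef.h. The EXAMPLE_*
 * macros and example_* helpers below are local re-derivations for this
 * sketch only, not kernel code.
 *
 * For a 40-bit IPA: concatenation absorbs 4 bits, so 3 levels suffice, the
 * entry level maps 2^30 bytes per entry, and resolving IPA bits [39:30]
 * takes 1024 entries, i.e. two concatenated 4K tables. kvm_mmu_cache_min_pages()
 * would then be 3 - 1 = 2.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT			12	/* 4K pages (assumption) */
#define EXAMPLE_HW_PGTABLE_LEVELS(va_bits)	(((va_bits) - 4) / (EXAMPLE_PAGE_SHIFT - 3))
#define EXAMPLE_HW_PGTABLE_LEVEL_SHIFT(n)	((EXAMPLE_PAGE_SHIFT - 3) * (4 - (n)) + 3)

#define example_stage2_pgtable_levels(ipa)	EXAMPLE_HW_PGTABLE_LEVELS((ipa) - 4)
#define example_pt_levels_pgdir_shift(lvls)	EXAMPLE_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))

/*
 * Mirror of the stage2_pgd_addr_end() logic for the example: round addr up
 * to the next entry-level boundary (here 1GB) and clamp to end. The "- 1"
 * comparison keeps the clamp correct even if boundary wraps to 0.
 */
static unsigned long long example_pgd_addr_end(unsigned long long addr,
					       unsigned long long end,
					       int pgdir_shift)
{
	unsigned long long size = 1ULL << pgdir_shift;
	unsigned long long boundary = (addr + size) & ~(size - 1);

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	int ipa = 40;						/* 40-bit IPA space */
	int lvls = example_stage2_pgtable_levels(ipa);		/* (40 - 4 - 4) / 9 = 3 */
	int shift = example_pt_levels_pgdir_shift(lvls);	/* 9 * (4 - 1) + 3 = 30 */
	int entry_bits = ipa - shift;				/* bits resolved at entry level: 10 */
	int tables = 1 << (entry_bits - (EXAMPLE_PAGE_SHIFT - 3));	/* concatenated tables: 2 */
	unsigned long long next;

	printf("levels=%d pgdir_shift=%d entry_bits=%d concatenated_tables=%d\n",
	       lvls, shift, entry_bits, tables);

	/* Walking from 0x40123456 towards 0xC0000000 stops at the next 1GB boundary. */
	next = example_pgd_addr_end(0x40123456ULL, 0xC0000000ULL, shift);
	printf("next pgdir boundary = 0x%llx\n", next);		/* 0x80000000 */

	return 0;
}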