// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

/* Maximum number of regions in the hypervisor's copy of the memory map. */
#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

/* Size of the page-aligned vmemmap slice needed to cover @reg. */
static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
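/*
 * Illustrative example, assuming 4 KiB pages and a hypothetical 8-byte
 * vmemmap entry: a 512 MiB region at base 0x80000000 covers 131072
 * pages, so its vmemmap slice is 131072 * 8 bytes = 1 MiB, starting
 * (0x80000000 >> 12) * 8 = 4 MiB into the vmemmap. Both bounds are
 * already page-aligned there, so the function returns SZ_1M.
 */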

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

/* Pages needed for the table of VM handles (one pointer slot per VM). */
static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}
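/*
 * Illustrative sizing: with KVM_MAX_PVMS == 255 and 8-byte pointers,
 * the table needs 2040 bytes, which PAGE_ALIGN() rounds up to a single
 * 4 KiB page, so this returns 1 on such a configuration.
 */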

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision for the worst case: a full set of tables at every level */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
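/*
 * Illustrative example, assuming 4 KiB pages (PTRS_PER_PTE == 512) and
 * KVM_PGTABLE_MAX_LEVELS == 4: mapping 1 GiB (262144 pages) needs at
 * most DIV_ROUND_UP(262144, 512) = 512 last-level tables, then one
 * table at each remaining level, i.e. 512 + 1 + 1 + 1 = 515 pages.
 */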

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];

		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}
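/*
 * Under the same illustrative 4 KiB / 4-level assumptions as above, the
 * 1 GiB private-mapping allowance adds __hyp_pgtable_max_pages(262144),
 * i.e. 515 pages, on top of the tables covering all of memory.
 */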

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}
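/*
 * Note: the 16-page headroom corresponds to the architectural maximum
 * of 16 concatenated stage-2 tables at the initial lookup level, which
 * a stage-2 configuration may use to reduce the number of walk levels.
 */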

#define KVM_FFA_MBOX_NR_PAGES	1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
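/*
 * Illustrative sizing, assuming 4 KiB pages, SG_MAX_SEGMENTS == 128 and
 * 16-byte address-range entries: desc_max is roughly 2 KiB plus the
 * fixed headers, so the descriptor buffer fits in a single page and the
 * function returns 2 + 1 = 3 pages.
 */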

#endif	/* __ARM64_KVM_PKVM_H__ */