// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_MAX_LEVELS		4U

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1U
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2U
#endif
static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
		parange = ID_AA64MMFR0_EL1_PARANGE_MAX;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PHYS_INVALID		(-1ULL)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16) {
		pa &= GENMASK(51, 48);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}
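
/*
 * Worked example of the 52-bit PA encoding above, assuming 64K pages
 * (PAGE_SHIFT == 16) and an illustrative address: PA bits [51:48] are
 * stored in PTE bits [15:12], so the round-trip is lossless.
 *
 *	pa  = 0x000f000000010000;		// PA bits [51:48] = 0xf
 *	pte = kvm_phys_to_pte(pa);		// 0x000000000001f000
 *	pa  = kvm_pte_to_phys(pte);		// 0x000f000000010000 again
 */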

static inline u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(u32 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level < KVM_PGTABLE_MAX_LEVELS; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) expands to ((size & (size - 1)) == 0) */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
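
/*
 * Usage sketch: with 4K pages, kvm_supported_block_sizes() reports 1GB
 * (level 1), 2MB (level 2) and 4KB (level 3), so a hypothetical caller
 * can test a candidate mapping size before using it:
 *
 *	if (kvm_is_block_size_supported(SZ_2M))
 *		; // safe to attempt a 2MB block mapping
 */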
112
113 /**
114 * struct kvm_pgtable_mm_ops - Memory management callbacks.
115 * @zalloc_page: Allocate a single zeroed memory page.
116 * The @arg parameter can be used by the walker
117 * to pass a memcache. The initial refcount of
118 * the page is 1.
119 * @zalloc_pages_exact: Allocate an exact number of zeroed memory pages.
120 * The @size parameter is in bytes, and is rounded
121 * up to the next page boundary. The resulting
122 * allocation is physically contiguous.
123 * @free_pages_exact: Free an exact number of memory pages previously
124 * allocated by zalloc_pages_exact.
125 * @free_unlinked_table: Free an unlinked paging structure by unlinking and
126 * dropping references.
127 * @get_page: Increment the refcount on a page.
128 * @put_page: Decrement the refcount on a page. When the
129 * refcount reaches 0 the page is automatically
130 * freed.
131 * @page_count: Return the refcount of a page.
132 * @phys_to_virt: Convert a physical address into a virtual
133 * address mapped in the current context.
134 * @virt_to_phys: Convert a virtual address mapped in the current
135 * context into a physical address.
136 * @dcache_clean_inval_poc: Clean and invalidate the data cache to the PoC
137 * for the specified memory address range.
138 * @icache_inval_pou: Invalidate the instruction cache to the PoU
139 * for the specified memory address range.
140 */
141 struct kvm_pgtable_mm_ops {
142 void* (*zalloc_page)(void *arg);
143 void* (*zalloc_pages_exact)(size_t size);
144 void (*free_pages_exact)(void *addr, size_t size);
145 void (*free_unlinked_table)(void *addr, u32 level);
146 void (*get_page)(void *addr);
147 void (*put_page)(void *addr);
148 int (*page_count)(void *addr);
149 void* (*phys_to_virt)(phys_addr_t phys);
150 phys_addr_t (*virt_to_phys)(void *addr);
151 void (*dcache_clean_inval_poc)(void *addr, size_t size);
152 void (*icache_inval_pou)(void *addr, size_t size);
153 };
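
/*
 * A minimal sketch of supplying kvm_pgtable_mm_ops from kernel context.
 * The my_*() callbacks are hypothetical stand-ins; in-tree users (see
 * arch/arm64/kvm/) provide their own implementations.
 *
 *	static struct kvm_pgtable_mm_ops my_mm_ops = {
 *		.zalloc_page	= my_zalloc_page,
 *		.get_page	= my_get_page,
 *		.put_page	= my_put_page,
 *		.page_count	= my_page_count,
 *		.phys_to_virt	= my_phys_to_virt,
 *		.virt_to_phys	= my_virt_to_phys,
 *	};
 */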

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)
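
/*
 * Example prot compositions using the definitions above: a read-only
 * executable mapping and a hypervisor device mapping. The SW0-SW3 bits
 * are reserved for software use and ignored by the hardware walker.
 *
 *	enum kvm_pgtable_prot rx  = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X;
 *	enum kvm_pgtable_prot dev = PAGE_HYP_DEVICE;
 */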

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without Break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without the cache maintenance
 *					operations that would otherwise
 *					be required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	u32					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
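
/*
 * A minimal walker sketch (hypothetical): count the valid leaf entries
 * in a range. Returning 0 from the callback continues the walk; a
 * negative error code aborts it (see kvm_pgtable_walk() below).
 *
 *	static int count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				  enum kvm_pgtable_walk_flags visit)
 *	{
 *		u64 *count = ctx->arg;
 *
 *		if (kvm_pte_valid(ctx->old))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	u64 count = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &count,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *	kvm_pgtable_walk(pgt, addr, size, &walker);
 */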

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pteref_t				pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
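
/*
 * Usage sketch (illustrative variables): map one page of normal memory
 * into the hypervisor stage-1 at va. Error handling elided.
 *
 *	err = kvm_pgtable_hyp_map(&hyp_pgtable, va, PAGE_SIZE, phys,
 *				  PAGE_HYP);
 */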

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. HA Flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
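
/*
 * Usage sketch: build VTCR_EL2 from the system-wide sanitised ID
 * registers via the arm64 cpufeature helper; the 40-bit IPA size here
 * is an illustrative choice.
 *
 *	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 *	u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 *	u64 vtcr  = kvm_get_vtcr(mmfr0, mmfr1, 40);
 */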

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, u32 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it attempts to recreate the exact same mapping or only to change the
 * access permissions. Instead, the vCPU will exit one more time from the
 * guest if still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
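
/*
 * Usage sketch (illustrative variables): install a 2MB RWX mapping at
 * ipa, drawing page-table pages from a pre-topped-up memcache.
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, SZ_2M, phys,
 *				     KVM_PGTABLE_PROT_RWX, &cache, 0);
 */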

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as allowed by the capacity of @mc.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level);
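
/*
 * Usage sketch: look up the leaf entry translating ipa and, if valid,
 * decode its permissions with kvm_pgtable_stage2_pte_prot() (declared
 * below).
 *
 *	kvm_pte_t pte;
 *	u32 level;
 *
 *	if (!kvm_pgtable_get_leaf(pgt, ipa, &pte, &level) &&
 *	    kvm_pte_valid(pte))
 *		prot = kvm_pgtable_stage2_pte_prot(pte);
 */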

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 *
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base Intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);
#endif	/* __ARM64_KVM_PGTABLE_H__ */