// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

static struct hyp_pool host_s2_pool;

const u8 pkvm_hyp_id = 1;

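/*
 * The host and hyp page-tables are protected by separate locks. When both
 * must be held, take them in the order given by enum pkvm_component_id
 * below: host first, then hyp (see __pkvm_host_share_hyp()).
 */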
static void host_lock_component(void)
{
	hyp_spin_lock(&host_kvm.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_kvm.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}

static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
	 * so there should be no need to free any of the tail pages to make the
	 * allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

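/*
 * Hand the pages at @pgt_pool_base over to the host stage-2 allocator pool
 * and wire up the memory-management callbacks used by the page-table code.
 */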
static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_kvm.lock);
	mmu->arch = &host_kvm.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
	mmu->pgt = &host_kvm.pgt;
	atomic64_set(&mmu->vmid.id, 0);

	return 0;
}

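/*
 * Enable the host stage-2 translation on the calling CPU. Fails with -EPERM
 * if stage-2 is already enabled, i.e. HCR_EL2.VM is set.
 */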
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

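/*
 * Find the memblock region containing @addr. On success, @range is set to the
 * [start, end) interval of that region and true is returned. If @addr is not
 * backed by memory, @range is set to the gap between the two neighbouring
 * regions and false is returned.
 */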
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return true;
		}
	}

	return false;
}

bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
				      prot, &host_s2_pool);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)					\
	({								\
		int __ret;						\
		hyp_assert_lock_held(&host_kvm.lock);			\
		__ret = fn(__VA_ARGS__);				\
		if (__ret == -ENOMEM) {					\
			__ret = host_stage2_unmap_dev_all();		\
			if (!__ret)					\
				__ret = fn(__VA_ARGS__);		\
		}							\
		__ret;							\
	})

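/*
 * Typical use, as in host_stage2_idmap_locked() below: wrap a page-table
 * operation so that an allocation failure triggers a recycle-and-retry:
 *
 *	host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
 */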
static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

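/*
 * Adjust @range to the largest block-mappable region that contains @addr and
 * doesn't intersect any existing mapping. Returns -EAGAIN if the leaf for
 * @addr is already valid (e.g. another CPU won the race to map it), and
 * -EPERM if the PTE carries an ownership annotation, in which case the page
 * must not be mapped back into the host.
 */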
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u32 level;
	int ret;

	hyp_assert_lock_held(&host_kvm.lock);
	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte)
		return -EPERM;

	do {
		u64 granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while ((level < KVM_PGTABLE_MAX_LEVELS) &&
			!(kvm_level_supports_block_mapping(level) &&
			  range_included(&cur, range)));

	*range = cur;

	return 0;
}

int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
			       addr, size, &host_s2_pool, owner_id);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, hence avoiding the loss of state due to side-effects in
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}

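/*
 * Host stage-2 fault handler: compute the faulting IPA from HPFAR_EL2 and
 * extend the identity map to cover it. -EAGAIN means we lost a race with
 * another CPU that already mapped the page; any other failure is fatal.
 */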
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	BUG_ON(!__get_fault_info(esr, &fault));

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}

/* This corresponds to locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
};

struct pkvm_mem_transition {
	u64 nr_pages;

	struct {
		enum pkvm_component_id id;
		/* Address in the initiator's address space */
		u64 addr;

		union {
			struct {
				/* Address in the completer's address space */
				u64 completer_addr;
			} host;
		};
	} initiator;

	struct {
		enum pkvm_component_id id;
	} completer;
};

struct pkvm_mem_share {
	const struct pkvm_mem_transition tx;
	const enum kvm_pgtable_prot completer_prot;
};

struct check_walk_data {
	enum pkvm_page_state desired;
	enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
};

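/*
 * Page-table walker callback: check that every page in the walked range is in
 * the state the caller expects. Valid PTEs pointing outside of memory are
 * rejected outright.
 */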
static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      enum kvm_pgtable_walk_flags flag,
				      void * const arg)
{
	struct check_walk_data *d = arg;
	kvm_pte_t pte = *ptep;

	if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
		return -EINVAL;

	return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb = __check_page_state_visitor,
		.arg = data,
		.flags = KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

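/*
 * An invalid but non-zero PTE in the host stage-2 carries an owner-ID
 * annotation (see host_stage2_set_owner_locked()): the page belongs to
 * someone else and must be reported as PKVM_NOPAGE.
 */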
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte) && pte)
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired = state,
		.get_page_state = host_get_page_state,
	};

	hyp_assert_lock_held(&host_kvm.lock);
	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);

	return host_stage2_idmap_locked(addr, size, prot);
}

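/*
 * Initiator helpers for transitions originating from the host: verify or
 * update the page state in the host stage-2 and hand back the address at
 * which the completer should see the pages.
 */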
static int host_request_owned_transition(u64 *completer_addr,
					 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int host_request_unshare(u64 *completer_addr,
				const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_share(u64 *completer_addr,
			       const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_unshare(u64 *completer_addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
}

static int __hyp_check_page_state_range(u64 addr, u64 size,
					enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired = state,
		.get_page_state = hyp_get_page_state,
	};

	hyp_assert_lock_held(&pkvm_pgd_lock);
	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}

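/*
 * The hyp doesn't map host memory by default, so when the host initiates a
 * transition the target range is known to be absent from the hyp stage-1 and
 * the walk can be elided. Keep the check for non-host initiators and for
 * CONFIG_NVHE_EL2_DEBUG builds.
 */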
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
		 tx->initiator.id != PKVM_ID_HOST);
}

static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
			 enum kvm_pgtable_prot perms)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (perms != PAGE_HYP)
		return -EPERM;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}

static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size,
					    PKVM_PAGE_SHARED_BORROWED);
}

static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
			      enum kvm_pgtable_prot perms)
{
	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
	enum kvm_pgtable_prot prot;

	prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
	return pkvm_create_mappings_locked(start, end, prot);
}

static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);

	return (ret != size) ? -EFAULT : 0;
}

static int check_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_owned_transition(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_share(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_share():
 *
 * The page owner grants access to another component with a given set
 * of permissions.
 *
 * Initiator: OWNED	=> SHARED_OWNED
 * Completer: NOPAGE	=> SHARED_BORROWED
 */
static int do_share(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_share(share);
	if (ret)
		return ret;

	return WARN_ON(__do_share(share));
}

static int check_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_unshare():
 *
 * The page owner revokes access from another component for a range of
 * pages which were previously shared using do_share().
 *
 * Initiator: SHARED_OWNED	=> OWNED
 * Completer: SHARED_BORROWED	=> NOPAGE
 */
static int do_unshare(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_unshare(share);
	if (ret)
		return ret;

	return WARN_ON(__do_unshare(share));
}

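/*
 * Share the page at @pfn with the hypervisor: the page is mapped into the
 * hyp stage-1 with PAGE_HYP permissions at the hyp VA corresponding to the
 * host physical address. Undone by __pkvm_host_unshare_hyp().
 */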
int __pkvm_host_share_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx = {
			.nr_pages = 1,
			.initiator = {
				.id = PKVM_ID_HOST,
				.addr = host_addr,
				.host = {
					.completer_addr = hyp_addr,
				},
			},
			.completer = {
				.id = PKVM_ID_HYP,
			},
		},
		.completer_prot = PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_share(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

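/*
 * Revoke a share previously established by __pkvm_host_share_hyp(), unmapping
 * the page from the hyp stage-1 and returning it to an exclusively host-owned
 * state.
 */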
int __pkvm_host_unshare_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx = {
			.nr_pages = 1,
			.initiator = {
				.id = PKVM_ID_HOST,
				.addr = host_addr,
				.host = {
					.completer_addr = hyp_addr,
				},
			},
			.completer = {
				.id = PKVM_ID_HYP,
			},
		},
		.completer_prot = PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_unshare(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}