Lines Matching refs:vsie_page
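(Identifier cross-reference for vsie_page. The function names all belong to arch/s390/kvm/vsie.c, KVM's s390 support for running SIE under SIE, i.e. nested virtualization via the "virtual SIE" (VSIE). Each hit shows the source line number, the matching line, and the enclosing function.)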

25 struct vsie_page {  struct
64 static void prefix_unmapped(struct vsie_page *vsie_page) in prefix_unmapped() argument
66 atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20); in prefix_unmapped()
70 static void prefix_unmapped_sync(struct vsie_page *vsie_page) in prefix_unmapped_sync() argument
72 prefix_unmapped(vsie_page); in prefix_unmapped_sync()
73 if (vsie_page->scb_s.prog0c & PROG_IN_SIE) in prefix_unmapped_sync()
74 atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags); in prefix_unmapped_sync()
75 while (vsie_page->scb_s.prog0c & PROG_IN_SIE) in prefix_unmapped_sync()
80 static void prefix_mapped(struct vsie_page *vsie_page) in prefix_mapped() argument
82 atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20); in prefix_mapped()
86 static int prefix_is_mapped(struct vsie_page *vsie_page) in prefix_is_mapped() argument
88 return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST); in prefix_is_mapped()
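The four prefix_*() helpers above form a small handshake: PROG_REQUEST in prog20 flags the shadow prefix pages as invalid, and prefix_unmapped_sync() additionally raises CPUSTAT_STOP_INT and spins until PROG_IN_SIE drops, i.e. until the vCPU has left SIE and can no longer use the stale mapping. A minimal user-space model of that handshake, with C11 atomics standing in for the kernel's atomic_t and illustrative flag values:

    #include <stdatomic.h>
    #include <sched.h>

    #define PROG_REQUEST     0x0001U  /* illustrative values, not the architected bits */
    #define PROG_IN_SIE      0x0001U
    #define CPUSTAT_STOP_INT 0x0001U

    struct scb_model {
            atomic_uint prog20;   /* PROG_REQUEST set => shadow prefix invalid */
            atomic_uint prog0c;   /* PROG_IN_SIE set/cleared by the vCPU around SIE entry */
            atomic_uint cpuflags; /* CPUSTAT_STOP_INT kicks the vCPU out of SIE */
    };

    static void model_prefix_unmapped(struct scb_model *s)
    {
            atomic_fetch_or(&s->prog20, PROG_REQUEST);
    }

    /* Like prefix_unmapped_sync(): mark invalid, then wait until the vCPU
     * has actually left SIE so it cannot keep using the old pages. */
    static void model_prefix_unmapped_sync(struct scb_model *s)
    {
            model_prefix_unmapped(s);
            if (atomic_load(&s->prog0c) & PROG_IN_SIE)
                    atomic_fetch_or(&s->cpuflags, CPUSTAT_STOP_INT);
            while (atomic_load(&s->prog0c) & PROG_IN_SIE)
                    sched_yield(); /* the kernel busy-waits with cpu_relax() */
    }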
92 static void update_intervention_requests(struct vsie_page *vsie_page) in update_intervention_requests() argument
97 cpuflags = atomic_read(&vsie_page->scb_o->cpuflags); in update_intervention_requests()
98 atomic_andnot(bits, &vsie_page->scb_s.cpuflags); in update_intervention_requests()
99 atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags); in update_intervention_requests()
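update_intervention_requests() forwards pending intervention bits from the original control block (scb_o) into the shadow (scb_s): snapshot the source, clear the bits in the destination, OR in what was set. The transient window in which the bits read as clear is harmless on this path because only the owning vCPU consumes them. The same two-step pattern as a standalone sketch:

    #include <stdatomic.h>

    /* Propagate exactly 'bits' from *src to *dst: snapshot, clear, copy.
     * Sketch of the pattern in update_intervention_requests() above. */
    static void propagate_bits(atomic_uint *dst, atomic_uint *src,
                               unsigned int bits)
    {
            unsigned int set = atomic_load(src) & bits;

            atomic_fetch_and(dst, ~bits);
            atomic_fetch_or(dst, set);
    }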
103 static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in prepare_cpuflags() argument
105 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in prepare_cpuflags()
106 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in prepare_cpuflags()
284 static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in shadow_crycb() argument
286 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in shadow_crycb()
287 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in shadow_crycb()
316 ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr, in shadow_crycb()
333 vsie_page->crycb.dea_wrapping_key_mask, 56)) in shadow_crycb()
340 b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask; in shadow_crycb()
354 scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2; in shadow_crycb()
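Line 354 is worth pausing on: the 64-bit host address of the shadow crycb is squeezed into a 32-bit designation whose low bits double as the format indicator. That only works because the vsie_page is allocated from the DMA zone (below 2 GiB) and the crycb inside it is aligned, so the discarded bits are guaranteed zero. A hypothetical standalone packing helper:

    #include <assert.h>
    #include <stdint.h>

    #define CRYCB_FORMAT2 0x00000003U /* format value as used by the kernel */

    /* Pack a crycb origin plus format into one 32-bit designation,
     * mirroring the scb_s->crycbd assignment on line 354. Hypothetical. */
    static uint32_t make_crycbd(const void *crycb)
    {
            uintptr_t addr = (uintptr_t)crycb;

            assert((addr & CRYCB_FORMAT2) == 0); /* alignment frees the low bits */
            assert(addr < (1UL << 31));          /* must be a 31-bit origin */
            return (uint32_t)addr | CRYCB_FORMAT2;
    }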
359 static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in prepare_ibc() argument
361 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in prepare_ibc()
362 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in prepare_ibc()
382 static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in unshadow_scb() argument
384 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in unshadow_scb()
385 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in unshadow_scb()
433 static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in shadow_scb() argument
435 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in shadow_scb()
436 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in shadow_scb()
455 rc = prepare_cpuflags(vcpu, vsie_page); in shadow_scb()
491 prefix_unmapped(vsie_page); in shadow_scb()
518 prefix_unmapped(vsie_page); in shadow_scb()
561 prepare_ibc(vcpu, vsie_page); in shadow_scb()
562 rc = shadow_crycb(vcpu, vsie_page); in shadow_scb()
565 unshadow_scb(vcpu, vsie_page); in shadow_scb()
573 struct vsie_page *cur; in kvm_s390_vsie_gmap_notifier() local
615 static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in map_prefix() argument
617 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in map_prefix()
621 if (prefix_is_mapped(vsie_page)) in map_prefix()
625 prefix_mapped(vsie_page); in map_prefix()
630 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL); in map_prefix()
632 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in map_prefix()
639 prefix_unmapped(vsie_page); in map_prefix()
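Note the ordering inside map_prefix(): the page is declared mapped (line 625) before the shadow faults are resolved (lines 630/632). If an invalidation races in between, it simply re-sets PROG_REQUEST and the next pass through vsie_run() maps again; at worst work is repeated, never skipped. The pattern in isolation, reusing the illustrative PROG_REQUEST flag from the sketch further up:

    #include <stdatomic.h>

    #define PROG_REQUEST 0x0001U /* illustrative, as above */

    /* Optimistic map: flip to "mapped" first, so a racing unmap during
     * do_map() leaves the flag set again and forces a retry. Sketch. */
    static int model_map_prefix(atomic_uint *prog20,
                                int (*do_map)(void *), void *arg)
    {
            if (!(atomic_load(prog20) & PROG_REQUEST))
                    return 0;                          /* already mapped */
            atomic_fetch_and(prog20, ~PROG_REQUEST);   /* mapped, optimistically */
            if (do_map(arg)) {
                    atomic_fetch_or(prog20, PROG_REQUEST); /* failed: unmapped */
                    return -1;
            }
            return 0;
    }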
672 static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in unpin_blocks() argument
674 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in unpin_blocks()
679 unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa); in unpin_blocks()
680 vsie_page->sca_gpa = 0; in unpin_blocks()
687 unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa); in unpin_blocks()
688 vsie_page->itdba_gpa = 0; in unpin_blocks()
694 unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa); in unpin_blocks()
695 vsie_page->gvrd_gpa = 0; in unpin_blocks()
701 unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa); in unpin_blocks()
702 vsie_page->riccbd_gpa = 0; in unpin_blocks()
708 unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa); in unpin_blocks()
709 vsie_page->sdnx_gpa = 0; in unpin_blocks()
728 static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in pin_blocks() argument
730 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in pin_blocks()
731 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in pin_blocks()
754 vsie_page->sca_gpa = gpa; in pin_blocks()
771 vsie_page->itdba_gpa = gpa; in pin_blocks()
790 vsie_page->gvrd_gpa = gpa; in pin_blocks()
807 vsie_page->riccbd_gpa = gpa; in pin_blocks()
836 vsie_page->sdnx_gpa = gpa; in pin_blocks()
841 unpin_blocks(vcpu, vsie_page); in pin_blocks()
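pin_blocks() records each successfully pinned guest address in the vsie_page (sca_gpa, itdba_gpa, gvrd_gpa, riccbd_gpa, sdnx_gpa), and unpin_blocks() keys off exactly those records, zeroing each after unpinning. That is why the error path (line 841) can call the full unpin routine: entries that were never pinned are simply skipped. The skeleton of the idiom, with trivial pin_one()/unpin_one() stand-ins so the sketch is self-contained:

    #include <stddef.h>

    struct block { unsigned long gpa; /* nonzero once pinned */ };

    /* Trivial stand-ins; the kernel pins/unpins real guest pages. */
    static int  pin_one(struct block *b, unsigned long gpa) { b->gpa = gpa; return 0; }
    static void unpin_one(struct block *b) { b->gpa = 0; }

    static void unpin_all(struct block *tbl, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    if (tbl[i].gpa)         /* only what was actually pinned */
                            unpin_one(&tbl[i]);
    }

    static int pin_all(struct block *tbl, const unsigned long *gpas, size_t n)
    {
            for (size_t i = 0; i < n; i++) {
                    if (gpas[i] && pin_one(&tbl[i], gpas[i])) {
                            unpin_all(tbl, n); /* mirrors the unpin_blocks() call
                                                * on line 841 */
                            return -1;
                    }
            }
            return 0;
    }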
846 static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, in unpin_scb() argument
849 hpa_t hpa = (hpa_t) vsie_page->scb_o; in unpin_scb()
853 vsie_page->scb_o = NULL; in unpin_scb()
862 static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, in pin_scb() argument
874 vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa; in pin_scb()
914 static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in handle_fault() argument
923 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in handle_fault()
930 vsie_page->fault_addr = current->thread.gmap_addr; in handle_fault()
942 struct vsie_page *vsie_page) in handle_last_fault() argument
944 if (vsie_page->fault_addr) in handle_last_fault()
945 kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in handle_last_fault()
946 vsie_page->fault_addr, NULL); in handle_last_fault()
947 vsie_page->fault_addr = 0; in handle_last_fault()
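handle_fault() caches an address it could not resolve in vsie_page->fault_addr (line 930); handle_last_fault() replays it before the next SIE entry (lines 944-946) and then clears the record, because the mapping may have been invalidated while the vCPU was outside SIE. The shape of that deferred replay, with resolve() as an assumed stand-in for kvm_s390_shadow_fault():

    struct fault_cache { unsigned long fault_addr; };

    static void record_fault(struct fault_cache *fc, unsigned long addr)
    {
            fc->fault_addr = addr;
    }

    static void replay_last_fault(struct fault_cache *fc,
                                  int (*resolve)(unsigned long))
    {
            if (fc->fault_addr)
                    resolve(fc->fault_addr); /* result ignored, as in
                                              * handle_last_fault() above */
            fc->fault_addr = 0;
    }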
950 static inline void clear_vsie_icpt(struct vsie_page *vsie_page) in clear_vsie_icpt() argument
952 vsie_page->scb_s.icptcode = 0; in clear_vsie_icpt()
956 static void retry_vsie_icpt(struct vsie_page *vsie_page) in retry_vsie_icpt() argument
958 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in retry_vsie_icpt()
968 clear_vsie_icpt(vsie_page); in retry_vsie_icpt()
978 static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in handle_stfle() argument
980 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in handle_stfle()
981 __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U; in handle_stfle()
984 retry_vsie_icpt(vsie_page); in handle_stfle()
985 if (read_guest_real(vcpu, fac, &vsie_page->fac, in handle_stfle()
986 sizeof(vsie_page->fac))) in handle_stfle()
988 scb_s->fac = (__u32)(__u64) &vsie_page->fac; in handle_stfle()
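handle_stfle() is the general VSIE trick in miniature: the guest's facility list is copied into a buffer owned by the vsie_page, and the shadow control block is then pointed at the copy (line 988), so the guest cannot change the data after it has been read. The mask on line 981 (0x7ffffff8U) keeps the designation an 8-byte-aligned 31-bit origin. A reduced model of the copy-then-redirect step; read_guest(), the buffer size and the names are assumptions:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for read_guest_real(); always succeeds in this sketch. */
    static int read_guest(void *dst, uint32_t gaddr, size_t len)
    {
            (void)dst; (void)gaddr; (void)len;
            return 0;
    }

    static int shadow_fac_list(uint32_t guest_fac_origin,
                               uint8_t shadow_fac[32], uint32_t *scb_fac)
    {
            uint32_t fac = guest_fac_origin & 0x7ffffff8U; /* aligned 31-bit origin */

            if (read_guest(shadow_fac, fac, 32))
                    return -1; /* the kernel raises a validity intercept here */
            *scb_fac = (uint32_t)(uintptr_t)shadow_fac; /* shadow now self-contained */
            return 0;
    }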
1000 static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg) in vsie_get_register() argument
1006 return vsie_page->scb_s.gg15; in vsie_get_register()
1008 return vsie_page->scb_s.gg14; in vsie_get_register()
1014 static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in vsie_handle_mvpg() argument
1016 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in vsie_handle_mvpg()
1018 u64 *pei_block = &vsie_page->scb_o->mcic; in vsie_handle_mvpg()
1027 dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask; in vsie_handle_mvpg()
1029 src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask; in vsie_handle_mvpg()
1032 rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest); in vsie_handle_mvpg()
1033 rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src); in vsie_handle_mvpg()
1039 retry_vsie_icpt(vsie_page); in vsie_handle_mvpg()
1050 clear_vsie_icpt(vsie_page); in vsie_handle_mvpg()
1073 retry_vsie_icpt(vsie_page); in vsie_handle_mvpg()
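vsie_handle_mvpg() extracts MVPG's two register operands from the instruction parameter block, (ipb >> 20) and (ipb >> 16), with vsie_get_register() masking its argument to 4 bits; registers 14 and 15 come from the control block (gg14/gg15) because SIE keeps those two in the SCB rather than in the ordinary save area. The selection logic modelled standalone (field names illustrative):

    #include <stdint.h>

    struct regs_model {
            uint64_t gprs[16];   /* r0..r13 are valid here while in SIE */
            uint64_t gg14, gg15; /* r14/r15 live in the control block */
    };

    /* Mirrors vsie_get_register() above. */
    static uint64_t get_reg(const struct regs_model *r, uint8_t reg)
    {
            reg &= 0xf;
            switch (reg) {
            case 15: return r->gg15;
            case 14: return r->gg14;
            default: return r->gprs[reg];
            }
    }

    /* MVPG operand fields, as used on lines 1027/1029. */
    static uint8_t mvpg_r1(uint32_t ipb) { return (ipb >> 20) & 0xf; }
    static uint8_t mvpg_r2(uint32_t ipb) { return (ipb >> 16) & 0xf; }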
1093 static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in do_vsie_run() argument
1097 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in do_vsie_run()
1098 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in do_vsie_run()
1102 handle_last_fault(vcpu, vsie_page); in do_vsie_run()
1150 kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info); in do_vsie_run()
1157 return handle_fault(vcpu, vsie_page); in do_vsie_run()
1162 rc = handle_stfle(vcpu, vsie_page); in do_vsie_run()
1167 clear_vsie_icpt(vsie_page); in do_vsie_run()
1175 rc = vsie_handle_mvpg(vcpu, vsie_page); in do_vsie_run()
1181 static void release_gmap_shadow(struct vsie_page *vsie_page) in release_gmap_shadow() argument
1183 if (vsie_page->gmap) in release_gmap_shadow()
1184 gmap_put(vsie_page->gmap); in release_gmap_shadow()
1185 WRITE_ONCE(vsie_page->gmap, NULL); in release_gmap_shadow()
1186 prefix_unmapped(vsie_page); in release_gmap_shadow()
1190 struct vsie_page *vsie_page) in acquire_gmap_shadow() argument
1207 if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) in acquire_gmap_shadow()
1211 release_gmap_shadow(vsie_page); in acquire_gmap_shadow()
1216 WRITE_ONCE(vsie_page->gmap, gmap); in acquire_gmap_shadow()
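acquire_gmap_shadow() keeps one shadow address space cached in vsie_page->gmap and reuses it while gmap_shadow_valid() confirms the guest's (asce, edat) combination still matches (line 1207); otherwise it drops the reference and builds a fresh shadow, publishing the pointer with WRITE_ONCE() (line 1216) so kvm_s390_vsie_gmap_notifier() can inspect it locklessly. The caching shape as a sketch; the struct, key fields and malloc/free stand-ins are all illustrative:

    #include <stdbool.h>
    #include <stdlib.h>

    struct shadow { unsigned long asce; int edat; };

    static bool shadow_valid(const struct shadow *s, unsigned long asce, int edat)
    {
            return s->asce == asce && s->edat == edat;
    }

    static struct shadow *cached; /* plays the role of vsie_page->gmap */

    static struct shadow *acquire_shadow(unsigned long asce, int edat)
    {
            if (cached && shadow_valid(cached, asce, edat))
                    return cached;          /* still valid: reuse */
            free(cached);                   /* release_gmap_shadow() stand-in */
            cached = malloc(sizeof(*cached));
            if (cached) {
                    cached->asce = asce;
                    cached->edat = edat;
            }
            return cached;
    }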
1224 struct vsie_page *vsie_page) in register_shadow_scb() argument
1226 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in register_shadow_scb()
1228 WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s); in register_shadow_scb()
1267 static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in vsie_run() argument
1269 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in vsie_run()
1273 rc = acquire_gmap_shadow(vcpu, vsie_page); in vsie_run()
1275 rc = map_prefix(vcpu, vsie_page); in vsie_run()
1277 gmap_enable(vsie_page->gmap); in vsie_run()
1278 update_intervention_requests(vsie_page); in vsie_run()
1279 rc = do_vsie_run(vcpu, vsie_page); in vsie_run()
1320 static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr) in get_vsie_page()
1322 struct vsie_page *vsie_page; in get_vsie_page() local
1372 vsie_page = page_to_virt(page); in get_vsie_page()
1373 memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block)); in get_vsie_page()
1374 release_gmap_shadow(vsie_page); in get_vsie_page()
1375 vsie_page->fault_addr = 0; in get_vsie_page()
1376 vsie_page->scb_s.ihcpu = 0xffffU; in get_vsie_page()
1377 return vsie_page; in get_vsie_page()
1381 static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page) in put_vsie_page() argument
1383 struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT); in put_vsie_page()
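When get_vsie_page() hands out a recycled page (lines 1372-1377) it scrubs exactly the state a previous run could have left behind: zero the shadow control block, drop the cached shadow gmap, clear the pending fault, and set ihcpu to 0xffffU, an invalid CPU address, so no cached TLB state from an earlier run can match. The reset in standalone form (types, sizes and the free() stand-in are illustrative):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct scb_lite { uint16_t ihcpu; /* ...many more fields in the real SCB... */ };

    struct vsie_page_model {
            struct scb_lite scb_s;
            void *gmap;
            unsigned long fault_addr;
    };

    static void reset_for_reuse(struct vsie_page_model *v)
    {
            memset(&v->scb_s, 0, sizeof(v->scb_s));
            free(v->gmap);            /* release_gmap_shadow() stand-in */
            v->gmap = NULL;
            v->fault_addr = 0;
            v->scb_s.ihcpu = 0xffffU; /* invalid CPU: no stale TLB reuse */
    }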
1390 struct vsie_page *vsie_page; in kvm_s390_handle_vsie() local
1400 BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE); in kvm_s390_handle_vsie()
1411 vsie_page = get_vsie_page(vcpu->kvm, scb_addr); in kvm_s390_handle_vsie()
1412 if (IS_ERR(vsie_page)) in kvm_s390_handle_vsie()
1413 return PTR_ERR(vsie_page); in kvm_s390_handle_vsie()
1414 else if (!vsie_page) in kvm_s390_handle_vsie()
1418 rc = pin_scb(vcpu, vsie_page, scb_addr); in kvm_s390_handle_vsie()
1421 rc = shadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1424 rc = pin_blocks(vcpu, vsie_page); in kvm_s390_handle_vsie()
1427 register_shadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1428 rc = vsie_run(vcpu, vsie_page); in kvm_s390_handle_vsie()
1430 unpin_blocks(vcpu, vsie_page); in kvm_s390_handle_vsie()
1432 unshadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1434 unpin_scb(vcpu, vsie_page, scb_addr); in kvm_s390_handle_vsie()
1436 put_vsie_page(vcpu->kvm, vsie_page); in kvm_s390_handle_vsie()
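kvm_s390_handle_vsie() arranges the whole run as a strict ladder, pin_scb -> shadow_scb -> pin_blocks -> register/run, with cleanup in exact reverse order (lines 1411-1436); the BUILD_BUG_ON on line 1400 additionally pins struct vsie_page to exactly one page, so a single DMA-zone allocation backs every 31/32-bit designation pointed into it. The goto-unwind idiom reduced to a compilable skeleton with stand-in steps:

    /* Each step/undo pair stands in for one pin/shadow stage above. */
    static int  step_pin_scb(void)    { return 0; }
    static int  step_shadow_scb(void) { return 0; }
    static int  step_pin_blocks(void) { return 0; }
    static int  step_run(void)        { return 0; }
    static void undo_pin_scb(void)    { }
    static void undo_shadow_scb(void) { }
    static void undo_pin_blocks(void) { }

    static int handle(void)
    {
            int rc = step_pin_scb();
            if (rc)
                    return rc;
            rc = step_shadow_scb();
            if (rc)
                    goto out_unpin_scb;
            rc = step_pin_blocks();
            if (rc)
                    goto out_unshadow_scb;
            rc = step_run();          /* register_shadow_scb() + vsie_run() */
            undo_pin_blocks();        /* unpin_blocks() */
    out_unshadow_scb:
            undo_shadow_scb();        /* unshadow_scb() */
    out_unpin_scb:
            undo_pin_scb();           /* unpin_scb() */
            return rc;
    }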
1451 struct vsie_page *vsie_page; in kvm_s390_vsie_destroy() local
1459 vsie_page = page_to_virt(page); in kvm_s390_vsie_destroy()
1460 release_gmap_shadow(vsie_page); in kvm_s390_vsie_destroy()