Lines Matching refs:sie_block (arch/s390/kvm/guestdbg.c)

62 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; in enable_all_hw_bp()
63 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; in enable_all_hw_bp()
64 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; in enable_all_hw_bp()
102 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; in enable_all_hw_wp()
103 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; in enable_all_hw_wp()
104 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; in enable_all_hw_wp()
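Both groups of matches above show the same pattern: enable_all_hw_bp() and enable_all_hw_wp() take pointers into the guest control-register array to program the PER (Program-Event Recording) facility, where CR9 carries the event mask and CR10/CR11 bound the monitored address range. Below is a minimal user-space sketch of that pattern; the struct and the event bit are illustrative stand-ins, not the real kvm_s390_sie_block layout or kernel constants.

#include <stdint.h>
#include <stdio.h>

/* Placeholder event bit for this mock; the kernel defines the real
 * PER_EVENT_* values. */
#define MOCK_PER_EVENT_STORE 0x0000000020000000UL

/* Illustrative stand-in for the SIE block's guest control registers. */
struct mock_sie_block {
	uint64_t gcr[16];
};

/* Program a PER range: CR9 = event mask, CR10 = start, CR11 = end. */
static void enable_per_range(struct mock_sie_block *sb, uint64_t events,
			     uint64_t start, uint64_t end)
{
	uint64_t *cr9  = &sb->gcr[9];
	uint64_t *cr10 = &sb->gcr[10];
	uint64_t *cr11 = &sb->gcr[11];

	*cr9 |= events;
	*cr10 = start;
	*cr11 = end;
}

int main(void)
{
	struct mock_sie_block sb = { { 0 } };

	enable_per_range(&sb, MOCK_PER_EVENT_STORE, 0x1000, 0x2000);
	printf("cr9=%#llx cr10=%#llx cr11=%#llx\n",
	       (unsigned long long)sb.gcr[9],
	       (unsigned long long)sb.gcr[10],
	       (unsigned long long)sb.gcr[11]);
	return 0;
}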
132 vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0]; in kvm_s390_backup_guest_per_regs()
133 vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9]; in kvm_s390_backup_guest_per_regs()
134 vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10]; in kvm_s390_backup_guest_per_regs()
135 vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11]; in kvm_s390_backup_guest_per_regs()
140 vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0; in kvm_s390_restore_guest_per_regs()
141 vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9; in kvm_s390_restore_guest_per_regs()
142 vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10; in kvm_s390_restore_guest_per_regs()
143 vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11; in kvm_s390_restore_guest_per_regs()
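kvm_s390_backup_guest_per_regs() and kvm_s390_restore_guest_per_regs() are a matched pair: before the host debugger clobbers CR0 and CR9-CR11, the guest's values are stashed in arch.guestdbg, and they are copied back verbatim once host debugging is done. A self-contained sketch of that pairing, with mock types whose field names simply mirror the listing:

#include <stdint.h>

struct mock_sie_block { uint64_t gcr[16]; };

/* Stand-in for the per-vcpu guestdbg backup area. */
struct mock_guestdbg {
	uint64_t cr0, cr9, cr10, cr11;
};

static void backup_guest_per_regs(struct mock_guestdbg *db,
				  const struct mock_sie_block *sb)
{
	db->cr0  = sb->gcr[0];
	db->cr9  = sb->gcr[9];
	db->cr10 = sb->gcr[10];
	db->cr11 = sb->gcr[11];
}

static void restore_guest_per_regs(struct mock_sie_block *sb,
				   const struct mock_guestdbg *db)
{
	sb->gcr[0]  = db->cr0;
	sb->gcr[9]  = db->cr9;
	sb->gcr[10] = db->cr10;
	sb->gcr[11] = db->cr11;
}

int main(void)
{
	struct mock_sie_block sb = { { 0 } };
	struct mock_guestdbg db;

	sb.gcr[9] = 0x1234;
	backup_guest_per_regs(&db, &sb);
	sb.gcr[9] = 0;			/* host debugger clobbers CR9 */
	restore_guest_per_regs(&sb, &db);
	return sb.gcr[9] == 0x1234 ? 0 : 1;
}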
156 vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK; in kvm_s390_patch_guest_per_regs()
157 vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; in kvm_s390_patch_guest_per_regs()
158 vcpu->arch.sie_block->gcr[10] = 0; in kvm_s390_patch_guest_per_regs()
159 vcpu->arch.sie_block->gcr[11] = -1UL; in kvm_s390_patch_guest_per_regs()
168 if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION) in kvm_s390_patch_guest_per_regs()
169 vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION; in kvm_s390_patch_guest_per_regs()
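kvm_s390_patch_guest_per_regs() widens PER until every guest instruction fetch raises an event for the host: the clock-comparator submask is cleared in CR0, PER_EVENT_IFETCH is forced into CR9, the range is opened to the whole address space (CR10 = 0, CR11 = -1UL), and the nullification control is stripped. A sketch of those edits, using placeholder bit values rather than the kernel's real CR0_*/PER_EVENT_* constants:

#include <stdint.h>

/* Placeholder bit values for this mock; the kernel defines the real
 * CR0_CLOCK_COMPARATOR_SUBMASK and PER_EVENT_* constants. */
#define MOCK_CR0_CLOCK_COMPARATOR_SUBMASK 0x0000000000000800UL
#define MOCK_PER_EVENT_IFETCH             0x0000000040000000UL
#define MOCK_PER_EVENT_NULLIFICATION      0x0000000001000000UL

struct mock_sie_block { uint64_t gcr[16]; };

/* Widen PER so every instruction fetch in the guest raises an event. */
static void patch_guest_per_regs(struct mock_sie_block *sb)
{
	sb->gcr[0] &= ~MOCK_CR0_CLOCK_COMPARATOR_SUBMASK;
	sb->gcr[9] |= MOCK_PER_EVENT_IFETCH;
	sb->gcr[10] = 0;	/* range start: bottom of address space */
	sb->gcr[11] = -1UL;	/* range end: top of address space */

	/* Clear the nullification control, as the listing shows. */
	if (sb->gcr[9] & MOCK_PER_EVENT_NULLIFICATION)
		sb->gcr[9] &= ~MOCK_PER_EVENT_NULLIFICATION;
}

int main(void)
{
	struct mock_sie_block sb = { { 0 } };

	patch_guest_per_regs(&sb);
	return (sb.gcr[9] & MOCK_PER_EVENT_IFETCH) ? 0 : 1;
}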
394 unsigned long addr = vcpu->arch.sie_block->gpsw.addr; in debug_exit_required()
442 if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) { in per_fetched_addr()
444 *addr = vcpu->arch.sie_block->peraddr; in per_fetched_addr()
459 *addr = __rewind_psw(vcpu->arch.sie_block->gpsw, in per_fetched_addr()
461 if (vcpu->arch.sie_block->icptstatus & 0x01) { in per_fetched_addr()
462 exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4; in per_fetched_addr()
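per_fetched_addr() recovers the address of the instruction that triggered the event. On a program-interrupt intercept (ICPT_PROGI) the hardware-reported peraddr is used directly; otherwise the instruction was suppressed and its address is computed by stepping the PSW back by the instruction length, with icptstatus bit 0x01 flagging an intercepted EXECUTE whose target length sits in (icptstatus & 0x60) >> 4. A sketch of the rewind step, in the spirit of the kernel's __rewind_psw(); the PSW mask bits are assumed values for this mock:

#include <stdint.h>

/* Assumed PSW mask bits for this mock (EA/BA select 64/31/24-bit
 * addressing); the kernel defines the real PSW_MASK_* values. */
#define MOCK_PSW_MASK_EA 0x0000000100000000UL
#define MOCK_PSW_MASK_BA 0x0000000080000000UL

struct mock_psw {
	uint64_t mask;
	uint64_t addr;
};

/* Step the PSW back by one instruction length, wrapping within the
 * current addressing mode. */
static uint64_t rewind_psw(struct mock_psw psw, uint64_t ilen)
{
	uint64_t amask;

	amask = (psw.mask & MOCK_PSW_MASK_EA) ? -1UL :
		(psw.mask & MOCK_PSW_MASK_BA) ? (1UL << 31) - 1 :
						(1UL << 24) - 1;
	return (psw.addr - ilen) & amask;
}

int main(void)
{
	struct mock_psw psw = { .mask = MOCK_PSW_MASK_EA, .addr = 0x2004 };

	/* a 4-byte instruction at 0x2000 was suppressed */
	return rewind_psw(psw, 4) == 0x2000 ? 0 : 1;
}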
495 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) in guest_per_enabled()
499 const u64 cr10 = vcpu->arch.sie_block->gcr[10]; in kvm_s390_handle_per_ifetch_icpt()
500 const u64 cr11 = vcpu->arch.sie_block->gcr[11]; in kvm_s390_handle_per_ifetch_icpt()
505 .per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen), in kvm_s390_handle_per_ifetch_icpt()
522 !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH)) in kvm_s390_handle_per_ifetch_icpt()
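kvm_s390_handle_per_ifetch_icpt() builds a program-interrupt record whose per_address is the rewound PSW, lets the host debugger claim the event first, and forwards it to the guest only if the guest armed instruction-fetch PER itself: the PSW PER mask must be on (the guest_per_enabled() test at source line 495) and CR9 must carry PER_EVENT_IFETCH, with the fetched address inside CR10..CR11. A sketch of that gating; the range check here ignores the wrap-around case the kernel handles:

#include <stdint.h>

/* Placeholder for the kernel's PER_EVENT_IFETCH bit. */
#define MOCK_PER_EVENT_IFETCH 0x0000000040000000UL

/* Decide whether a host-observed ifetch event should also be injected
 * into the guest: only if the guest's PSW PER mask is on, its CR9
 * requests ifetch events, and the fetched address is in CR10..CR11.
 * The range check assumes a non-wrapping range for simplicity. */
static int forward_ifetch_to_guest(int guest_psw_per, uint64_t cr9,
				   uint64_t cr10, uint64_t cr11,
				   uint64_t fetched_addr)
{
	if (!guest_psw_per || !(cr9 & MOCK_PER_EVENT_IFETCH))
		return 0;
	return fetched_addr >= cr10 && fetched_addr <= cr11;
}

int main(void)
{
	/* guest armed ifetch PER over 0x1000..0x2000; fetch at 0x1800 hits */
	return forward_ifetch_to_guest(1, MOCK_PER_EVENT_IFETCH,
				       0x1000, 0x2000, 0x1800) ? 0 : 1;
}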
539 const u8 perc = vcpu->arch.sie_block->perc; in filter_guest_per_event()
540 u64 addr = vcpu->arch.sie_block->gpsw.addr; in filter_guest_per_event()
541 u64 cr9 = vcpu->arch.sie_block->gcr[9]; in filter_guest_per_event()
542 u64 cr10 = vcpu->arch.sie_block->gcr[10]; in filter_guest_per_event()
543 u64 cr11 = vcpu->arch.sie_block->gcr[11]; in filter_guest_per_event()
574 vcpu->arch.sie_block->perc = guest_perc; in filter_guest_per_event()
577 vcpu->arch.sie_block->iprcc &= ~PGM_PER; in filter_guest_per_event()
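filter_guest_per_event() reconciles a hardware PER event with what the guest actually asked for: the reported cause byte perc is intersected with the guest's CR9 event mask, and if nothing survives, PGM_PER is cleared from iprcc so the guest never sees the host-induced event. A sketch of the intersect-and-clear core; the >> 24 shift is an assumption about how CR9's event bits align with the cause byte, and the mask and PGM_PER values are placeholders:

#include <stdint.h>

/* Placeholder values; the kernel defines the real PER_CODE_MASK and
 * PGM_PER constants. */
#define MOCK_PER_CODE_MASK 0xE0
#define MOCK_PGM_PER       0x80

struct mock_sie_block {
	uint64_t gcr[16];
	uint8_t  perc;   /* PER cause byte reported by hardware */
	uint32_t iprcc;  /* program-interrupt cause code */
};

/* Keep only the PER causes the guest itself requested; if none remain,
 * drop the PER indication from the interrupt cause entirely. */
static void filter_guest_per_event(struct mock_sie_block *sb,
				   int guest_psw_per)
{
	uint8_t guest_perc = sb->perc & (sb->gcr[9] >> 24) & MOCK_PER_CODE_MASK;

	if (!guest_psw_per)
		guest_perc = 0;

	sb->perc = guest_perc;
	if (!guest_perc)
		sb->iprcc &= ~MOCK_PGM_PER;
}

int main(void)
{
	struct mock_sie_block sb = { .perc = 0x40, .iprcc = MOCK_PGM_PER };

	/* guest never armed PER: event must be filtered out entirely */
	filter_guest_per_event(&sb, 0);
	return (sb.perc == 0 && sb.iprcc == 0) ? 0 : 1;
}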
581 #define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
582 #define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
583 #define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
584 #define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
590 if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc, in kvm_s390_handle_per_event()
591 vcpu->arch.sie_block->peraddr)) in kvm_s390_handle_per_event()
604 if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) { in kvm_s390_handle_per_event()
605 vcpu->arch.sie_block->iprcc = 0; in kvm_s390_handle_per_event()
606 new_as = psw_bits(vcpu->arch.sie_block->gpsw).as; in kvm_s390_handle_per_event()
615 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; in kvm_s390_handle_per_event()
623 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; in kvm_s390_handle_per_event()
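The macros at source lines 581-584 feed kvm_s390_handle_per_event()'s space-switch fixup: pssec()/hssec() read the space-switch-event control out of the primary (CR1) and home (CR13) address-space-control elements, while old_ssec()/old_as_is_home() decode the pre-event state from tecmc. When the filtered event still carries PGM_SPACE_SWITCH, the handler re-derives whether the guest genuinely asked for a space-switch event before re-injecting it. A simplified sketch of that decision, mirroring the shape of the handler's tests but not every bit-level detail:

#include <stdint.h>

/* Placeholder for the kernel's _ASCE_SPACE_SWITCH bit. */
#define MOCK_ASCE_SPACE_SWITCH 0x40

/* Space-switch-event controls, as in the pssec()/hssec() macros:
 * CR1 holds the primary ASCE, CR13 the home ASCE. */
static int pssec(const uint64_t *gcr) { return !!(gcr[1]  & MOCK_ASCE_SPACE_SWITCH); }
static int hssec(const uint64_t *gcr) { return !!(gcr[13] & MOCK_ASCE_SPACE_SWITCH); }

/* Re-inject PGM_SPACE_SWITCH only if a switch from or to the home
 * space has either control set, or a switch to the primary space has
 * the primary or old control set. This mirrors the shape of the
 * handler, not its exact bit tests. */
static int keep_space_switch(const uint64_t *gcr, int new_as_is_home,
			     int old_as_was_home, int new_as_is_primary,
			     int old_ssec)
{
	if (new_as_is_home ^ old_as_was_home)
		return pssec(gcr) || hssec(gcr);
	if (new_as_is_primary && !old_as_was_home)
		return pssec(gcr) || old_ssec;
	return 0;
}

int main(void)
{
	uint64_t gcr[16] = { 0 };

	gcr[13] = MOCK_ASCE_SPACE_SWITCH;	/* home SSE control set */
	/* switch into home space: event survives via hssec() */
	return keep_space_switch(gcr, 1, 0, 0, 0) ? 0 : 1;
}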