
Searched refs:sie_block (Results 1 – 18 of 18) sorted by relevance

/linux-5.19.10/arch/s390/kvm/
intercept.c
27 struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; in kvm_s390_get_ilen() local
30 switch (vcpu->arch.sie_block->icptcode) { in kvm_s390_get_ilen()
37 ilen = insn_length(vcpu->arch.sie_block->ipa >> 8); in kvm_s390_get_ilen()
39 if (sie_block->icptstatus & 1) { in kvm_s390_get_ilen()
40 ilen = (sie_block->icptstatus >> 4) & 0x6; in kvm_s390_get_ilen()
47 ilen = vcpu->arch.sie_block->pgmilc & 0x6; in kvm_s390_get_ilen()
93 int viwhy = vcpu->arch.sie_block->ipb >> 16; in handle_validity()
110 vcpu->arch.sie_block->ipa, in handle_instruction()
111 vcpu->arch.sie_block->ipb); in handle_instruction()
113 switch (vcpu->arch.sie_block->ipa >> 8) { in handle_instruction()
[all …]
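
The intercept.c matches above derive the length of the intercepted instruction from sie_block fields (ipa, icptstatus, pgmilc). For orientation, a minimal standalone sketch of the underlying length encoding, written here as an illustration rather than the kernel's insn_length() helper:

/* Illustration only: s390 encodes an instruction's length in bits 0-1 of
 * the first opcode byte: 00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes. */
static unsigned int opcode_ilen(unsigned char opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;
	case 1:
	case 2:
		return 4;
	default:
		return 6;
	}
}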
guestdbg.c
62 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; in enable_all_hw_bp()
63 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; in enable_all_hw_bp()
64 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; in enable_all_hw_bp()
102 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; in enable_all_hw_wp()
103 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; in enable_all_hw_wp()
104 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; in enable_all_hw_wp()
132 vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0]; in kvm_s390_backup_guest_per_regs()
133 vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9]; in kvm_s390_backup_guest_per_regs()
134 vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10]; in kvm_s390_backup_guest_per_regs()
135 vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11]; in kvm_s390_backup_guest_per_regs()
[all …]
kvm-s390.h
24 #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
26 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
56 d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
62 atomic_or(flags, &vcpu->arch.sie_block->cpuflags); in kvm_s390_set_cpuflags()
67 atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags); in kvm_s390_clear_cpuflags()
72 return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags; in kvm_s390_test_cpuflags()
99 return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT; in kvm_s390_get_prefix()
106 vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; in kvm_s390_set_prefix()
113 u32 base2 = vcpu->arch.sie_block->ipb >> 28; in kvm_s390_get_base_disp_s()
114 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); in kvm_s390_get_base_disp_s()
[all …]
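
The last two kvm-s390.h lines decode the base register (base2) and 12-bit displacement (disp2) of an S-format operand from the ipb field. A minimal sketch of how such an operand resolves to an effective address, assuming the usual s390 rule that base register 0 contributes nothing (illustrative helper, not kernel code):

static unsigned long s_format_address(const unsigned long gprs[16],
				      unsigned int base2, unsigned int disp2)
{
	/* base register number 0 means "no base register" */
	return (base2 ? gprs[base2] : 0) + disp2;
}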
priv.c
39 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in handle_ri()
48 if ((vcpu->arch.sie_block->ipa & 0xf) <= 4) in kvm_s390_handle_aa()
65 vcpu->arch.sie_block->ecb |= ECB_GS; in handle_gs()
66 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in handle_gs()
76 int code = vcpu->arch.sie_block->ipb & 0xff; in kvm_s390_handle_e3()
93 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_clock()
132 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_prefix()
170 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_prefix()
200 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_cpu_address()
234 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_skey_check_enable()
[all …]
kvm-s390.c
302 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
304 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
305 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
2889 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
2953 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2954 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2961 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2962 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2963 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; in sca_add_vcpu()
2964 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
[all …]
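
The sca_add_vcpu() lines split the 64-bit system control area (SCA) origin across the scaoh (upper 32 bits) and scaol (lower 32 bits) fields, clearing the low 6 bits of scaol for the extended SCA. A minimal sketch of that split (illustrative helper, not kernel code):

static void split_sca_origin(unsigned long sca, unsigned int *scaoh,
			     unsigned int *scaol, int esca)
{
	*scaoh = (unsigned int)(sca >> 32);
	*scaol = (unsigned int)sca;
	if (esca)
		*scaol &= ~0x3fU;	/* extended SCA: low 6 bits must be zero */
}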
pv.c
35 free_page(sida_origin(vcpu->arch.sie_block)); in kvm_s390_pv_destroy_cpu()
36 vcpu->arch.sie_block->pv_handle_cpu = 0; in kvm_s390_pv_destroy_cpu()
37 vcpu->arch.sie_block->pv_handle_config = 0; in kvm_s390_pv_destroy_cpu()
39 vcpu->arch.sie_block->sdf = 0; in kvm_s390_pv_destroy_cpu()
45 vcpu->arch.sie_block->gbea = 1; in kvm_s390_pv_destroy_cpu()
69 uvcb.num = vcpu->arch.sie_block->icpua; in kvm_s390_pv_create_cpu()
70 uvcb.state_origin = (u64)vcpu->arch.sie_block; in kvm_s390_pv_create_cpu()
74 vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); in kvm_s390_pv_create_cpu()
75 if (!vcpu->arch.sie_block->sidad) { in kvm_s390_pv_create_cpu()
98 vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle; in kvm_s390_pv_create_cpu()
[all …]
interrupt.c
146 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); in psw_extint_disabled()
151 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); in psw_ioint_disabled()
156 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); in psw_mchk_disabled()
169 !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK)) in ckc_interrupts_enabled()
180 const u64 ckc = vcpu->arch.sie_block->ckc; in ckc_irq_pending()
182 if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) { in ckc_irq_pending()
194 (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK); in cpu_timer_interrupts_enabled()
361 if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) in disable_iscs()
381 if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK)) in deliverable_irqs()
383 if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK)) in deliverable_irqs()
[all …]
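
The interrupt.c matches follow one pattern: an interrupt class is deliverable only when the corresponding PSW mask bit is set and the matching control-register submask bit is enabled. A tiny sketch of that check with illustrative parameter names (not kernel code):

static int ext_irq_deliverable(unsigned long psw_mask, unsigned long cr0,
			       unsigned long psw_bit, unsigned long cr0_submask)
{
	/* both the PSW mask bit and the CR0 submask bit must be set */
	return (psw_mask & psw_bit) && (cr0 & cr0_submask);
}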
diag.c
25 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in diag_release_pages()
26 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE; in diag_release_pages()
72 u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; in __diag_page_ref_service()
73 u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); in __diag_page_ref_service()
171 tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in __diag_time_slice_end_directed()
212 unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; in __diag_ipl_functions()
284 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_diag()
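
The diag.c matches extract the two general-purpose register numbers packed into the low byte of the ipa field and use them to index vcpu->run->s.regs.gprs. A standalone sketch of that decode (illustrative, not kernel code):

static void decode_ipa_regs(unsigned short ipa, unsigned int *r1, unsigned int *r2)
{
	*r1 = (ipa & 0xf0) >> 4;	/* first register number, bits 4-7 */
	*r2 = ipa & 0x0f;		/* second register number, bits 0-3 */
}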
trace-s390.h
44 struct kvm_s390_sie_block *sie_block),
45 TP_ARGS(id, vcpu, sie_block),
50 __field(struct kvm_s390_sie_block *, sie_block)
56 __entry->sie_block = sie_block;
60 __entry->id, __entry->vcpu, __entry->sie_block)
gaccess.c
267 if (vcpu->arch.sie_block->eca & ECA_SII) { in ipte_lock_held()
366 if (vcpu->arch.sie_block->eca & ECA_SII) in ipte_lock()
374 if (vcpu->arch.sie_block->eca & ECA_SII) in ipte_unlock()
398 asce->val = vcpu->arch.sie_block->gcr[1]; in ar_translation()
401 asce->val = vcpu->arch.sie_block->gcr[7]; in ar_translation()
409 ald_addr = vcpu->arch.sie_block->gcr[5]; in ar_translation()
411 ald_addr = vcpu->arch.sie_block->gcr[2]; in ar_translation()
444 eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff; in ar_translation()
542 tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as; in trans_exc_ending()
570 struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw); in get_vcpu_asce()
[all …]
sigp.c
76 psw = &dst_vcpu->arch.sie_block->gpsw; in __sigp_conditional_emergency()
77 p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ in __sigp_conditional_emergency()
78 s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ in __sigp_conditional_emergency()
427 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in kvm_s390_handle_sigp()
428 int r3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_sigp()
435 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_sigp()
478 int r3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_sigp_pei()
gaccess.h
89 return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga); in kvm_s390_logical_to_effective()
278 u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key; in write_guest()
322 u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key; in read_guest()
345 u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key; in read_guest_instr()
vsie.c
297 int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK; in shadow_crycb()
302 apie_h = vcpu->arch.sie_block->eca & ECA_APIE; in shadow_crycb()
325 ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & in shadow_crycb()
327 ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC; in shadow_crycb()
1012 cr0.val = vcpu->arch.sie_block->gcr[0]; in vsie_handle_mvpg()
1106 vcpu->arch.sie_block->fpf & FPF_BPBC) in do_vsie_run()
1119 vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; in do_vsie_run()
1126 vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE; in do_vsie_run()
1187 asce = vcpu->arch.sie_block->gcr[1]; in acquire_gmap_shadow()
1188 cr0.val = vcpu->arch.sie_block->gcr[0]; in acquire_gmap_shadow()
[all …]
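
In the vsie.c (nested SIE) matches, shadow_crycb() intersects what the nested guest's control block requests with what the host's sie_block enables, so the shadowed control block never grants more than both sides support. A tiny sketch of that pattern with illustrative names (not kernel code):

static unsigned char shadow_ctrl_bits(unsigned char nested_bits,
				      unsigned char host_bits,
				      unsigned char supported_mask)
{
	/* keep only bits requested by the nested guest, enabled on the
	 * host, and allowed for shadowing */
	return nested_bits & host_bits & supported_mask;
}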
trace.h
27 __entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
28 __entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \
/linux-5.19.10/arch/s390/kernel/
nmi.c
365 struct kvm_s390_sie_block *sie_block = in s390_backup_mcck_info() local
368 if (sie_block == NULL) in s390_backup_mcck_info()
372 sie_page = container_of(sie_block, struct sie_page, sie_block); in s390_backup_mcck_info()
perf_event.c
26 static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) in sie_block() function
49 return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE; in guest_is_user_mode()
54 return sie_block(regs)->gpsw.addr; in instruction_pointer_guest()
/linux-5.19.10/arch/s390/include/asm/
kvm_host.h
132 #define sida_origin(sie_block) \ argument
133 ((sie_block)->sidad & PAGE_MASK)
134 #define sida_size(sie_block) \ argument
135 ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
357 struct kvm_s390_sie_block sie_block; member
724 struct kvm_s390_sie_block *sie_block; member
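
The sida_origin()/sida_size() macros above unpack the sidad field of the SIE block: a page-aligned origin in the upper bits and a "number of pages minus one" count in the low bits. A self-contained sketch follows; MY_PAGE_SIZE and MY_SIDAD_SIZE_MASK are placeholder values assumed for illustration, not the kernel's definitions:

#define MY_PAGE_SIZE		4096UL
#define MY_PAGE_MASK		(~(MY_PAGE_SIZE - 1))
#define MY_SIDAD_SIZE_MASK	0xffUL	/* assumed width of the size field */

static unsigned long my_sida_origin(unsigned long sidad)
{
	return sidad & MY_PAGE_MASK;	/* page-aligned origin */
}

static unsigned long my_sida_size(unsigned long sidad)
{
	return ((sidad & MY_SIDAD_SIZE_MASK) + 1) * MY_PAGE_SIZE;
}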
/linux-5.19.10/drivers/s390/crypto/
vfio_ap_ops.c
402 if (!(vcpu->arch.sie_block->eca & ECA_AIV)) { in handle_pqap()
404 __func__, apqn, vcpu->arch.sie_block->eca); in handle_pqap()