/linux-6.6.21/arch/x86/kvm/ |
D | emulate.c |
    198  int (*execute)(struct x86_emulate_ctxt *ctxt);
    207  int (*check_perm)(struct x86_emulate_ctxt *ctxt);
    246  static void writeback_registers(struct x86_emulate_ctxt *ctxt)  in writeback_registers() argument
    248  unsigned long dirty = ctxt->regs_dirty;  in writeback_registers()
    252  ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);  in writeback_registers()
    255  static void invalidate_registers(struct x86_emulate_ctxt *ctxt)  in invalidate_registers() argument
    257  ctxt->regs_dirty = 0;  in invalidate_registers()
    258  ctxt->regs_valid = 0;  in invalidate_registers()
    291  static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
    466  static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,  in emulator_check_intercept() argument
    [all …]
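The writeback_registers() rows above show the emulator's dirty-register bookkeeping: only GPRs flagged in regs_dirty are pushed back through the host's write_gpr() callback, and invalidate_registers() clears both masks. A minimal standalone sketch of that pattern follows; it is not the kernel code, and the fixed register count, the bitmask width, and the plain array standing in for ops->write_gpr() are assumptions for illustration.

    /* Standalone sketch: write back only the GPRs whose bit is set in a dirty mask. */
    #include <stdint.h>
    #include <stdio.h>

    #define NR_GPRS 16

    struct emu_ctxt {
            uint32_t regs_dirty;            /* bit n set => shadow reg n was modified */
            unsigned long shadow[NR_GPRS];  /* emulator's private copy (_regs in the kernel) */
            unsigned long hw[NR_GPRS];      /* stands in for the ops->write_gpr() target */
    };

    static void writeback_registers(struct emu_ctxt *c)
    {
            for (unsigned reg = 0; reg < NR_GPRS; reg++)
                    if (c->regs_dirty & (1u << reg))
                            c->hw[reg] = c->shadow[reg];   /* kernel calls ops->write_gpr() here */
            c->regs_dirty = 0;
    }

    int main(void)
    {
            struct emu_ctxt c = { .regs_dirty = (1u << 3) | (1u << 5) };
            c.shadow[3] = 42;
            c.shadow[5] = 7;
            writeback_registers(&c);
            printf("hw[3]=%lu hw[5]=%lu\n", c.hw[3], c.hw[5]);
            return 0;
    }

The point of the mask is that the emulator touches only a few registers per instruction, so writing back just the dirty ones keeps the exit path cheap.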
|
D | kvm_emulate.h |
    92   void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
    98   ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
    105  void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
    114  int (*read_std)(struct x86_emulate_ctxt *ctxt,
    127  int (*write_std)(struct x86_emulate_ctxt *ctxt,
    137  int (*fetch)(struct x86_emulate_ctxt *ctxt,
    147  int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
    158  int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
    171  int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
    177  void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
    [all …]
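These rows are the x86_emulate_ops callback table: every host service the emulator needs (register access, memory reads and writes, TLB invalidation) is reached through a function pointer that takes the emulation context as its first argument. A standalone sketch of that callback-table idea is below; the two-hook table and its names are invented for illustration, not the kernel's full interface.

    /* Standalone sketch: an emulator core that only sees a context plus host hooks. */
    #include <stdio.h>

    struct emu_ctxt;    /* forward declaration so the ops table can reference it */

    struct emu_ops {
            unsigned long (*read_gpr)(struct emu_ctxt *c, unsigned reg);
            void (*write_gpr)(struct emu_ctxt *c, unsigned reg, unsigned long val);
    };

    struct emu_ctxt {
            const struct emu_ops *ops;
            unsigned long gprs[16];
    };

    static unsigned long host_read_gpr(struct emu_ctxt *c, unsigned reg)
    {
            return c->gprs[reg];
    }

    static void host_write_gpr(struct emu_ctxt *c, unsigned reg, unsigned long val)
    {
            c->gprs[reg] = val;
    }

    static const struct emu_ops host_ops = {
            .read_gpr  = host_read_gpr,
            .write_gpr = host_write_gpr,
    };

    int main(void)
    {
            struct emu_ctxt c = { .ops = &host_ops };
            c.ops->write_gpr(&c, 0, 123);      /* emulator core never touches host state directly */
            printf("%lu\n", c.ops->read_gpr(&c, 0));
            return 0;
    }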
|
/linux-6.6.21/arch/arm64/kvm/hyp/include/hyp/ |
D | sysreg-sr.h |
    19   static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_common_state() argument
    21   ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);  in __sysreg_save_common_state()
    24   static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_user_state() argument
    26   ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);  in __sysreg_save_user_state()
    27   ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);  in __sysreg_save_user_state()
    30   static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)  in ctxt_has_mte() argument
    32   struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;  in ctxt_has_mte()
    35   vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);  in ctxt_has_mte()
    40   static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_el1_state() argument
    42   ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);  in __sysreg_save_el1_state()
    [all …]
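ctxt_has_mte() above recovers the owning vcpu from a kvm_cpu_context, either through the __hyp_running_vcpu back-pointer or, failing that, with container_of() on the embedded arch.ctxt member. A standalone sketch of that container_of() recovery step is below; the types are illustrative stand-ins for kvm_vcpu and kvm_cpu_context.

    /* Standalone sketch: recover the enclosing object from a pointer to an embedded member. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cpu_context {
            unsigned long sys_regs[4];
    };

    struct vcpu {
            int id;
            struct cpu_context ctxt;   /* embedded, like struct kvm_vcpu::arch.ctxt */
    };

    int main(void)
    {
            struct vcpu v = { .id = 7 };
            struct cpu_context *ctxt = &v.ctxt;

            /* Only the context pointer is in hand; walk back to the owning vcpu. */
            struct vcpu *owner = container_of(ctxt, struct vcpu, ctxt);
            printf("owner id = %d\n", owner->id);
            return 0;
    }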
|
D | switch.h |
    272  &vcpu->arch.ctxt.fp_regs.fpsr);  in __hyp_sve_restore_guest()
    332  __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);  in kvm_hyp_handle_fpsimd()
    417  #define __ptrauth_save_key(ctxt, key) \  argument
    421  ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val; \
    423  ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val; \
    430  struct kvm_cpu_context *ctxt;  in kvm_hyp_handle_ptrauth() local
    436  ctxt = this_cpu_ptr(&kvm_hyp_ctxt);  in kvm_hyp_handle_ptrauth()
    437  __ptrauth_save_key(ctxt, APIA);  in kvm_hyp_handle_ptrauth()
    438  __ptrauth_save_key(ctxt, APIB);  in kvm_hyp_handle_ptrauth()
    439  __ptrauth_save_key(ctxt, APDA);  in kvm_hyp_handle_ptrauth()
    [all …]
|
/linux-6.6.21/net/sunrpc/xprtrdma/ |
D | svc_rdma_recvfrom.c |
    129  struct svc_rdma_recv_ctxt *ctxt;  in svc_rdma_recv_ctxt_alloc() local
    133  ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);  in svc_rdma_recv_ctxt_alloc()
    134  if (!ctxt)  in svc_rdma_recv_ctxt_alloc()
    144  svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);  in svc_rdma_recv_ctxt_alloc()
    145  pcl_init(&ctxt->rc_call_pcl);  in svc_rdma_recv_ctxt_alloc()
    146  pcl_init(&ctxt->rc_read_pcl);  in svc_rdma_recv_ctxt_alloc()
    147  pcl_init(&ctxt->rc_write_pcl);  in svc_rdma_recv_ctxt_alloc()
    148  pcl_init(&ctxt->rc_reply_pcl);  in svc_rdma_recv_ctxt_alloc()
    150  ctxt->rc_recv_wr.next = NULL;  in svc_rdma_recv_ctxt_alloc()
    151  ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;  in svc_rdma_recv_ctxt_alloc()
    [all …]
|
D | svc_rdma_sendto.c |
    127  struct svc_rdma_send_ctxt *ctxt;  in svc_rdma_send_ctxt_alloc() local
    132  ctxt = kmalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),  in svc_rdma_send_ctxt_alloc()
    134  if (!ctxt)  in svc_rdma_send_ctxt_alloc()
    144  svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);  in svc_rdma_send_ctxt_alloc()
    146  ctxt->sc_send_wr.next = NULL;  in svc_rdma_send_ctxt_alloc()
    147  ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;  in svc_rdma_send_ctxt_alloc()
    148  ctxt->sc_send_wr.sg_list = ctxt->sc_sges;  in svc_rdma_send_ctxt_alloc()
    149  ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;  in svc_rdma_send_ctxt_alloc()
    150  ctxt->sc_cqe.done = svc_rdma_wc_send;  in svc_rdma_send_ctxt_alloc()
    151  ctxt->sc_xprt_buf = buffer;  in svc_rdma_send_ctxt_alloc()
    [all …]
|
D | svc_rdma_rw.c |
    56   struct svc_rdma_rw_ctxt *ctxt;  in svc_rdma_get_rw_ctxt() local
    63   ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);  in svc_rdma_get_rw_ctxt()
    65   ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),  in svc_rdma_get_rw_ctxt()
    67   if (!ctxt)  in svc_rdma_get_rw_ctxt()
    70   INIT_LIST_HEAD(&ctxt->rw_list);  in svc_rdma_get_rw_ctxt()
    73   ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;  in svc_rdma_get_rw_ctxt()
    74   if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,  in svc_rdma_get_rw_ctxt()
    75   ctxt->rw_sg_table.sgl,  in svc_rdma_get_rw_ctxt()
    78   return ctxt;  in svc_rdma_get_rw_ctxt()
    81   kfree(ctxt);  in svc_rdma_get_rw_ctxt()
    [all …]
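svc_rdma_get_rw_ctxt() pops a recycled context off a lock-free llist and only falls back to kmalloc_node() when the cache is empty. A standalone, single-threaded sketch of that free-list-with-fallback pattern is below; the kernel uses llist_del_first() and per-NUMA-node allocation, while the names and the plain singly linked list here are invented for illustration.

    /* Standalone sketch: reuse cached contexts, allocate only when the cache is empty. */
    #include <stdio.h>
    #include <stdlib.h>

    struct rw_ctxt {
            struct rw_ctxt *next;   /* free-list linkage, plays the role of rw_node */
            int nents;
    };

    static struct rw_ctxt *free_list;

    static struct rw_ctxt *get_rw_ctxt(int nents)
    {
            struct rw_ctxt *ctxt = free_list;

            if (ctxt)
                    free_list = ctxt->next;        /* fast path: recycle a cached context */
            else
                    ctxt = malloc(sizeof(*ctxt));  /* slow path: allocate a fresh one */
            if (!ctxt)
                    return NULL;
            ctxt->nents = nents;
            return ctxt;
    }

    static void put_rw_ctxt(struct rw_ctxt *ctxt)
    {
            ctxt->next = free_list;                /* recycle instead of freeing */
            free_list = ctxt;
    }

    int main(void)
    {
            struct rw_ctxt *a = get_rw_ctxt(4);
            put_rw_ctxt(a);
            struct rw_ctxt *b = get_rw_ctxt(8);    /* comes back from the cache */
            printf("reused: %s\n", a == b ? "yes" : "no");
            free(b);
            return 0;
    }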
|
/linux-6.6.21/drivers/net/ethernet/intel/ice/ |
D | ice_vsi_vlan_lib.c |
    93   struct ice_vsi_ctx *ctxt;  in ice_vsi_manage_vlan_insertion() local
    96   ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_manage_vlan_insertion()
    97   if (!ctxt)  in ice_vsi_manage_vlan_insertion()
    104  ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;  in ice_vsi_manage_vlan_insertion()
    107  ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &  in ice_vsi_manage_vlan_insertion()
    110  ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);  in ice_vsi_manage_vlan_insertion()
    112  err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);  in ice_vsi_manage_vlan_insertion()
    119  vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;  in ice_vsi_manage_vlan_insertion()
    121  kfree(ctxt);  in ice_vsi_manage_vlan_insertion()
    133  struct ice_vsi_ctx *ctxt;  in ice_vsi_manage_vlan_stripping() local
    [all …]
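ice_vsi_manage_vlan_insertion() builds a scratch ice_vsi_ctx, fills in only the inner VLAN flags, marks the VLAN section valid, submits it with ice_update_vsi(), and on success copies the accepted flags back into the cached VSI info before freeing the scratch context. A standalone sketch of that submit-and-copy-back shape is below; the types, the flag value, and the stubbed update call are all stand-ins, not the driver's definitions.

    /* Standalone sketch: fill a scratch context, mark the changed section valid,
     * submit it, then copy the accepted values back into the cached info. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PROP_VLAN_VALID 0x0001      /* stands in for ICE_AQ_VSI_PROP_VLAN_VALID */

    struct vsi_info {
            uint8_t  inner_vlan_flags;
            uint16_t valid_sections;
    };

    struct vsi { struct vsi_info info; };

    /* Stand-in for ice_update_vsi(): pretend the firmware accepted the update. */
    static int update_vsi(struct vsi *vsi, struct vsi_info *req)
    {
            (void)vsi; (void)req;
            return 0;
    }

    static int manage_vlan_insertion(struct vsi *vsi, uint8_t flags)
    {
            struct vsi_info *ctxt = calloc(1, sizeof(*ctxt));
            int err;

            if (!ctxt)
                    return -1;

            ctxt->inner_vlan_flags = flags;            /* only this section changes */
            ctxt->valid_sections   = PROP_VLAN_VALID;  /* tell the firmware which one */

            err = update_vsi(vsi, ctxt);
            if (!err)
                    vsi->info.inner_vlan_flags = ctxt->inner_vlan_flags;   /* copy back */

            free(ctxt);
            return err;
    }

    int main(void)
    {
            struct vsi v = { { 0, 0 } };
            manage_vlan_insertion(&v, 0x3);
            printf("inner_vlan_flags=%#x\n", v.info.inner_vlan_flags);
            return 0;
    }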
|
/linux-6.6.21/arch/x86/power/ |
D | cpu.c |
    39   static void msr_save_context(struct saved_context *ctxt)  in msr_save_context() argument
    41   struct saved_msr *msr = ctxt->saved_msrs.array;  in msr_save_context()
    42   struct saved_msr *end = msr + ctxt->saved_msrs.num;  in msr_save_context()
    51   static void msr_restore_context(struct saved_context *ctxt)  in msr_restore_context() argument
    53   struct saved_msr *msr = ctxt->saved_msrs.array;  in msr_restore_context()
    54   struct saved_msr *end = msr + ctxt->saved_msrs.num;  in msr_restore_context()
    79   static void __save_processor_state(struct saved_context *ctxt)  in __save_processor_state() argument
    89   store_idt(&ctxt->idt);  in __save_processor_state()
    97   ctxt->gdt_desc.size = GDT_SIZE - 1;  in __save_processor_state()
    98   ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());  in __save_processor_state()
    [all …]
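msr_save_context() and msr_restore_context() walk the saved_msrs array attached to the suspend-time saved_context, recording each MSR on the way down and writing entries back on resume. A standalone sketch of that loop is below, with rdmsr/wrmsr replaced by a fake in-memory MSR store; the valid flag and the two-entry array are assumptions made for the example.

    /* Standalone sketch: save an array of MSR slots on suspend, restore them on resume. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct saved_msr {
            uint32_t msr_no;
            uint64_t value;
            bool valid;
    };

    static uint64_t fake_msr[2] = { 0x11, 0x22 };   /* stands in for rdmsrl()/wrmsrl() */

    static void msr_save_context(struct saved_msr *msr, int num)
    {
            for (struct saved_msr *end = msr + num; msr < end; msr++) {
                    msr->value = fake_msr[msr->msr_no];   /* rdmsrl() in the kernel */
                    msr->valid = true;
            }
    }

    static void msr_restore_context(struct saved_msr *msr, int num)
    {
            for (struct saved_msr *end = msr + num; msr < end; msr++)
                    if (msr->valid)
                            fake_msr[msr->msr_no] = msr->value;   /* wrmsrl() in the kernel */
    }

    int main(void)
    {
            struct saved_msr saved[2] = { { .msr_no = 0 }, { .msr_no = 1 } };

            msr_save_context(saved, 2);
            fake_msr[0] = 0;                 /* pretend the value was lost across suspend */
            msr_restore_context(saved, 2);
            printf("msr0=%#llx\n", (unsigned long long)fake_msr[0]);
            return 0;
    }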
|
/linux-6.6.21/drivers/net/wireless/intel/iwlwifi/mvm/ |
D | phy-ctxt.c |
    69   static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_cmd_hdr() argument
    73   cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,  in iwl_mvm_phy_ctxt_cmd_hdr()
    74   ctxt->color));  in iwl_mvm_phy_ctxt_cmd_hdr()
    79   struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_set_rxchain() argument
    97   if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {  in iwl_mvm_phy_ctxt_set_rxchain()
    117  struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_cmd_data_v1() argument
    128  iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,  in iwl_mvm_phy_ctxt_cmd_data_v1()
    138  struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_cmd_data() argument
    151  iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,  in iwl_mvm_phy_ctxt_cmd_data()
    155  int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_send_rlc() argument
    [all …]
|
/linux-6.6.21/arch/x86/kernel/ |
D | sev.c |
    272  static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,  in vc_fetch_insn_kernel() argument
    275  return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);  in vc_fetch_insn_kernel()
    278  static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)  in __vc_decode_user_insn() argument
    283  insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);  in __vc_decode_user_insn()
    286  ctxt->fi.vector = X86_TRAP_PF;  in __vc_decode_user_insn()
    287  ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;  in __vc_decode_user_insn()
    288  ctxt->fi.cr2 = ctxt->regs->ip;  in __vc_decode_user_insn()
    292  ctxt->fi.vector = X86_TRAP_GP;  in __vc_decode_user_insn()
    293  ctxt->fi.error_code = 0;  in __vc_decode_user_insn()
    294  ctxt->fi.cr2 = 0;  in __vc_decode_user_insn()
    [all …]
|
D | sev-shared.c |
    177  static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,  in vc_init_em_ctxt() argument
    183  memset(ctxt, 0, sizeof(*ctxt));  in vc_init_em_ctxt()
    184  ctxt->regs = regs;  in vc_init_em_ctxt()
    187  ret = vc_decode_insn(ctxt);  in vc_init_em_ctxt()
    192  static void vc_finish_insn(struct es_em_ctxt *ctxt)  in vc_finish_insn() argument
    194  ctxt->regs->ip += ctxt->insn.length;  in vc_finish_insn()
    197  static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)  in verify_exception_info() argument
    213  ctxt->fi.vector = v;  in verify_exception_info()
    216  ctxt->fi.error_code = info >> 32;  in verify_exception_info()
    226  struct es_em_ctxt *ctxt,  in sev_es_ghcb_hv_call() argument
    [all …]
|
/linux-6.6.21/arch/arm64/kvm/hyp/nvhe/ |
D | sysreg-sr.c |
    21   void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_save_state_nvhe() argument
    23   __sysreg_save_el1_state(ctxt);  in __sysreg_save_state_nvhe()
    24   __sysreg_save_common_state(ctxt);  in __sysreg_save_state_nvhe()
    25   __sysreg_save_user_state(ctxt);  in __sysreg_save_state_nvhe()
    26   __sysreg_save_el2_return_state(ctxt);  in __sysreg_save_state_nvhe()
    29   void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_restore_state_nvhe() argument
    31   __sysreg_restore_el1_state(ctxt);  in __sysreg_restore_state_nvhe()
    32   __sysreg_restore_common_state(ctxt);  in __sysreg_restore_state_nvhe()
    33   __sysreg_restore_user_state(ctxt);  in __sysreg_restore_state_nvhe()
    34   __sysreg_restore_el2_return_state(ctxt);  in __sysreg_restore_state_nvhe()
|
D | ffa.c |
    94   static void ffa_set_retval(struct kvm_cpu_context *ctxt,  in ffa_set_retval() argument
    97   cpu_reg(ctxt, 0) = res->a0;  in ffa_set_retval()
    98   cpu_reg(ctxt, 1) = res->a1;  in ffa_set_retval()
    99   cpu_reg(ctxt, 2) = res->a2;  in ffa_set_retval()
    100  cpu_reg(ctxt, 3) = res->a3;  in ffa_set_retval()
    181  struct kvm_cpu_context *ctxt)  in do_ffa_rxtx_map() argument
    183  DECLARE_REG(phys_addr_t, tx, ctxt, 1);  in do_ffa_rxtx_map()
    184  DECLARE_REG(phys_addr_t, rx, ctxt, 2);  in do_ffa_rxtx_map()
    185  DECLARE_REG(u32, npages, ctxt, 3);  in do_ffa_rxtx_map()
    260  struct kvm_cpu_context *ctxt)  in do_ffa_rxtx_unmap() argument
    [all …]
|
/linux-6.6.21/arch/arm64/kvm/hyp/vhe/ |
D | sysreg-sr.c |
    28   void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_host_state_vhe() argument
    30   __sysreg_save_common_state(ctxt);  in sysreg_save_host_state_vhe()
    34   void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_guest_state_vhe() argument
    36   __sysreg_save_common_state(ctxt);  in sysreg_save_guest_state_vhe()
    37   __sysreg_save_el2_return_state(ctxt);  in sysreg_save_guest_state_vhe()
    41   void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_restore_host_state_vhe() argument
    43   __sysreg_restore_common_state(ctxt);  in sysreg_restore_host_state_vhe()
    47   void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_restore_guest_state_vhe() argument
    49   __sysreg_restore_common_state(ctxt);  in sysreg_restore_guest_state_vhe()
    50   __sysreg_restore_el2_return_state(ctxt);  in sysreg_restore_guest_state_vhe()
    [all …]
|
/linux-6.6.21/arch/arm64/include/asm/ |
D | kvm_asm.h |
    303  .macro get_vcpu_ptr vcpu, ctxt
    304  get_host_ctxt \ctxt, \vcpu
    305  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    308  .macro get_loaded_vcpu vcpu, ctxt
    309  adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
    310  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    313  .macro set_loaded_vcpu vcpu, ctxt, tmp
    314  adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
    315  str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    342  .macro save_callee_saved_regs ctxt
    [all …]
|
D | kvm_emulate.h |
    193  static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)  in vcpu_is_el2_ctxt() argument
    195  switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {  in vcpu_is_el2_ctxt()
    206  return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);  in vcpu_is_el2()
    209  static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)  in __vcpu_el2_e2h_is_set() argument
    211  return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;  in __vcpu_el2_e2h_is_set()
    216  return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);  in vcpu_el2_e2h_is_set()
    219  static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)  in __vcpu_el2_tge_is_set() argument
    221  return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;  in __vcpu_el2_tge_is_set()
    226  return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);  in vcpu_el2_tge_is_set()
    229  static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)  in __is_hyp_ctxt() argument
    [all …]
|
/linux-6.6.21/drivers/infiniband/hw/hfi1/ |
D | trace_ctxts.h |
    25   __field(unsigned int, ctxt)
    37   __entry->ctxt = uctxt->ctxt;
    50   __entry->ctxt,
    66   TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
    69   TP_ARGS(dd, ctxt, subctxt, cinfo),
    71   __field(unsigned int, ctxt)
    80   __entry->ctxt = ctxt;
    90   __entry->ctxt,
    100  const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
    102  TP_PROTO(unsigned int ctxt),
    [all …]
|
D | trace_rx.h |
    29   __field(u32, ctxt)
    38   __entry->ctxt = packet->rcd->ctxt;
    48   __entry->ctxt,
    62   __field(u32, ctxt)
    67   __entry->ctxt = rcd->ctxt;
    73   __entry->ctxt,
    80   TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
    82   TP_ARGS(ctxt, subctxt, type, start, end),
    84   __field(unsigned int, ctxt)
    91   __entry->ctxt = ctxt;
    [all …]
|
D | trace_tx.h |
    170  TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
    171  TP_ARGS(dd, ctxt, subctxt),
    173  __field(u16, ctxt)
    177  __entry->ctxt = ctxt;
    182  __entry->ctxt,
    188  TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
    190  TP_ARGS(dd, ctxt, subctxt, comp_idx),
    192  __field(u16, ctxt)
    197  __entry->ctxt = ctxt;
    203  __entry->ctxt,
    [all …]
|
/linux-6.6.21/arch/x86/xen/ |
D | smp_pv.c |
    255  struct vcpu_guest_context *ctxt;  in cpu_initialize_context() local
    262  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in cpu_initialize_context()
    263  if (ctxt == NULL) {  in cpu_initialize_context()
    275  ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;  in cpu_initialize_context()
    276  ctxt->flags = VGCF_IN_KERNEL;  in cpu_initialize_context()
    277  ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */  in cpu_initialize_context()
    278  ctxt->user_regs.ds = __USER_DS;  in cpu_initialize_context()
    279  ctxt->user_regs.es = __USER_DS;  in cpu_initialize_context()
    280  ctxt->user_regs.ss = __KERNEL_DS;  in cpu_initialize_context()
    281  ctxt->user_regs.cs = __KERNEL_CS;  in cpu_initialize_context()
    [all …]
|
D | pmu.c |
    30   #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \  argument
    31   (uintptr_t)ctxt->field))
    202  struct xen_pmu_intel_ctxt *ctxt;  in xen_intel_pmu_emulate() local
    212  ctxt = &xenpmu_data->pmu.c.intel;  in xen_intel_pmu_emulate()
    216  reg = &ctxt->global_ovf_ctrl;  in xen_intel_pmu_emulate()
    219  reg = &ctxt->global_status;  in xen_intel_pmu_emulate()
    222  reg = &ctxt->global_ctrl;  in xen_intel_pmu_emulate()
    225  reg = &ctxt->fixed_ctrl;  in xen_intel_pmu_emulate()
    230  fix_counters = field_offset(ctxt, fixed_counters);  in xen_intel_pmu_emulate()
    234  arch_cntr_pair = field_offset(ctxt, arch_counters);  in xen_intel_pmu_emulate()
    [all …]
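The field_offset() macro above exists because the shared Xen PMU context stores some members (fixed_counters, arch_counters) as byte offsets from the start of the structure rather than as pointers, and the macro adds that offset back to the context base address. A standalone sketch of that offset-to-pointer conversion is below; the structure layout is invented for the example, only the macro mirrors the one shown.

    /* Standalone sketch: a structure member holds a byte offset, and the macro
     * turns it back into a usable pointer relative to the structure itself. */
    #include <stdint.h>
    #include <stdio.h>

    #define field_offset(ctxt, field) \
            ((void *)((uintptr_t)(ctxt) + (uintptr_t)(ctxt)->field))

    struct intel_ctxt {
            uint32_t fixed_counters;   /* byte offset of the counter array below */
            uint64_t counters[3];
    };

    int main(void)
    {
            struct intel_ctxt c = { .counters = { 10, 20, 30 } };

            /* Record where the counters live, expressed as an offset from &c. */
            c.fixed_counters = (uint32_t)((char *)c.counters - (char *)&c);

            uint64_t *fix = field_offset(&c, fixed_counters);
            printf("%llu\n", (unsigned long long)fix[1]);   /* prints 20 */
            return 0;
    }

Storing offsets instead of pointers keeps the shared page position-independent, which matters because the hypervisor and the kernel map it at different virtual addresses.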
|
/linux-6.6.21/fs/nilfs2/ |
D | btnode.c |
    174  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_prepare_change_key() argument
    178  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_prepare_change_key()
    184  obh = ctxt->bh;  in nilfs_btnode_prepare_change_key()
    185  ctxt->newbh = NULL;  in nilfs_btnode_prepare_change_key()
    224  ctxt->newbh = nbh;  in nilfs_btnode_prepare_change_key()
    237  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_commit_change_key() argument
    239  struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;  in nilfs_btnode_commit_change_key()
    240  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_commit_change_key()
    267  ctxt->bh = nbh;  in nilfs_btnode_commit_change_key()
    277  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_abort_change_key()  argument
    [all …]
|
/linux-6.6.21/arch/arm64/kvm/hyp/include/nvhe/ |
D | trap_handler.h |
    14   #define cpu_reg(ctxt, r) (ctxt)->regs.regs[r]  argument
    15   #define DECLARE_REG(type, name, ctxt, reg) \  argument
    16   type name = (type)cpu_reg(ctxt, (reg))
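These two macros are how nVHE hypercall and trap handlers read their arguments: cpu_reg() indexes the saved general-purpose registers of a kvm_cpu_context, and DECLARE_REG() binds one of them to a typed local (compare the do_ffa_rxtx_map() rows under ffa.c above). A standalone sketch of a handler using the same macros is below; the context layout and the handler itself are stand-ins, not kernel definitions.

    /* Standalone sketch: trap-handler arguments arrive in the saved GPRs of the
     * context and the result is written back the same way. */
    #include <stdint.h>
    #include <stdio.h>

    struct cpu_context {
            struct { uint64_t regs[8]; } regs;   /* stand-in for the saved GPR file */
    };

    #define cpu_reg(ctxt, r)        (ctxt)->regs.regs[r]
    #define DECLARE_REG(type, name, ctxt, reg) \
            type name = (type)cpu_reg(ctxt, (reg))

    static void handle_map(struct cpu_context *ctxt)
    {
            DECLARE_REG(uint64_t, tx, ctxt, 1);      /* x1: TX buffer address */
            DECLARE_REG(uint64_t, rx, ctxt, 2);      /* x2: RX buffer address */
            DECLARE_REG(uint32_t, npages, ctxt, 3);  /* x3: buffer size in pages */

            printf("map tx=%#llx rx=%#llx npages=%u\n",
                   (unsigned long long)tx, (unsigned long long)rx, npages);

            cpu_reg(ctxt, 0) = 0;                    /* return value goes back in x0 */
    }

    int main(void)
    {
            struct cpu_context c = { .regs = { .regs = { 0, 0x1000, 0x2000, 4 } } };
            handle_map(&c);
            return 0;
    }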
|
/linux-6.6.21/arch/x86/boot/compressed/ |
D | sev.c |
    76   static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)  in vc_decode_insn() argument
    81   memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);  in vc_decode_insn()
    83   ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);  in vc_decode_insn()
    90   static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,  in vc_write_mem() argument
    98   static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,  in vc_read_mem() argument
    106  static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)  in vc_ioio_check() argument
    296  struct es_em_ctxt ctxt;  in do_boot_stage2_vc() local
    303  result = vc_init_em_ctxt(&ctxt, regs, exit_code);  in do_boot_stage2_vc()
    310  result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);  in do_boot_stage2_vc()
    313  result = vc_handle_ioio(boot_ghcb, &ctxt);  in do_boot_stage2_vc()
    [all …]
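do_boot_stage2_vc() shows the overall #VC shape in the decompressor: initialise an es_em_ctxt from the trapping registers, decode the instruction, dispatch on the hardware exit code (RDTSC, IOIO, ...), then advance the saved IP past the emulated instruction. A standalone sketch of that dispatch skeleton follows; the types, helper bodies, and exit-code values are illustrative assumptions, not the real SEV-ES definitions.

    /* Standalone sketch: init an emulation context, dispatch on the exit code,
     * then skip past the emulated instruction on success. */
    #include <stdint.h>
    #include <stdio.h>

    enum es_result { ES_OK, ES_UNSUPPORTED };

    struct regs { uint64_t ip; };
    struct em_ctxt { struct regs *regs; unsigned insn_len; };

    #define EXIT_RDTSC 0x6e   /* illustrative value */
    #define EXIT_IOIO  0x7b   /* illustrative value */

    static enum es_result vc_init_em_ctxt(struct em_ctxt *ctxt, struct regs *regs)
    {
            ctxt->regs = regs;
            ctxt->insn_len = 2;        /* pretend the decoder found a 2-byte instruction */
            return ES_OK;
    }

    static void vc_finish_insn(struct em_ctxt *ctxt)
    {
            ctxt->regs->ip += ctxt->insn_len;    /* skip the emulated instruction */
    }

    static enum es_result handle_vc(struct regs *regs, unsigned long exit_code)
    {
            struct em_ctxt ctxt;
            enum es_result result = vc_init_em_ctxt(&ctxt, regs);

            if (result != ES_OK)
                    return result;

            switch (exit_code) {
            case EXIT_RDTSC:
            case EXIT_IOIO:
                    result = ES_OK;              /* the real code emulates the access here */
                    break;
            default:
                    result = ES_UNSUPPORTED;
            }

            if (result == ES_OK)
                    vc_finish_insn(&ctxt);
            return result;
    }

    int main(void)
    {
            struct regs r = { .ip = 0x1000 };
            handle_vc(&r, EXIT_IOIO);
            printf("ip=%#llx\n", (unsigned long long)r.ip);
            return 0;
    }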
|