#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

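/*
 * CR0/CR4 bits that the vendor module may allow the guest to own,
 * i.e. to read and write without a VM exit.  Only these bits can be
 * stale in vcpu->arch and thus ever need decaching from hardware.
 */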
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

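/*
 * Lazily read a GPR: if the register is not marked available in the
 * software cache, have the vendor module (VMX/SVM) fetch it from
 * hardware state first.
 */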
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

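/*
 * Write a GPR to the software cache and mark it both available and
 * dirty, so the vendor module flushes it to hardware state before
 * the next VM entry.
 */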
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

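/* RIP is cached like any other register; these wrappers fix the index. */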
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

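/*
 * Read a cached PDPTE.  Refilling the cache may require reading
 * guest memory, which can sleep on SVM, hence the might_sleep()
 * annotation.
 */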
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

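/*
 * Read a PDPTE through an explicit MMU context (e.g. the nested
 * MMU), reloading the PDPTEs from that context's CR3 first.
 */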
static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, int index)
{
	load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));

	return mmu->pdptrs[index];
}

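/*
 * If any requested CR0 bit may be guest-owned, the cached value can
 * be stale; decache those bits from hardware before masking.
 */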
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

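/* Same as kvm_read_cr0_bits(), but for the guest-ownable CR4 bits. */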
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

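/*
 * CR3 is cached like a register: decache it from hardware state if
 * the software copy is not marked available.
 */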
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

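/*
 * Combine EDX:EAX into a u64, the result format of instructions such
 * as RDMSR and RDTSC.  The "& -1u" masks each register down to its
 * low 32 bits, which matters in 64-bit mode.
 */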
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

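/*
 * HF_GUEST_MASK tracks whether the vCPU is currently running a
 * nested (L2) guest.
 */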
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif /* ASM_KVM_CACHE_REGS_H */