/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PARA_H__
#define __POWERPC_KVM_PARA_H__

#include <linux/types.h>

/*
 * Additions to this struct must only occur at the end, and should be
 * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
 * (albeit not necessarily relevant to the current target hardware platform).
 *
 * Struct fields are always 32 or 64 bit aligned, depending on them being 32
 * or 64 bit wide respectively.
 *
 * See Documentation/virtual/kvm/ppc-pv.txt
 */
struct kvm_vcpu_arch_shared {
	__u64 scratch1;
	__u64 scratch2;
	__u64 scratch3;
	__u64 critical;		/* Guest may not get interrupts if == r1 */
	__u64 sprg0;
	__u64 sprg1;
	__u64 sprg2;
	__u64 sprg3;
	__u64 srr0;
	__u64 srr1;
	__u64 dar;		/* dear on BookE */
	__u64 msr;
	__u32 dsisr;
	__u32 int_pending;	/* Tells the guest if we have an interrupt */
	__u32 sr[16];
	__u32 mas0;
	__u32 mas1;
	__u64 mas7_3;
	__u64 mas2;
	__u32 mas4;
	__u32 mas6;
	__u32 esr;
	__u32 pir;

	/*
	 * SPRG4-7 are user-readable, so we can only keep these consistent
	 * between the shared area and the real registers when there's an
	 * intervening exit to KVM.  This also applies to SPRG3 on some
	 * chips.
	 *
	 * This suffices for access by guest userspace, since in PR-mode
	 * KVM, an exit must occur when changing the guest's MSR[PR].
	 * If the guest kernel writes to SPRG3-7 via the shared area, it
	 * must also use the shared area for reading while in kernel space.
	 */
	__u64 sprg4;
	__u64 sprg5;
	__u64 sprg6;
	__u64 sprg7;
};

#define KVM_SC_MAGIC_R0		0x4b564d21 /* "KVM!" */
#define HC_VENDOR_KVM		(42 << 16)
#define HC_EV_SUCCESS		0
#define HC_EV_UNIMPLEMENTED	12
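
/*
 * Hypercall tokens carry a vendor id in the upper 16 bits and a function
 * number in the lower 16 bits; the kvm_hypercallN() wrappers below OR in
 * HC_VENDOR_KVM, so callers only pass the low-order function number.
 */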

#define KVM_FEATURE_MAGIC_PAGE	1

#define KVM_MAGIC_FEAT_SR		(1 << 0)

/* MASn, ESR, PIR, and high SPRGs */
#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7	(1 << 1)
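
/*
 * Fields appended to kvm_vcpu_arch_shared after its original layout (sr[]
 * and the MAS/ESR/PIR/SPRG4-7 group) are only valid when the host
 * advertises the matching KVM_MAGIC_FEAT bit, so guests must check that
 * bit before relying on them.
 */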

#ifdef __KERNEL__

#ifdef CONFIG_KVM_GUEST

#include <linux/of.h>

static inline int kvm_para_available(void)
{
	struct device_node *hyper_node;

	hyper_node = of_find_node_by_path("/hypervisor");
	if (!hyper_node)
		return 0;

	if (!of_device_is_compatible(hyper_node, "linux,kvm"))
		return 0;

	return 1;
}

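/*
 * Low-level hypercall entry point provided by the guest support code.
 * in[] and out[] each have room for up to eight argument/return words;
 * nr must already include the vendor token (the kvm_hypercallN() wrappers
 * below take care of that).
 */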
extern unsigned long kvm_hypercall(unsigned long *in,
				   unsigned long *out,
				   unsigned long nr);

#else

static inline int kvm_para_available(void)
{
	return 0;
}

static inline unsigned long kvm_hypercall(unsigned long *in,
					  unsigned long *out,
					  unsigned long nr)
{
	return HC_EV_UNIMPLEMENTED;
}

#endif

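/* Hypercall with no input arguments and a single output word, returned through *r2. */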
static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
{
	unsigned long in[8];
	unsigned long out[8];
	unsigned long r;

	r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
	*r2 = out[0];

	return r;
}

static inline long kvm_hypercall0(unsigned int nr)
{
	unsigned long in[8];
	unsigned long out[8];

	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	in[3] = p4;
	return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
}


static inline unsigned int kvm_arch_para_features(void)
{
	unsigned long r;

	if (!kvm_para_available())
		return 0;

	if (kvm_hypercall0_1(KVM_HC_FEATURES, &r))
		return 0;

	return r;
}
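
/*
 * Example (a minimal sketch, not part of this ABI): guest code would
 * typically combine the helpers above with kvm_para_has_feature() from
 * <linux/kvm_para.h> when deciding whether to enable paravirtualization:
 *
 *	if (kvm_para_available() &&
 *	    kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
 *		enable_magic_page();	(hypothetical guest-side helper)
 */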

#endif /* __KERNEL__ */

#endif /* __POWERPC_KVM_PARA_H__ */