/*
 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"

#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

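/* Dump all valid guest and shadow TLB entries to the kernel log (debug aid). */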
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Shadow TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" S[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

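/* Pick the next victim way in guest TLB0, cycling round-robin through the
 * KVM_E500_TLB0_WAY_NUM ways. */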
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[0]++;
	if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->guest_tlb_nv[0] = 0;

	return victim;
}

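/* Number of host TLB1 entries available for shadow mappings: everything
 * above the tlbcam_index entries the host keeps for its own CAM mappings. */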
static inline unsigned int tlb1_max_shadow_size(void)
{
	return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

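/* Derive shadow MAS3 permission bits from a guest entry.  The guest runs
 * unprivileged on the host, so guest-supervisor permissions are mirrored
 * into the user permission bits; supervisor access is always granted. */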
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}

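/* Derive shadow MAS2 attribute bits.  On SMP, force the M (coherence) bit
 * so mappings stay coherent when the vcpu migrates between CPUs. */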
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.  The caller must have
 * disabled interrupts and, for TLB1, pointed MAS0 at the target entry.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe)
{
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	__asm__ __volatile__ ("tlbwe\n" : : );
}

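/* Install shadow entry (tlbsel, esel) into the host TLB, saving and
 * restoring the host's MAS0 around the TLB1 case. */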
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	local_irq_disable();
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe);
	} else {
		register unsigned int mas0;

		mas0 = mfspr(SPRN_MAS0);

		mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
		__write_host_tlbe(stlbe);

		mtspr(SPRN_MAS0, mas0);
	}
	local_irq_enable();
}

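/* vcpu_load hook: restore all valid shadow TLB1 entries into the host TLB
 * so the guest takes fewer TLB miss exits after being scheduled in. */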
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int i;
	register unsigned int mas0;

	/* Load all valid TLB1 entries to reduce guest TLB miss faults. */
	local_irq_disable();
	mas0 = mfspr(SPRN_MAS0);
	for (i = 0; i < tlb1_max_shadow_size(); i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

		if (get_tlb_v(stlbe)) {
			mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
					| MAS0_ESEL(to_htlb1_esel(i)));
			__write_host_tlbe(stlbe);
		}
	}
	mtspr(SPRN_MAS0, mas0);
	local_irq_enable();
}

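/* vcpu_put hook: flush the host TLB; TLB1 shadow entries are restored on
 * the next vcpu_load and TLB0 entries fault back in on demand. */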
void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	_tlbil_all();
}

/* Search the guest TLB for a matching entry.  An 'as' of -1 matches
 * either address space. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}

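/* Release the host page backing a shadow entry, marking it dirty if the
 * shadow mapping allowed writes. */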
static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
	struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

	if (page) {
		vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

		if (get_tlb_v(stlbe)) {
			if (tlbe_is_writable(stlbe))
				kvm_release_page_dirty(page);
			else
				kvm_release_page_clean(page);
		}
	}
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
	stlbe->mas1 = 0;
	trace_kvm_stlb_inval(index_of(tlbsel, esel));
}

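/* Invalidate every shadow TLB1 entry that overlaps [eaddr, eend] for the
 * given TID, and push the cleared entries out to the host TLB. */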
static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, gva_t eend, u32 tid)
{
	unsigned int pid = tid & 0xff;
	unsigned int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
		/* Writing the now-cleared entry evicts it from the host TLB. */
		write_host_tlbe(vcpu_e500, 1, i);
	}
}

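/* Load the MAS registers as a real TLB miss exception would, using the
 * defaults programmed into MAS4, so the guest's handler sees the state it
 * expects. */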
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

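/* Pin the guest page at gfn and build a 4KB shadow entry mapping gvaddr to
 * it, with attributes derived from the guest entry. */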
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;

	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	/* Get reference to new page. */
	new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
				(long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

	vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

	/* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = (hpaddr & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

/* XXX only map the one-to-one case; for now use TLB0 */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, tlbsel, esel);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-to-one and one-to-many; for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[1]++;

	if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->guest_tlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

	return victim;
}

/* Invalidate all guest kernel mappings when entering user mode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (usermode) {
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
		int i;

		/* XXX Replace loop with fancy data structures. */
		for (i = 0; i < tlb1_max_shadow_size(); i++)
			kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

		_tlbil_all();
	}
}

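/* Invalidate a guest TLB entry along with its shadow mappings.  Returns -1
 * if the entry is IPROT-protected and therefore must not be invalidated. */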
static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1) {
		kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
				get_tlb_end(gtlbe),
				get_tlb_tid(gtlbe));
	} else {
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	gtlbe->mas1 = 0;

	return 0;
}

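/* Emulate a guest write to MMUCSR0: flash-invalidate TLB0 and/or TLB1. */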
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}

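/* Emulate tlbivax.  EA bit 2 requests invalidate-all and EA bit 3 selects
 * the TLB; otherwise only the entry matching EA and the current PID is
 * invalidated. */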
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	_tlbil_all();

	return EMULATE_DONE;
}

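/* Emulate tlbre: read the guest TLB entry selected by MAS0 back into the
 * MAS registers. */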
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

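/* Emulate tlbsx: search both guest TLBs for the effective address in rb.
 * On a hit the MAS registers describe the entry; on a miss they are loaded
 * with the MAS4 defaults, ready for a subsequent tlbwe. */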
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* Since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	return EMULATE_DONE;
}

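/* Emulate tlbwe: commit the MAS registers to the guest TLB entry selected
 * by MAS0 and, if the new mapping is host-safe, shadow it immediately. */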
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	u64 eaddr;
	u64 raddr;
	u32 tid;
	struct tlbe *gtlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (get_tlb_v(gtlbe) && tlbsel == 1) {
		eaddr = get_tlb_eaddr(gtlbe);
		tid = get_tlb_tid(gtlbe);
		kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
				get_tlb_end(gtlbe), tid);
	}

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Premap the translation if the new entry is safe for the host. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel);
	}

	return EMULATE_DONE;
}

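/* MMU callbacks invoked from the common booke KVM code. */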
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

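/* Translate a guest effective address through the indexed guest TLB entry
 * into a guest physical address. */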
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
			kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

	/* Discard all guest mappings. */
	_tlbil_all();
}

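/* Fault-time mapping: shadow the guest TLB entry at 'index' for
 * eaddr/gpaddr and install the result in the host TLB. */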
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		struct tlbe *gtlbe
			= &vcpu_e500->guest_tlb[tlbsel][esel];

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
		break;
	}

	default:
		BUG();
		break;
	}
	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

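/* Allocate the guest/shadow TLB arrays and shadow page tracking, and set
 * up the TLB geometry advertised to the guest via TLBnCFG. */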
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	/* The low 12 bits of TLB1CFG hold the number of TLB1 entries. */
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	vcpu_e500->shadow_pages[0] = (struct page **)
		kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[0] == NULL)
		goto err_out_shadow1;

	vcpu_e500->shadow_pages[1] = (struct page **)
		kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[1] == NULL)
		goto err_out_page0;

	/* Init TLB configuration registers. */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];

	return 0;

err_out_page0:
	kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
	kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_pages[1]);
	kfree(vcpu_e500->shadow_pages[0]);
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}