/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif
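
/*
 * Illustrative sketch (an addition, not part of the original header):
 * svcpu_get() and svcpu_put() must always be paired, because the shadow
 * vcpu lives in the PACA and is only stable while preemption is off.
 * The helper name is hypothetical, and the pc field access assumes the
 * layout of struct kvmppc_book3s_shadow_vcpu in kvm_book3s_asm.h.
 */
#ifdef CONFIG_KVM_BOOK3S_PR
static inline ulong svcpu_example_read_pc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong pc = svcpu->pc;	/* safe: preemption is disabled here */

	svcpu_put(svcpu);
	return pc;
}
#endif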

#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_64_HV
/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_NPTE	(HPT_NPTEG << 3)		/* 8 PTEs per PTEG */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)
#endif
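
/*
 * Worked numbers for the fixed HPT_ORDER above: the table occupies
 * 1 << 24 bytes = 16MB.  At 128 bytes per PTEG that gives
 * HPT_NPTEG = 1 << 17 = 131072 groups, and with 8 HPTEs per PTEG,
 * HPT_NPTE = 1 << 20 = 1048576 entries.
 */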

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"	/* load HPTE dword 0, reserving it */
		     "	and.	%1,%0,%3\n"	/* any of the caller's bits set? */
		     "	bne	2f\n"		/* yes: fail with old != 0 */
		     "	ori	%0,%0,%4\n"	/* set HPTE_V_HVLOCK */
		     "	stdcx.	%0,0,%2\n"	/* store back if still reserved */
		     "	beq+	2f\n"		/* stored: succeed with old == 0 */
		     "	li	%1,%3\n"	/* lost the reservation: force old
						 * non-zero (%3's register number
						 * serves as the immediate) */
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
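
/*
 * Illustrative sketch (an addition): callers typically spin on
 * try_lock_hpte() and release the lock by clearing HPTE_V_HVLOCK in
 * dword 0.  The helper names are hypothetical; the real users live in
 * the HV MMU code and order their HPTE stores more carefully.
 */
static inline void example_lock_hpte(unsigned long *hpte)
{
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
}

static inline void example_unlock_hpte(unsigned long *hpte)
{
	asm volatile("ptesync" : : : "memory");	/* order prior HPTE updates */
	hpte[0] &= ~HPTE_V_HVLOCK;
}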

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	unsigned long rb, va_low;

	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/* xor vsid from AVA */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> 12;
	else
		va_low ^= v >> 24;
	va_low &= 0x7ff;
	if (v & HPTE_V_LARGE) {
		rb |= 1;			/* L field */
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (r & 0xff000)) {
			/* non-16MB large page, must be 64k */
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
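
/*
 * Illustrative sketch (an addition): the value built above is the RB
 * operand of a tlbie.  A minimal, not-SMP-safe flush might look like:
 *
 *	rb = compute_tlbie_rb(hpte[0], hpte[1], pte_index);
 *	asm volatile("ptesync; tlbie %0; eieio; tlbsync; ptesync"
 *		     : : "r" (rb) : "memory");
 *
 * The real invalidation paths also serialize concurrent tlbies.
 */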

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;		/* 4k page */
	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
		return 1ul << 16;		/* 64k page */
	if ((l & 0xff000) == 0)
		return 1ul << 24;		/* 16M page */
	return 0;				/* error */
}
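
/*
 * Example decodings: hpte_page_size(0, 0) is 4k (HPTE_V_LARGE clear),
 * hpte_page_size(HPTE_V_LARGE, 0x1000) is 64k on POWER7, and
 * hpte_page_size(HPTE_V_LARGE, 0) is 16M; any other LP encoding in
 * dword 1 yields 0.
 */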

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
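
/*
 * Worked example: with a 16M page (psize = 1ul << 24), a 4k PAGE_SHIFT
 * and a real address of 0x123456000 in HPTE_R_RPN, the low 24 bits are
 * masked off and 0x123000000 >> 12 = 0x123000 is the frame number.
 */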

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
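
/*
 * For reference (PP encodings as defined in asm/reg.h): PP_RWXX = 0,
 * PP_RWRX = 1, PP_RWRW = 2, PP_RXRX = 3, and PP_RXXX includes
 * HPTE_R_PP0.  So hpte_is_writable() rejects exactly the two read-only
 * encodings, and hpte_make_readonly() maps each read/write encoding to
 * its read-only counterpart.
 */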

static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}
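
/*
 * Example: for ordinary RAM the caller passes io_type == 0 and only
 * WIMG == M (cacheable, coherent) is accepted, with the POWER7 SAO
 * combination W|I|M treated as equivalent; for emulated I/O the caller
 * passes the expected cache-inhibited bits as io_type.
 */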

/*
 * Lock and read a linux PTE.  If it's present, set the referenced bit,
 * and also the dirty bit if we're writing and the PTE allows writes;
 * the (possibly updated) PTE is stored back and returned.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
{
	pte_t pte, tmp;

	/* wait until _PAGE_BUSY is clear then set it atomically */
	__asm__ __volatile__ (
		"1:	ldarx	%0,0,%3\n"	/* load the PTE, reserving it */
		"	andi.	%1,%0,%4\n"	/* _PAGE_BUSY already set? */
		"	bne-	1b\n"		/* yes: spin until it clears */
		"	ori	%1,%0,%4\n"	/* set _PAGE_BUSY */
		"	stdcx.	%1,0,%3\n"	/* store back if still reserved */
		"	bne-	1b"		/* lost the reservation: retry */
		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
		: "r" (p), "i" (_PAGE_BUSY)
		: "cc");

	if (pte_present(pte)) {
		pte = pte_mkyoung(pte);
		if (writing && pte_write(pte))
			pte = pte_mkdirty(pte);
	}

	*p = pte;	/* clears _PAGE_BUSY */

	return pte;
}
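
/*
 * Illustrative use (an addition; ptep, writing and pfn are hypothetical
 * locals in the caller):
 *
 *	pte_t pte = kvmppc_read_update_linux_pte(ptep, writing);
 *	if (pte_present(pte))
 *		pfn = pte_pfn(pte);
 */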

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
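
/*
 * With the PP encodings from asm/reg.h (PP_RWXX = 0 through
 * PP_RXRX = 3), a non-zero protection key grants reads only for pp
 * values 1-3 and writes only for PP_RWRW, while key 0 always grants
 * reads and grants writes for any pp at or below PP_RWRW.
 */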

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
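
/*
 * Worked example: the 5-bit storage key is reassembled from
 * HPTE_R_KEY_HI (upper two bits) and HPTE_R_KEY_LO (lower three), and
 * each key owns a 2-bit field in the AMR.  For skey = 5 the shift is
 * 62 - 10 = 52, so the result is (amr >> 52) & 3.
 */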

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
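
/*
 * Illustrative sketch (an addition, hypothetical helper): rmap words
 * are always updated under the lock bit, e.g.:
 */
static inline void example_mark_rmap_referenced(unsigned long *rmap)
{
	lock_rmap(rmap);
	*rmap |= KVMPPC_RMAP_REFERENCED;	/* flag assumed from kvm_host.h */
	unlock_rmap(rmap);
}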

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return 1;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
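
/*
 * Example: with 4k base pages and a 16M large page size, mask is
 * ((1ul << 24) >> 12) - 1 = 0xfff, so both the memslot's base_gfn and
 * its npages must be multiples of 4096 small pages for 16M pages to
 * be usable in that slot.
 */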

#endif /* __ASM_KVM_BOOK3S_64_H__ */