1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
3 #define _ASM_POWERPC_PLPAR_WRAPPERS_H
4 
5 #ifdef CONFIG_PPC_PSERIES
6 
7 #include <linux/string.h>
8 #include <linux/irqflags.h>
9 
10 #include <asm/hvcall.h>
11 #include <asm/paca.h>
12 #include <asm/page.h>
13 
/* Ask the hypervisor whether it has pending work for this partition. */
static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}
18 
/* Read the current cede latency hint from this CPU's lppaca. */
static inline u8 get_cede_latency_hint(void)
{
	return get_lppaca()->cede_latency_hint;
}
23 
/* Store a new cede latency hint in this CPU's lppaca. */
static inline void set_cede_latency_hint(u8 latency_hint)
{
	get_lppaca()->cede_latency_hint = latency_hint;
}
28 
/* Cede this processor to the hypervisor (H_CEDE). */
static inline long cede_processor(void)
{
	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	return plpar_hcall_norets_notrace(H_CEDE);
}
37 
extended_cede_processor(unsigned long latency_hint)38 static inline long extended_cede_processor(unsigned long latency_hint)
39 {
40 	long rc;
41 	u8 old_latency_hint = get_cede_latency_hint();
42 
43 	set_cede_latency_hint(latency_hint);
44 
45 	rc = cede_processor();
46 
47 	/* Ensure that H_CEDE returns with IRQs on */
48 	if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE)))
49 		__hard_irq_enable();
50 
51 	set_cede_latency_hint(old_latency_hint);
52 
53 	return rc;
54 }
55 
/*
 * Common H_REGISTER_VPA wrapper: @flags selects the sub-function
 * (register/deregister VPA, SLB shadow buffer, or DTL) and is shifted
 * into the field position the hypervisor expects.
 */
static inline long vpa_call(unsigned long flags, unsigned long cpu,
		unsigned long vpa)
{
	flags = flags << H_VPA_FUNC_SHIFT;

	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}
63 
/* Deregister the Virtual Processor Area for @cpu. */
static inline long unregister_vpa(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}
68 
/* Register @vpa as the Virtual Processor Area for @cpu. */
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}
73 
/* Deregister the SLB shadow buffer for @cpu. */
static inline long unregister_slb_shadow(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}
78 
/* Register @vpa as the SLB shadow buffer for @cpu. */
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}
83 
/* Deregister the Dispatch Trace Log buffer for @cpu. */
static inline long unregister_dtl(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}
88 
/* Register @vpa as the Dispatch Trace Log buffer for @cpu. */
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
93 
94 extern void vpa_init(int cpu);
95 
plpar_pte_enter(unsigned long flags,unsigned long hpte_group,unsigned long hpte_v,unsigned long hpte_r,unsigned long * slot)96 static inline long plpar_pte_enter(unsigned long flags,
97 		unsigned long hpte_group, unsigned long hpte_v,
98 		unsigned long hpte_r, unsigned long *slot)
99 {
100 	long rc;
101 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
102 
103 	rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
104 
105 	*slot = retbuf[0];
106 
107 	return rc;
108 }
109 
plpar_pte_remove(unsigned long flags,unsigned long ptex,unsigned long avpn,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)110 static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
111 		unsigned long avpn, unsigned long *old_pteh_ret,
112 		unsigned long *old_ptel_ret)
113 {
114 	long rc;
115 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
116 
117 	rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
118 
119 	*old_pteh_ret = retbuf[0];
120 	*old_ptel_ret = retbuf[1];
121 
122 	return rc;
123 }
124 
/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);

	/* Old HPTE word 0 and word 1 come back in the hcall return buffer. */
	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}
140 
plpar_pte_read(unsigned long flags,unsigned long ptex,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)141 static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
142 		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
143 {
144 	long rc;
145 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
146 
147 	rc = plpar_hcall(H_READ, retbuf, flags, ptex);
148 
149 	*old_pteh_ret = retbuf[0];
150 	*old_ptel_ret = retbuf[1];
151 
152 	return rc;
153 }
154 
/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);

	/* HPTE word 0 and word 1 come back in the hcall return buffer. */
	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}
169 
170 /*
171  * ptes must be 8*sizeof(unsigned long)
172  */
plpar_pte_read_4(unsigned long flags,unsigned long ptex,unsigned long * ptes)173 static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
174 				    unsigned long *ptes)
175 
176 {
177 	long rc;
178 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
179 
180 	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
181 
182 	memcpy(ptes, retbuf, 8*sizeof(unsigned long));
183 
184 	return rc;
185 }
186 
/*
 * plpar_pte_read_4_raw can be called in real mode.
 * ptes must be 8*sizeof(unsigned long)
 */
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
					unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);

	/* Copy all four HPTE pairs (8 words) out of the return buffer. */
	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}
204 
/* Change the protection bits of an HPTE via H_PROTECT. */
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
		unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}
210 
/* Ask the HV to prepare a resized hash page table of 2^@shift bytes. */
static inline long plpar_resize_hpt_prepare(unsigned long flags,
					    unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
}
216 
/* Commit a previously prepared hash page table resize. */
static inline long plpar_resize_hpt_commit(unsigned long flags,
					   unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
}
222 
plpar_tce_get(unsigned long liobn,unsigned long ioba,unsigned long * tce_ret)223 static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
224 		unsigned long *tce_ret)
225 {
226 	long rc;
227 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
228 
229 	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
230 
231 	*tce_ret = retbuf[0];
232 
233 	return rc;
234 }
235 
/* Write one TCE entry via H_PUT_TCE. */
static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
		unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}
241 
/* Write @count TCE entries from the list at @page via H_PUT_TCE_INDIRECT. */
static inline long plpar_tce_put_indirect(unsigned long liobn,
		unsigned long ioba, unsigned long page, unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}
247 
/* Fill @count consecutive TCE entries with @tceval via H_STUFF_TCE. */
static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
		unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}
253 
/* Set various resource mode parameters */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
		unsigned long value1, unsigned long value2)
{
	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}
260 
/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: Exceptions at 0xC000000000004000 */
	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
273 
274 /*
275  * Disable relocation on exceptions on this partition
276  *
277  * Note: this call has a partition wide scope and can take a while to complete.
278  * If it returns H_LONG_BUSY_* it should be retried periodically until it
279  * returns H_SUCCESS.
280  */
disable_reloc_on_exceptions(void)281 static inline long disable_reloc_on_exceptions(void) {
282 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
283 }
284 
/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
	/* mflags = 0: big endian exceptions */
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}
297 
/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	/* mflags = 1: little endian exceptions */
	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}
310 
/* Set the Completed Instruction Address Breakpoint Register via H_SET_MODE. */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}
315 
/* Program hardware watchpoint 0 (DAWR0/DAWRX0) via H_SET_MODE. */
static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
}
320 
/* Program hardware watchpoint 1 (DAWR1/DAWRX1) via H_SET_MODE. */
static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
}
325 
/* Send a system-reset interrupt to @cpu via H_SIGNAL_SYS_RESET. */
static inline long plpar_signal_sys_reset(long cpu)
{
	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}
330 
plpar_get_cpu_characteristics(struct h_cpu_char_result * p)331 static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
332 {
333 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
334 	long rc;
335 
336 	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
337 	if (rc == H_SUCCESS) {
338 		p->character = retbuf[0];
339 		p->behaviour = retbuf[1];
340 	}
341 
342 	return rc;
343 }
344 
345 /*
346  * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
347  *
348  * - Returns H_SUCCESS on success
349  * - For H_BUSY return value, we retry the hcall.
350  * - For any other hcall failures, attempt a full flush once before
351  *   resorting to BUG().
352  *
353  * Note: This hcall is expected to fail only very rarely. The correct
354  * error recovery of killing the process/guest will be eventually
355  * needed.
356  */
pseries_rpt_invalidate(u32 pid,u64 target,u64 type,u64 page_sizes,u64 start,u64 end)357 static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
358 					  u64 page_sizes, u64 start, u64 end)
359 {
360 	long rc;
361 	unsigned long all;
362 
363 	while (true) {
364 		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
365 					page_sizes, start, end);
366 		if (rc == H_BUSY) {
367 			cpu_relax();
368 			continue;
369 		} else if (rc == H_SUCCESS)
370 			return rc;
371 
372 		/* Flush request failed, try with a full flush once */
373 		if (type & H_RPTI_TYPE_NESTED)
374 			all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
375 		else
376 			all = H_RPTI_TYPE_ALL;
377 retry:
378 		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
379 					all, page_sizes, 0, -1UL);
380 		if (rc == H_BUSY) {
381 			cpu_relax();
382 			goto retry;
383 		} else if (rc == H_SUCCESS)
384 			return rc;
385 
386 		BUG();
387 	}
388 }
389 
390 #else /* !CONFIG_PPC_PSERIES */
391 
/* Stub for non-pseries builds: no hypervisor, report success. */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return 0;
}
396 
/* Stub for non-pseries builds: no HPT to read, report success. */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
				    unsigned long *ptes)
{
	return 0;
}
402 
/* Stub for non-pseries builds: nothing to invalidate, report success. */
static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	return 0;
}
408 
409 #endif /* CONFIG_PPC_PSERIES */
410 
411 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
412