// SPDX-License-Identifier: GPL-2.0
/*
 * s390 specific pci instructions
 *
 * Copyright IBM Corp. 2013
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>

#define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */

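/*
 * Record describing a failed or retried PCI instruction, handed to
 * zpci_err_hex_level() for debug logging.  Request-based instructions
 * fill in the request word and offset; instructions that take an
 * address/length pair (RPCIT and the MIO variants) fill in addr and len.
 */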
struct zpci_err_insn_data {
	u8 insn;
	u8 cc;
	u8 status;
	union {
		struct {
			u64 req;
			u64 offset;
		};
		struct {
			u64 addr;
			u64 len;
		};
	};
} __packed;

static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status,
				     u64 req, u64 offset)
{
	struct zpci_err_insn_data data = {
		.insn = insn, .cc = cc, .status = status,
		.req = req, .offset = offset};

	zpci_err_hex_level(lvl, &data, sizeof(data));
}

static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status,
				      u64 addr, u64 len)
{
	struct zpci_err_insn_data data = {
		.insn = insn, .cc = cc, .status = status,
		.addr = addr, .len = len};

	zpci_err_hex_level(lvl, &data, sizeof(data));
}

/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	asm volatile (
		"	.insn	rxy,0xe300000000d0,%[req],%[fib]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
		: : "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

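/*
 * Issue MPCIFC and retry while the function reports busy (cc 2),
 * sleeping briefly between attempts.  Only the first busy condition is
 * logged at debug level 1; a final failure is logged at level 0.  The
 * raw condition code is returned with the instruction status in *status.
 */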
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
	bool retried = false;
	u8 cc;

	do {
		cc = __mpcifc(req, fib, status);
		if (cc == 2) {
			msleep(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'M', cc, *status, req, 0);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'M', cc, *status, req, 0);
	else if (retried)
		zpci_err_insn_req(1, 'M', cc, *status, req, 0);

	return cc;
}

/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	union register_pair addr_range = {.even = addr, .odd = range};
	u8 cc;

	asm volatile (
		"	.insn	rre,0xb9d30000,%[fn],%[addr_range]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [fn] "+d" (fn)
		: [addr_range] "d" (addr_range.pair)
		: "cc");
	*status = fn >> 24 & 0xff;
	return cc;
}

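/*
 * Refresh the I/O translations for the given DMA address range, retrying
 * with a short delay while the function reports busy (cc 2).  A cc of 1
 * with status 4 or 16 is treated as an out-of-resource condition and
 * mapped to -ENOMEM; any other failure is reported as -EIO.
 */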
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
	bool retried = false;
	u8 cc, status;

	do {
		cc = __rpcit(fn, addr, range, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_addr(1, 'R', cc, status, addr, range);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_addr(0, 'R', cc, status, addr, range);
	else if (retried)
		zpci_err_insn_addr(1, 'R', cc, status, addr, range);

	if (cc == 1 && (status == 4 || status == 16))
		return -ENOMEM;

	return (cc) ? -EIO : 0;
}

/* Set Interruption Controls */
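/*
 * Issue the SIC instruction with the given control, interruption
 * subclass and interruption information block.  Returns -EIO when
 * facility 72 is not installed.
 */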
int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	if (!test_facility(72))
		return -EIO;

	asm volatile(
		".insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

	return 0;
}

/* PCI Load */
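/*
 * ____pcilg() issues the PCILG instruction; cc starts out as -ENXIO so
 * that a program check handled via the exception table is reported as
 * -ENXIO rather than a condition code.  __pcilg() leaves *data untouched
 * unless the load completed successfully.
 */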
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		"	.insn	rre,0xb9d20000,%[data],%[req_off]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [req_off] "+&d" (req_off.pair) :: "cc");
	*status = req_off.even >> 24 & 0xff;
	*data = __data;
	return cc;
}

static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	u64 __data;
	int cc;

	cc = ____pcilg(&__data, req, offset, status);
	if (!cc)
		*data = __data;

	return cc;
}

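/*
 * Load through the function-handle based PCILG instruction.  Busy (cc 2)
 * is retried after a short delay and only the first retry is logged;
 * any other failure is logged at level 0 and mapped to -EIO, while a
 * program check (-ENXIO) is passed through unchanged.
 */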
int __zpci_load(u64 *data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcilg(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'l', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'l', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 'l', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);

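/*
 * Legacy (non-MIO) access path: look up the function handle and BAR for
 * the mapped address in the global iomap table and build the request
 * word for __zpci_load().
 */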
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}

static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		"       .insn   rre,0xb9d60000,%[data],%[ioaddr_len]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
	*status = ioaddr_len.odd >> 24 & 0xff;
	*data = __data;
	return cc;
}

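/*
 * zpci_load() is the common entry point: with MIO support (have_mio
 * static key) the I/O address is used directly by the MIO form of PCILG,
 * otherwise the access goes through the function-handle based path above.
 */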
int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_load_fh(data, addr, len);

	cc = __pcilg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);

/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rre,0xb9d00000,%[data],%[req_off]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
		: [data] "d" (data)
		: "cc");
	*status = req_off.even >> 24 & 0xff;
	return cc;
}

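/*
 * Store through the function-handle based PCISTG instruction, with the
 * same busy retry, logging and -EIO mapping as __zpci_load().
 */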
int __zpci_store(u64 data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcistg(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 's', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 's', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 's', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);

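/* Legacy (non-MIO) store path, mirroring zpci_load_fh() above. */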
static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}

static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rre,0xb9d40000,%[data],%[ioaddr_len]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		: [data] "d" (data)
		: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

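/*
 * Common store entry point: use the MIO form of PCISTG when available,
 * otherwise fall back to the function-handle based path.
 */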
int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_store_fh(addr, data, len);

	cc = __pcistg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);

/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (req)
		: [offset] "d" (offset), [data] "Q" (*data)
		: "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

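/*
 * Block store through PCISTB, retrying while the function reports busy
 * (cc 2) and mapping any remaining failure to -EIO.
 */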
int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcistb(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(0, 'b', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'b', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 'b', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);

static inline int zpci_write_block_fh(volatile void __iomem *dst,
				      const void *src, unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
	u64 offset = ZPCI_OFFSET(dst);

	return __zpci_store_block(src, req, offset);
}

static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: "cc");
	*status = len >> 24 & 0xff;
	return cc;
}

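/*
 * Block write entry point: with MIO the destination address and length
 * go directly to the MIO form of PCISTB, otherwise the block is written
 * through the function-handle based path.
 */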
int zpci_write_block(volatile void __iomem *dst,
		     const void *src, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_write_block_fh(dst, src, len);

	cc = __pcistb_mio(src, (__force u64) dst, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);

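/*
 * zpci_barrier() issues the PCI write barrier instruction (see
 * __pciwb_mio()) to order preceding MIO stores; it is a no-op when MIO
 * is not in use.
 */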
static inline void __pciwb_mio(void)
{
	asm volatile (".insn    rre,0xb9d50000,0,0\n");
}

void zpci_barrier(void)
{
	if (static_branch_likely(&have_mio))
		__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);