// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/pci.h>
#include <asm/pnv-ocxl.h>
#include <misc/ocxl-config.h>
#include "ocxl_internal.h"

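/* Helpers to extract a single bit or a contiguous bit field from a register value */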
#define EXTRACT_BIT(val, bit) (!!(val & BIT(bit)))
#define EXTRACT_BITS(val, s, e) ((val & GENMASK(e, s)) >> s)

#define OCXL_DVSEC_AFU_IDX_MASK   GENMASK(5, 0)
#define OCXL_DVSEC_ACTAG_MASK     GENMASK(11, 0)
#define OCXL_DVSEC_PASID_MASK     GENMASK(19, 0)
#define OCXL_DVSEC_PASID_LOG_MASK GENMASK(4, 0)

#define OCXL_DVSEC_TEMPL_VERSION        0x0
#define OCXL_DVSEC_TEMPL_NAME           0x4
#define OCXL_DVSEC_TEMPL_AFU_VERSION    0x1C
#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL    0x20
#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ 0x28
#define OCXL_DVSEC_TEMPL_MMIO_PP        0x30
#define OCXL_DVSEC_TEMPL_MMIO_PP_SZ     0x38
#define OCXL_DVSEC_TEMPL_ALL_MEM_SZ     0x3C
#define OCXL_DVSEC_TEMPL_LPC_MEM_START  0x40
#define OCXL_DVSEC_TEMPL_WWID           0x48
#define OCXL_DVSEC_TEMPL_LPC_MEM_SZ     0x58

#define OCXL_MAX_AFU_PER_FUNCTION 64
#define OCXL_TEMPL_LEN_1_0        0x58
#define OCXL_TEMPL_LEN_1_1        0x60
#define OCXL_TEMPL_NAME_LEN       24
#define OCXL_CFG_TIMEOUT          3

static int find_dvsec(struct pci_dev *dev, int dvsec_id)
{
        return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, dvsec_id);
}

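/*
 * Each AFU has its own AFU control DVSEC. Walk the extended capability
 * list and return the position of the IBM AFU control DVSEC whose AFU
 * index matches 'afu_idx', or 0 if none is found.
 */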
static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
{
        int vsec = 0;
        u16 vendor, id;
        u8 idx;

        while ((vsec = pci_find_next_ext_capability(dev, vsec,
                                                    OCXL_EXT_CAP_ID_DVSEC))) {
                pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET,
                                     &vendor);
                pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id);

                if (vendor == PCI_VENDOR_ID_IBM &&
                    id == OCXL_DVSEC_AFU_CTRL_ID) {
                        pci_read_config_byte(dev,
                                             vsec + OCXL_DVSEC_AFU_CTRL_AFU_IDX,
                                             &idx);
                        if (idx == afu_idx)
                                return vsec;
                }
        }
        return 0;
}

/**
 * get_function_0() - Find a related PCI device (function 0)
 * @dev: PCI device to match
 *
 * Returns a pointer to the related device, or NULL if not found
 */
static struct pci_dev *get_function_0(struct pci_dev *dev)
{
        unsigned int devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);

        return pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
                                           dev->bus->number, devfn);
}

static void read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
        u16 val;
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PASID);
        if (!pos) {
                /*
                 * The PASID capability is not mandatory, but without
                 * it the function shouldn't define any AFU
                 */
                dev_dbg(&dev->dev, "Function doesn't require any PASID\n");
                fn->max_pasid_log = -1;
                goto out;
        }
        pci_read_config_word(dev, pos + PCI_PASID_CAP, &val);
        fn->max_pasid_log = EXTRACT_BITS(val, 8, 12);

out:
        dev_dbg(&dev->dev, "PASID capability:\n");
        dev_dbg(&dev->dev, "  Max PASID log = %d\n", fn->max_pasid_log);
}

static int read_dvsec_tl(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
        int pos;

        pos = find_dvsec(dev, OCXL_DVSEC_TL_ID);
        if (!pos && PCI_FUNC(dev->devfn) == 0) {
                dev_err(&dev->dev, "Can't find TL DVSEC\n");
                return -ENODEV;
        }
        if (pos && PCI_FUNC(dev->devfn) != 0) {
                dev_err(&dev->dev, "TL DVSEC is only allowed on function 0\n");
                return -ENODEV;
        }
        fn->dvsec_tl_pos = pos;
        return 0;
}

static int read_dvsec_function(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
        int pos, afu_present;
        u32 val;

        pos = find_dvsec(dev, OCXL_DVSEC_FUNC_ID);
        if (!pos) {
                dev_err(&dev->dev, "Can't find function DVSEC\n");
                return -ENODEV;
        }
        fn->dvsec_function_pos = pos;

        pci_read_config_dword(dev, pos + OCXL_DVSEC_FUNC_OFF_INDEX, &val);
        afu_present = EXTRACT_BIT(val, 31);
        if (!afu_present) {
                fn->max_afu_index = -1;
                dev_dbg(&dev->dev, "Function doesn't define any AFU\n");
                goto out;
        }
        fn->max_afu_index = EXTRACT_BITS(val, 24, 29);

out:
        dev_dbg(&dev->dev, "Function DVSEC:\n");
        dev_dbg(&dev->dev, "  Max AFU index = %d\n", fn->max_afu_index);
        return 0;
}

static int read_dvsec_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
        int pos;

        if (fn->max_afu_index < 0) {
                fn->dvsec_afu_info_pos = -1;
                return 0;
        }

        pos = find_dvsec(dev, OCXL_DVSEC_AFU_INFO_ID);
        if (!pos) {
                dev_err(&dev->dev, "Can't find AFU information DVSEC\n");
                return -ENODEV;
        }
        fn->dvsec_afu_info_pos = pos;
        return 0;
}

static int read_dvsec_vendor(struct pci_dev *dev)
{
        int pos;
        u32 cfg, tlx, dlx, reset_reload;

        /*
         * Vendor specific DVSEC, for IBM images only. Some older
         * images may not have it.
         *
         * It's only used on function 0 to specify the version of some
         * logic blocks and to give access to special registers to
         * enable host-based flashing.
         */
        if (PCI_FUNC(dev->devfn) != 0)
                return 0;

        pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID);
        if (!pos)
                return 0;

        pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_CFG_VERS, &cfg);
        pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_TLX_VERS, &tlx);
        pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_DLX_VERS, &dlx);
        pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
                              &reset_reload);

        dev_dbg(&dev->dev, "Vendor specific DVSEC:\n");
        dev_dbg(&dev->dev, "  CFG version = 0x%x\n", cfg);
        dev_dbg(&dev->dev, "  TLX version = 0x%x\n", tlx);
        dev_dbg(&dev->dev, "  DLX version = 0x%x\n", dlx);
        dev_dbg(&dev->dev, "  ResetReload = 0x%x\n", reset_reload);
        return 0;
}

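/*
 * The vendor specific DVSEC only exists on function 0. If called for
 * another function, look up function 0 of the same device and return
 * both the function 0 pci_dev and the DVSEC position within it.
 */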
static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0,
                             int *out_pos)
{
        int pos;

        if (PCI_FUNC(dev->devfn) != 0) {
                dev = get_function_0(dev);
                if (!dev)
                        return -1;
        }
        pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID);
        if (!pos)
                return -1;
        *dev0 = dev;
        *out_pos = pos;
        return 0;
}

int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val)
{
        struct pci_dev *dev0;
        u32 reset_reload;
        int pos;

        if (get_dvsec_vendor0(dev, &dev0, &pos))
                return -1;

        pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
                              &reset_reload);
        *val = !!(reset_reload & BIT(0));
        return 0;
}

int ocxl_config_set_reset_reload(struct pci_dev *dev, int val)
{
        struct pci_dev *dev0;
        u32 reset_reload;
        int pos;

        if (get_dvsec_vendor0(dev, &dev0, &pos))
                return -1;

        pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
                              &reset_reload);
        if (val)
                reset_reload |= BIT(0);
        else
                reset_reload &= ~BIT(0);
        pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
                               reset_reload);
        return 0;
}

static int validate_function(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
        if (fn->max_pasid_log == -1 && fn->max_afu_index >= 0) {
                dev_err(&dev->dev,
                        "AFUs are defined but no PASIDs are requested\n");
                return -EINVAL;
        }

        if (fn->max_afu_index > OCXL_MAX_AFU_PER_FUNCTION) {
                dev_err(&dev->dev,
                        "Max AFU index out of architectural limit (%d vs %d)\n",
                        fn->max_afu_index, OCXL_MAX_AFU_PER_FUNCTION);
                return -EINVAL;
        }
        return 0;
}

int ocxl_config_read_function(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
        int rc;

        read_pasid(dev, fn);

        rc = read_dvsec_tl(dev, fn);
        if (rc) {
                dev_err(&dev->dev,
                        "Invalid Transaction Layer DVSEC configuration: %d\n",
                        rc);
                return -ENODEV;
        }

        rc = read_dvsec_function(dev, fn);
        if (rc) {
                dev_err(&dev->dev,
                        "Invalid Function DVSEC configuration: %d\n", rc);
                return -ENODEV;
        }

        rc = read_dvsec_afu_info(dev, fn);
        if (rc) {
                dev_err(&dev->dev, "Invalid AFU configuration: %d\n", rc);
                return -ENODEV;
        }

        rc = read_dvsec_vendor(dev);
        if (rc) {
                dev_err(&dev->dev,
                        "Invalid vendor specific DVSEC configuration: %d\n",
                        rc);
                return -ENODEV;
        }

        rc = validate_function(dev, fn);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_config_read_function);

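/*
 * AFU descriptor templates are read indirectly through the AFU
 * information DVSEC: write the template offset to the offset register,
 * poll until the hardware sets the 'data valid' bit (bit 31), then
 * read the data register.
 */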
static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn,
                         int offset, u32 *data)
{
        u32 val;
        unsigned long timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT);
        int pos = fn->dvsec_afu_info_pos;

        /* Protect 'data valid' bit */
        if (EXTRACT_BIT(offset, 31)) {
                dev_err(&dev->dev, "Invalid offset in AFU info DVSEC\n");
                return -EINVAL;
        }

        pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, offset);
        pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val);
        while (!EXTRACT_BIT(val, 31)) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_err(&dev->dev,
                                "Timeout while reading AFU info DVSEC (offset=%d)\n",
                                offset);
                        return -EBUSY;
                }
                cpu_relax();
                pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val);
        }
        pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_DATA, data);
        return 0;
}

/**
 * read_template_version() - Read the template version from the AFU
 * @dev: the device for the AFU
 * @fn: the AFU offsets
 * @len: outputs the template length
 * @version: outputs the version, encoded as (major << 8) | minor
 *
 * Returns 0 on success, negative on failure
 */
static int read_template_version(struct pci_dev *dev, struct ocxl_fn_config *fn,
                                 u16 *len, u16 *version)
{
        u32 val32;
        u8 major, minor;
        int rc;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val32);
        if (rc)
                return rc;

        *len = EXTRACT_BITS(val32, 16, 31);
        major = EXTRACT_BITS(val32, 8, 15);
        minor = EXTRACT_BITS(val32, 0, 7);
        *version = (major << 8) + minor;
        return 0;
}

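/*
 * Check whether an AFU descriptor is present at the given index.
 * Returns 1 if an AFU is defined at that index, 0 if the index is a
 * hole in the AFU map, or a negative error code.
 */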
int ocxl_config_check_afu_index(struct pci_dev *dev,
                                struct ocxl_fn_config *fn, int afu_idx)
{
        int rc;
        u16 templ_version;
        u16 len, expected_len;

        pci_write_config_byte(dev,
                              fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
                              afu_idx);

        rc = read_template_version(dev, fn, &len, &templ_version);
        if (rc)
                return rc;

        /* AFU index map can have holes, in which case we read all 0's */
        if (!templ_version && !len)
                return 0;

        dev_dbg(&dev->dev, "AFU descriptor template version %d.%d\n",
                templ_version >> 8, templ_version & 0xFF);

        switch (templ_version) {
        case 0x0005: // v0.5 was used prior to the spec approval
        case 0x0100:
                expected_len = OCXL_TEMPL_LEN_1_0;
                break;
        case 0x0101:
                expected_len = OCXL_TEMPL_LEN_1_1;
                break;
        default:
                dev_warn(&dev->dev, "Unknown AFU template version %#x\n",
                         templ_version);
                expected_len = len;
        }
        if (len != expected_len)
                dev_warn(&dev->dev,
                         "Unexpected template length %#x in AFU information, expected %#x for version %#x\n",
                         len, expected_len, templ_version);
        return 1;
}

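/*
 * The AFU name is stored in the descriptor template as little-endian
 * 32-bit words. Read it 4 bytes at a time and force a NUL terminator.
 */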
static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
                         struct ocxl_afu_config *afu)
{
        int i, rc;
        u32 val, *ptr;

        BUILD_BUG_ON(OCXL_AFU_NAME_SZ < OCXL_TEMPL_NAME_LEN);
        for (i = 0; i < OCXL_TEMPL_NAME_LEN; i += 4) {
                rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_NAME + i, &val);
                if (rc)
                        return rc;
                ptr = (u32 *) &afu->name[i];
                *ptr = le32_to_cpu((__force __le32) val);
        }
        afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
        return 0;
}

static int read_afu_mmio(struct pci_dev *dev, struct ocxl_fn_config *fn,
                         struct ocxl_afu_config *afu)
{
        int rc;
        u32 val;

        /*
         * Global MMIO
         */
        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL, &val);
        if (rc)
                return rc;
        afu->global_mmio_bar = EXTRACT_BITS(val, 0, 2);
        afu->global_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL + 4, &val);
        if (rc)
                return rc;
        afu->global_mmio_offset += (u64) val << 32;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ, &val);
        if (rc)
                return rc;
        afu->global_mmio_size = val;

        /*
         * Per-process MMIO
         */
        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP, &val);
        if (rc)
                return rc;
        afu->pp_mmio_bar = EXTRACT_BITS(val, 0, 2);
        afu->pp_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP + 4, &val);
        if (rc)
                return rc;
        afu->pp_mmio_offset += (u64) val << 32;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP_SZ, &val);
        if (rc)
                return rc;
        afu->pp_mmio_stride = val;

        return 0;
}

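/*
 * Read the per-AFU control DVSEC to find out how many PASIDs (as a
 * power of 2) and acTags the AFU supports.
 */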
static int read_afu_control(struct pci_dev *dev, struct ocxl_afu_config *afu)
{
        int pos;
        u8 val8;
        u16 val16;

        pos = find_dvsec_afu_ctrl(dev, afu->idx);
        if (!pos) {
                dev_err(&dev->dev, "Can't find AFU control DVSEC for AFU %d\n",
                        afu->idx);
                return -ENODEV;
        }
        afu->dvsec_afu_control_pos = pos;

        pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_SUP, &val8);
        afu->pasid_supported_log = EXTRACT_BITS(val8, 0, 4);

        pci_read_config_word(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_SUP, &val16);
        afu->actag_supported = EXTRACT_BITS(val16, 0, 11);
        return 0;
}

static bool char_allowed(int c)
{
        /*
         * Permitted Characters : Alphanumeric, hyphen, underscore, comma
         */
        if ((c >= 0x30 && c <= 0x39) /* digits */ ||
            (c >= 0x41 && c <= 0x5A) /* upper case */ ||
            (c >= 0x61 && c <= 0x7A) /* lower case */ ||
            c == 0 /* NULL */ ||
            c == 0x2D /* - */ ||
            c == 0x5F /* _ */ ||
            c == 0x2C /* , */)
                return true;
        return false;
}

static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu)
{
        int i;

        if (!afu->name[0]) {
                dev_err(&dev->dev, "Empty AFU name\n");
                return -EINVAL;
        }
        for (i = 0; i < OCXL_TEMPL_NAME_LEN; i++) {
                if (!char_allowed(afu->name[i])) {
                        dev_err(&dev->dev,
                                "Invalid character in AFU name\n");
                        return -EINVAL;
                }
        }

        if (afu->global_mmio_bar != 0 &&
            afu->global_mmio_bar != 2 &&
            afu->global_mmio_bar != 4) {
                dev_err(&dev->dev, "Invalid global MMIO bar number\n");
                return -EINVAL;
        }
        if (afu->pp_mmio_bar != 0 &&
            afu->pp_mmio_bar != 2 &&
            afu->pp_mmio_bar != 4) {
                dev_err(&dev->dev, "Invalid per-process MMIO bar number\n");
                return -EINVAL;
        }
        return 0;
}

/**
 * read_afu_lpc_memory_info() - Populate AFU metadata regarding LPC memory
 * @dev: the device for the AFU
 * @fn: the AFU offsets
 * @afu: the AFU struct to populate the LPC metadata into
 *
 * Returns 0 on success, negative on failure
 */
static int read_afu_lpc_memory_info(struct pci_dev *dev,
                                    struct ocxl_fn_config *fn,
                                    struct ocxl_afu_config *afu)
{
        int rc;
        u32 val32;
        u16 templ_version;
        u16 templ_len;
        u64 total_mem_size = 0;
        u64 lpc_mem_size = 0;

        afu->lpc_mem_offset = 0;
        afu->lpc_mem_size = 0;
        afu->special_purpose_mem_offset = 0;
        afu->special_purpose_mem_size = 0;
        /*
         * For AFUs following template v1.0, the LPC memory covers the
         * total memory. Its size is a power of 2.
         *
         * For AFUs with template >= v1.01, the total memory size is
         * still a power of 2, but it is split in 2 parts:
         * - the LPC memory, whose size can now be anything
         * - the remainder memory is a special purpose memory, whose
         *   definition is AFU-dependent. It is not accessible through
         *   the usual commands for LPC memory
         */
        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_ALL_MEM_SZ, &val32);
        if (rc)
                return rc;

        val32 = EXTRACT_BITS(val32, 0, 7);
        if (!val32)
                return 0; /* No LPC memory */

        /*
         * The configuration space spec allows for a memory size of up
         * to 2^255 bytes.
         *
         * Current generation hardware uses 56-bit physical addresses,
         * but we won't be able to get anywhere close to that, as we
         * won't have a hole big enough in the memory map. Let it pass
         * in the driver for now. We'll get an error from the firmware
         * when trying to configure something too big.
         */
        total_mem_size = 1ull << val32;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START, &val32);
        if (rc)
                return rc;

        afu->lpc_mem_offset = val32;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START + 4, &val32);
        if (rc)
                return rc;

        afu->lpc_mem_offset |= (u64) val32 << 32;

        rc = read_template_version(dev, fn, &templ_len, &templ_version);
        if (rc)
                return rc;

        if (templ_version >= 0x0101) {
                rc = read_afu_info(dev, fn,
                                   OCXL_DVSEC_TEMPL_LPC_MEM_SZ, &val32);
                if (rc)
                        return rc;
                lpc_mem_size = val32;

                rc = read_afu_info(dev, fn,
                                   OCXL_DVSEC_TEMPL_LPC_MEM_SZ + 4, &val32);
                if (rc)
                        return rc;
                lpc_mem_size |= (u64) val32 << 32;
        } else {
                lpc_mem_size = total_mem_size;
        }
        afu->lpc_mem_size = lpc_mem_size;

        if (lpc_mem_size < total_mem_size) {
                afu->special_purpose_mem_offset =
                        afu->lpc_mem_offset + lpc_mem_size;
                afu->special_purpose_mem_size =
                        total_mem_size - lpc_mem_size;
        }
        return 0;
}

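/*
 * Read the whole configuration of one AFU: name, version, MMIO areas,
 * LPC memory and control DVSEC settings, then validate the result. The
 * AFU index is written to the AFU information DVSEC first, so that all
 * subsequent template reads target the right AFU.
 */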
int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn,
                         struct ocxl_afu_config *afu, u8 afu_idx)
{
        int rc;
        u32 val32;

        /*
         * First, we need to write the AFU idx for the AFU we want to
         * access.
         */
        WARN_ON((afu_idx & OCXL_DVSEC_AFU_IDX_MASK) != afu_idx);
        afu->idx = afu_idx;
        pci_write_config_byte(dev,
                              fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
                              afu->idx);

        rc = read_afu_name(dev, fn, afu);
        if (rc)
                return rc;

        rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_AFU_VERSION, &val32);
        if (rc)
                return rc;
        afu->version_major = EXTRACT_BITS(val32, 24, 31);
        afu->version_minor = EXTRACT_BITS(val32, 16, 23);
        afu->afuc_type = EXTRACT_BITS(val32, 14, 15);
        afu->afum_type = EXTRACT_BITS(val32, 12, 13);
        afu->profile = EXTRACT_BITS(val32, 0, 7);

        rc = read_afu_mmio(dev, fn, afu);
        if (rc)
                return rc;

        rc = read_afu_lpc_memory_info(dev, fn, afu);
        if (rc)
                return rc;

        rc = read_afu_control(dev, afu);
        if (rc)
                return rc;

        dev_dbg(&dev->dev, "AFU configuration:\n");
        dev_dbg(&dev->dev, "  name = %s\n", afu->name);
        dev_dbg(&dev->dev, "  version = %d.%d\n", afu->version_major,
                afu->version_minor);
        dev_dbg(&dev->dev, "  global mmio bar = %hhu\n", afu->global_mmio_bar);
        dev_dbg(&dev->dev, "  global mmio offset = %#llx\n",
                afu->global_mmio_offset);
        dev_dbg(&dev->dev, "  global mmio size = %#x\n", afu->global_mmio_size);
        dev_dbg(&dev->dev, "  pp mmio bar = %hhu\n", afu->pp_mmio_bar);
        dev_dbg(&dev->dev, "  pp mmio offset = %#llx\n", afu->pp_mmio_offset);
        dev_dbg(&dev->dev, "  pp mmio stride = %#x\n", afu->pp_mmio_stride);
        dev_dbg(&dev->dev, "  lpc_mem offset = %#llx\n", afu->lpc_mem_offset);
        dev_dbg(&dev->dev, "  lpc_mem size = %#llx\n", afu->lpc_mem_size);
        dev_dbg(&dev->dev, "  special purpose mem offset = %#llx\n",
                afu->special_purpose_mem_offset);
        dev_dbg(&dev->dev, "  special purpose mem size = %#llx\n",
                afu->special_purpose_mem_size);
        dev_dbg(&dev->dev, "  pasid supported (log) = %u\n",
                afu->pasid_supported_log);
        dev_dbg(&dev->dev, "  actag supported = %u\n",
                afu->actag_supported);

        rc = validate_afu(dev, afu);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_config_read_afu);

int ocxl_config_get_actag_info(struct pci_dev *dev, u16 *base, u16 *enabled,
                               u16 *supported)
{
        int rc;

        /*
         * This is really a simple wrapper around the platform API, so
         * that an external driver using ocxl as a library doesn't have
         * to call platform-dependent code directly
         */
        rc = pnv_ocxl_get_actag(dev, base, enabled, supported);
        if (rc) {
                dev_err(&dev->dev, "Can't get actag for device: %d\n", rc);
                return rc;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_config_get_actag_info);

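/*
 * Program, in the AFU control DVSEC, the number of acTags the AFU is
 * allowed to use and the base of its acTag range.
 */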
void ocxl_config_set_afu_actag(struct pci_dev *dev, int pos, int actag_base,
                               int actag_count)
{
        u16 val;

        val = actag_count & OCXL_DVSEC_ACTAG_MASK;
        pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_EN, val);

        val = actag_base & OCXL_DVSEC_ACTAG_MASK;
        pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_BASE, val);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_afu_actag);

int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count)
{
        return pnv_ocxl_get_pasid_count(dev, count);
}

void ocxl_config_set_afu_pasid(struct pci_dev *dev, int pos, int pasid_base,
                               u32 pasid_count_log)
{
        u8 val8;
        u32 val32;

        val8 = pasid_count_log & OCXL_DVSEC_PASID_LOG_MASK;
        pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_EN, val8);

        pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE,
                              &val32);
        val32 &= ~OCXL_DVSEC_PASID_MASK;
        val32 |= pasid_base & OCXL_DVSEC_PASID_MASK;
        pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE,
                               val32);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_afu_pasid);

void ocxl_config_set_afu_state(struct pci_dev *dev, int pos, int enable)
{
        u8 val;

        pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, &val);
        if (enable)
                val |= 1;
        else
                val &= 0xFE;
        pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, val);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_afu_state);

int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec)
{
        u32 val;
        __be32 *be32ptr;
        u8 timers;
        int i, rc;
        long recv_cap;
        char *recv_rate;

        /*
         * Skip on function != 0, as the TL can only be defined on 0
         */
        if (PCI_FUNC(dev->devfn) != 0)
                return 0;

        recv_rate = kzalloc(PNV_OCXL_TL_RATE_BUF_SIZE, GFP_KERNEL);
        if (!recv_rate)
                return -ENOMEM;
        /*
         * The spec defines 64 templates for messages in the
         * Transaction Layer (TL).
         *
         * The host and device each support a subset, so we need to
         * configure the transmitters on each side to send only
         * templates the receiver understands, at a rate the receiver
         * can process. Per the spec, template 0 must be supported by
         * everybody. That's the template which has been used by the
         * host and device so far.
         *
         * The sending rate limit must be set before the template is
         * enabled.
         */

        /*
         * Device -> host
         */
        rc = pnv_ocxl_get_tl_cap(dev, &recv_cap, recv_rate,
                                 PNV_OCXL_TL_RATE_BUF_SIZE);
        if (rc)
                goto out;

        for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
                be32ptr = (__be32 *) &recv_rate[i];
                pci_write_config_dword(dev,
                                       tl_dvsec + OCXL_DVSEC_TL_SEND_RATE + i,
                                       be32_to_cpu(*be32ptr));
        }
        val = recv_cap >> 32;
        pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP, val);
        val = recv_cap & GENMASK(31, 0);
        pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP + 4, val);

        /*
         * Host -> device
         */
        for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
                pci_read_config_dword(dev,
                                      tl_dvsec + OCXL_DVSEC_TL_RECV_RATE + i,
                                      &val);
                be32ptr = (__be32 *) &recv_rate[i];
                *be32ptr = cpu_to_be32(val);
        }
        pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP, &val);
        recv_cap = (long) val << 32;
        pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP + 4, &val);
        recv_cap |= val;

        rc = pnv_ocxl_set_tl_conf(dev, recv_cap, __pa(recv_rate),
                                  PNV_OCXL_TL_RATE_BUF_SIZE);
        if (rc)
                goto out;

        /*
         * OpenCAPI commands needing to be retried are classified per
         * the TL in 2 groups: short and long commands.
         *
         * The short backoff timer is not used for now. It will be
         * for OpenCAPI 4.0.
         *
         * The long backoff timer is typically used when an AFU hits
         * a page fault but the NPU is already processing one. So the
         * AFU needs to wait before it can resubmit. Having a value
         * too low doesn't break anything, but can generate extra
         * traffic on the link.
         * We set it to 1.6 us for now. It's shorter than, but in the
         * same order of magnitude as the time spent to process a page
         * fault.
         */
        timers = 0x2 << 4; /* long timer = 1.6 us */
        pci_write_config_byte(dev, tl_dvsec + OCXL_DVSEC_TL_BACKOFF_TIMERS,
                              timers);

        rc = 0;
out:
        kfree(recv_rate);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_config_set_TL);

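/*
 * Ask the AFU to terminate a PASID: write the PASID with the terminate
 * bit (bit 20) set in the AFU control DVSEC, then poll until the AFU
 * clears the bit or the timeout expires.
 */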
int ocxl_config_terminate_pasid(struct pci_dev *dev, int afu_control, int pasid)
{
        u32 val;
        unsigned long timeout;

        pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
                              &val);
        if (EXTRACT_BIT(val, 20)) {
                dev_err(&dev->dev,
                        "Can't terminate PASID %#x, previous termination didn't complete\n",
                        pasid);
                return -EBUSY;
        }

        val &= ~OCXL_DVSEC_PASID_MASK;
        val |= pasid & OCXL_DVSEC_PASID_MASK;
        val |= BIT(20);
        pci_write_config_dword(dev,
                               afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
                               val);

        timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT);
        pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
                              &val);
        while (EXTRACT_BIT(val, 20)) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_err(&dev->dev,
                                "Timeout while waiting for AFU to terminate PASID %#x\n",
                                pasid);
                        return -EBUSY;
                }
                cpu_relax();
                pci_read_config_dword(dev,
                                      afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
                                      &val);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_config_terminate_pasid);

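/*
 * Program, in the function DVSEC, the range of acTags assigned to the
 * function: first tag and number of tags.
 */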
void ocxl_config_set_actag(struct pci_dev *dev, int func_dvsec, u32 tag_first,
                           u32 tag_count)
{
        u32 val;

        val = (tag_first & OCXL_DVSEC_ACTAG_MASK) << 16;
        val |= tag_count & OCXL_DVSEC_ACTAG_MASK;
        pci_write_config_dword(dev, func_dvsec + OCXL_DVSEC_FUNC_OFF_ACTAG,
                               val);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_actag);