// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2018 Solarflare Communications Inc.
 * Copyright 2019-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
11
12 #include "net_driver.h"
13 #include <linux/module.h>
14 #include <linux/aer.h>
15 #include "efx_common.h"
16 #include "efx_channels.h"
17 #include "io.h"
18 #include "ef100_nic.h"
19 #include "ef100_netdev.h"
20 #include "ef100_sriov.h"
21 #include "ef100_regs.h"
22 #include "ef100.h"
23
/* BAR used for the function control window when no Xilinx capabilities
 * table overrides it (see ef100_pci_probe()).
 */
#define EFX_EF100_PCI_DEFAULT_BAR 2

/* Number of bytes at start of vendor specified extended capability that indicate
 * that the capability is vendor specified. i.e. offset from value returned by
 * pci_find_next_ext_capability() to beginning of vendor specified capability
 * header.
 */
#define PCI_EXT_CAP_HDR_LENGTH 4

/* Expected size of a Xilinx continuation address table entry. */
#define ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH 16
35
/* Result of scanning the Xilinx capabilities tables for the EF100
 * function control window location.
 */
struct ef100_func_ctl_window {
	bool valid;		/* set once an EF100 entry has been parsed */
	unsigned int bar;	/* PCI BAR containing the window */
	u64 offset;		/* byte offset of the window within the BAR */
};

static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
				       struct ef100_func_ctl_window *result);
44
/* Number of bytes to offset when reading bit position x with dword accessors. */
#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3)

/* Extract @width bits of @x starting at bit position @lbn (taken modulo 32).
 * NOTE(review): 1ull << 64 would be undefined behaviour; callers pass
 * hardware-defined field widths well below 64.
 */
#define EXTRACT_BITS(x, lbn, width) \
	(((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1))
50
_ef100_pci_get_bar_bits_with_width(struct efx_nic * efx,int structure_start,int lbn,int width)51 static u32 _ef100_pci_get_bar_bits_with_width(struct efx_nic *efx,
52 int structure_start,
53 int lbn, int width)
54 {
55 efx_dword_t dword;
56
57 efx_readd(efx, &dword, structure_start + ROUND_DOWN_TO_DWORD(lbn));
58
59 return EXTRACT_BITS(le32_to_cpu(dword.u32[0]), lbn, width);
60 }
61
/* Read the CFGBAR field named by @bitdef from the entry at @entry_location
 * in the currently mapped BAR, using the ESF_GZ_CFGBAR_* LBN/WIDTH pairs.
 */
#define ef100_pci_get_bar_bits(efx, entry_location, bitdef)	\
	_ef100_pci_get_bar_bits_with_width(efx, entry_location,	\
		ESF_GZ_CFGBAR_ ## bitdef ## _LBN,		\
		ESF_GZ_CFGBAR_ ## bitdef ## _WIDTH)
66
ef100_pci_parse_ef100_entry(struct efx_nic * efx,int entry_location,struct ef100_func_ctl_window * result)67 static int ef100_pci_parse_ef100_entry(struct efx_nic *efx, int entry_location,
68 struct ef100_func_ctl_window *result)
69 {
70 u64 offset = ef100_pci_get_bar_bits(efx, entry_location, EF100_FUNC_CTL_WIN_OFF) <<
71 ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT;
72 u32 bar = ef100_pci_get_bar_bits(efx, entry_location, EF100_BAR);
73
74 netif_dbg(efx, probe, efx->net_dev,
75 "Found EF100 function control window bar=%d offset=0x%llx\n",
76 bar, offset);
77
78 if (result->valid) {
79 netif_err(efx, probe, efx->net_dev,
80 "Duplicated EF100 table entry.\n");
81 return -EINVAL;
82 }
83
84 if (bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM ||
85 bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID) {
86 netif_err(efx, probe, efx->net_dev,
87 "Bad BAR value of %d in Xilinx capabilities EF100 entry.\n",
88 bar);
89 return -EINVAL;
90 }
91
92 result->bar = bar;
93 result->offset = offset;
94 result->valid = true;
95 return 0;
96 }
97
ef100_pci_does_bar_overflow(struct efx_nic * efx,int bar,u64 next_entry)98 static bool ef100_pci_does_bar_overflow(struct efx_nic *efx, int bar,
99 u64 next_entry)
100 {
101 return next_entry + ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE >
102 pci_resource_len(efx->pci_dev, bar);
103 }
104
/* Parse a Xilinx capabilities table entry describing a continuation to a new
 * sub-table.
 *
 * If the continuation lives in a different BAR, the current BAR mapping is
 * torn down, the new BAR is mapped for the duration of the walk, and the
 * original BAR is restored afterwards.  Returns 0 on success or a negative
 * error code.
 */
static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_location,
					  struct ef100_func_ctl_window *result)
{
	unsigned int previous_bar;
	efx_oword_t entry;
	u64 offset;
	int rc = 0;
	u32 bar;

	/* Read the whole continuation entry from the currently mapped BAR. */
	efx_reado(efx, &entry, entry_location);

	bar = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_CONT_CAP_BAR);

	offset = EFX_OWORD_FIELD64(entry, ESF_GZ_CFGBAR_CONT_CAP_OFFSET) <<
		ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT;

	previous_bar = efx->mem_bar;

	if (bar == ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_VSEC_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
			  bar);
		return -EINVAL;
	}

	if (bar != previous_bar) {
		/* Must unmap the old BAR before mapping the new one. */
		efx_fini_io(efx);

		if (ef100_pci_does_bar_overflow(efx, bar, offset)) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
				  bar, offset);
			return -EINVAL;
		}

		/* Temporarily map new BAR. */
		rc = efx_init_io(efx, bar,
				 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
				 pci_resource_len(efx->pci_dev, bar));
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Mapping new BAR for Xilinx table failed, rc=%d\n", rc);
			return rc;
		}
	}

	/* Recursively walk the sub-table in the (possibly remapped) BAR. */
	rc = ef100_pci_walk_xilinx_table(efx, offset, result);
	if (rc)
		return rc;

	if (bar != previous_bar) {
		efx_fini_io(efx);

		/* Put old BAR back. */
		rc = efx_init_io(efx, previous_bar,
				 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
				 pci_resource_len(efx->pci_dev, previous_bar));
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Putting old BAR back failed, rc=%d\n", rc);
			return rc;
		}
	}

	return 0;
}
175
/* Iterate over the Xilinx capabilities table in the currently mapped BAR and
 * call ef100_pci_parse_ef100_entry() on any EF100 entries and
 * ef100_pci_parse_continue_entry() on any table continuations.
 *
 * @offset: byte offset of the first table entry within the mapped BAR.
 * Returns 0 when the end of the table is reached (LAST marker or last flag),
 * or a negative error code on a malformed table.
 */
static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
				       struct ef100_func_ctl_window *result)
{
	u64 current_entry = offset;
	int rc = 0;

	while (true) {
		u32 id = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_FORMAT);
		u32 last = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_LAST);
		u32 rev = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_REV);
		u32 entry_size;

		/* A LAST-format entry terminates the table immediately. */
		if (id == ESE_GZ_CFGBAR_ENTRY_LAST)
			return 0;

		entry_size = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_SIZE);

		netif_dbg(efx, probe, efx->net_dev,
			  "Seen Xilinx table entry 0x%x size 0x%x at 0x%llx in BAR[%d]\n",
			  id, entry_size, current_entry, efx->mem_bar);

		/* Every entry must hold at least its two header dwords. */
		if (entry_size < sizeof(u32) * 2) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table entry too short len=0x%x\n", entry_size);
			return -EINVAL;
		}

		switch (id) {
		case ESE_GZ_CFGBAR_ENTRY_EF100:
			if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 ||
			    entry_size < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) {
				netif_err(efx, probe, efx->net_dev,
					  "Bad length or rev for EF100 entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
					  entry_size, rev);
				return -EINVAL;
			}

			rc = ef100_pci_parse_ef100_entry(efx, current_entry,
							 result);
			if (rc)
				return rc;
			break;
		case ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR:
			if (rev != 0 || entry_size < ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH) {
				netif_err(efx, probe, efx->net_dev,
					  "Bad length or rev for continue entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
					  entry_size, rev);
				return -EINVAL;
			}

			rc = ef100_pci_parse_continue_entry(efx, current_entry, result);
			if (rc)
				return rc;
			break;
		default:
			/* Ignore unknown table entries. */
			break;
		}

		if (last)
			return 0;

		current_entry += entry_size;

		/* Make sure the next entry's header still fits in the BAR
		 * before the next iteration reads it.
		 */
		if (ef100_pci_does_bar_overflow(efx, efx->mem_bar, current_entry)) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table overrun at position=0x%llx.\n",
				  current_entry);
			return -EINVAL;
		}
	}
}
252
/* Read a bitfield from PCI config space.
 *
 * @structure_start: byte offset of the containing structure in config space
 * @lbn: low bit number of the field relative to @structure_start
 * @width: field width in bits (must be < 64)
 * @result: filled with the extracted field on success
 *
 * Returns 0 on success or the pci_read_config_dword() error code.
 */
static int _ef100_pci_get_config_bits_with_width(struct efx_nic *efx,
						 int structure_start, int lbn,
						 int width, u32 *result)
{
	int pos = structure_start + ROUND_DOWN_TO_DWORD(lbn);
	u32 raw;
	int rc;

	rc = pci_read_config_dword(efx->pci_dev, pos, &raw);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read PCI config dword at %d\n",
			  pos);
		return rc;
	}

	*result = EXTRACT_BITS(raw, lbn, width);

	return 0;
}
272
/* Read the VSEC field named by @bitdef from the vendor capability at
 * @entry_location in config space, using the ESF_GZ_VSEC_* LBN/WIDTH pairs.
 * Stores the field in *@result; returns 0 or a pci_read_config_dword() error.
 */
#define ef100_pci_get_config_bits(efx, entry_location, bitdef, result)	\
	_ef100_pci_get_config_bits_with_width(efx, entry_location,	\
		ESF_GZ_VSEC_ ## bitdef ## _LBN,				\
		ESF_GZ_VSEC_ ## bitdef ## _WIDTH, result)
277
/* Call ef100_pci_walk_xilinx_table() for the Xilinx capabilities table pointed
 * to by this PCI_EXT_CAP_ID_VNDR.
 *
 * @vndr_cap: config-space offset of the vendor-specific capability body
 * @has_offset_hi: capability is long enough to carry the high offset dword
 *
 * The BAR named by the capability is mapped only for the duration of the
 * table walk.  Returns 0 on success or a negative error code.
 */
static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap,
				      bool has_offset_hi,
				      struct ef100_func_ctl_window *result)
{
	u32 offset_high = 0;
	u32 offset_lo = 0;
	u64 offset = 0;
	u32 bar = 0;
	int rc = 0;

	rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_BAR, &bar);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read ESF_GZ_VSEC_TBL_BAR, rc=%d\n",
			  rc);
		return rc;
	}

	if (bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
			  bar);
		return -EINVAL;
	}

	rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_LO, &offset_lo);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read ESF_GZ_VSEC_TBL_OFF_LO, rc=%d\n",
			  rc);
		return rc;
	}

	/* Get optional extension to 64bit offset. */
	if (has_offset_hi) {
		rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_HI, &offset_high);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_TBL_OFF_HI, rc=%d\n",
				  rc);
			return rc;
		}
	}

	/* Combine the low and (optional) high parts into a byte offset. */
	offset = (((u64)offset_lo) << ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT) |
		 (((u64)offset_high) << ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT);

	/* The first entry's two header dwords must fit within the BAR. */
	if (offset > pci_resource_len(efx->pci_dev, bar) - sizeof(u32) * 2) {
		netif_err(efx, probe, efx->net_dev,
			  "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
			  bar, offset);
		return -EINVAL;
	}

	/* Temporarily map BAR. */
	rc = efx_init_io(efx, bar,
			 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
			 pci_resource_len(efx->pci_dev, bar));
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "efx_init_io failed, rc=%d\n", rc);
		return rc;
	}

	rc = ef100_pci_walk_xilinx_table(efx, offset, result);

	/* Unmap temporarily mapped BAR. */
	efx_fini_io(efx);
	return rc;
}
352
/* Scan the PCI extended capabilities for Xilinx PCI_EXT_CAP_ID_VNDR entries
 * and call ef100_pci_parse_xilinx_cap() on each matching one to locate the
 * EF100 function control window.
 *
 * Returns 0 on success (result->valid says whether a window was found), or
 * a negative error code.  It is an error to see Xilinx tables but no EF100
 * entry.
 */
static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
					   struct ef100_func_ctl_window *result)
{
	int num_xilinx_caps = 0;
	int cap = 0;

	result->valid = false;

	while ((cap = pci_find_next_ext_capability(efx->pci_dev, cap, PCI_EXT_CAP_ID_VNDR)) != 0) {
		/* Skip the generic extended-capability header to reach the
		 * vendor-specific header.
		 */
		int vndr_cap = cap + PCI_EXT_CAP_HDR_LENGTH;
		u32 vsec_ver = 0;
		u32 vsec_len = 0;
		u32 vsec_id = 0;
		int rc = 0;

		num_xilinx_caps++;

		rc = ef100_pci_get_config_bits(efx, vndr_cap, ID, &vsec_id);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_ID, rc=%d\n",
				  rc);
			return rc;
		}

		rc = ef100_pci_get_config_bits(efx, vndr_cap, VER, &vsec_ver);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_VER, rc=%d\n",
				  rc);
			return rc;
		}

		/* Get length of whole capability - i.e. starting at cap */
		rc = ef100_pci_get_config_bits(efx, vndr_cap, LEN, &vsec_len);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_LEN, rc=%d\n",
				  rc);
			return rc;
		}

		if (vsec_id == ESE_GZ_XILINX_VSEC_ID &&
		    vsec_ver == ESE_GZ_VSEC_VER_XIL_CFGBAR &&
		    vsec_len >= ESE_GZ_VSEC_LEN_MIN) {
			/* The high offset dword is only present in longer
			 * versions of the capability.
			 */
			bool has_offset_hi = (vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT);

			rc = ef100_pci_parse_xilinx_cap(efx, vndr_cap,
							has_offset_hi, result);
			if (rc)
				return rc;
		}
	}

	if (num_xilinx_caps && !result->valid) {
		netif_err(efx, probe, efx->net_dev,
			  "Seen %d Xilinx tables, but no EF100 entry.\n",
			  num_xilinx_caps);
		return -EINVAL;
	}

	return 0;
}
419
420 /* Final NIC shutdown
421 * This is called only at module unload (or hotplug removal). A PF can call
422 * this on its VFs to ensure they are unbound first.
423 */
ef100_pci_remove(struct pci_dev * pci_dev)424 static void ef100_pci_remove(struct pci_dev *pci_dev)
425 {
426 struct efx_nic *efx;
427
428 efx = pci_get_drvdata(pci_dev);
429 if (!efx)
430 return;
431
432 rtnl_lock();
433 dev_close(efx->net_dev);
434 rtnl_unlock();
435
436 /* Unregistering our netdev notifier triggers unbinding of TC indirect
437 * blocks, so we have to do it before PCI removal.
438 */
439 unregister_netdevice_notifier(&efx->netdev_notifier);
440 #if defined(CONFIG_SFC_SRIOV)
441 if (!efx->type->is_vf)
442 efx_ef100_pci_sriov_disable(efx);
443 #endif
444 ef100_remove(efx);
445 efx_fini_io(efx);
446 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
447
448 pci_set_drvdata(pci_dev, NULL);
449 efx_fini_struct(efx);
450 free_netdev(efx->net_dev);
451
452 pci_disable_pcie_error_reporting(pci_dev);
453 };
454
ef100_pci_probe(struct pci_dev * pci_dev,const struct pci_device_id * entry)455 static int ef100_pci_probe(struct pci_dev *pci_dev,
456 const struct pci_device_id *entry)
457 {
458 struct ef100_func_ctl_window fcw = { 0 };
459 struct net_device *net_dev;
460 struct efx_nic *efx;
461 int rc;
462
463 /* Allocate and initialise a struct net_device and struct efx_nic */
464 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
465 if (!net_dev)
466 return -ENOMEM;
467 efx = netdev_priv(net_dev);
468 efx->type = (const struct efx_nic_type *)entry->driver_data;
469
470 pci_set_drvdata(pci_dev, efx);
471 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
472 rc = efx_init_struct(efx, pci_dev, net_dev);
473 if (rc)
474 goto fail;
475
476 efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
477 netif_info(efx, probe, efx->net_dev,
478 "Solarflare EF100 NIC detected\n");
479
480 rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
481 if (rc) {
482 netif_err(efx, probe, efx->net_dev,
483 "Error looking for ef100 function control window, rc=%d\n",
484 rc);
485 goto fail;
486 }
487
488 if (!fcw.valid) {
489 /* Extended capability not found - use defaults. */
490 fcw.bar = EFX_EF100_PCI_DEFAULT_BAR;
491 fcw.offset = 0;
492 fcw.valid = true;
493 }
494
495 if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
496 netif_err(efx, probe, efx->net_dev,
497 "Func control window overruns BAR\n");
498 rc = -EIO;
499 goto fail;
500 }
501
502 /* Set up basic I/O (BAR mappings etc) */
503 rc = efx_init_io(efx, fcw.bar,
504 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
505 pci_resource_len(efx->pci_dev, fcw.bar));
506 if (rc)
507 goto fail;
508
509 efx->reg_base = fcw.offset;
510
511 efx->netdev_notifier.notifier_call = ef100_netdev_event;
512 rc = register_netdevice_notifier(&efx->netdev_notifier);
513 if (rc) {
514 netif_err(efx, probe, efx->net_dev,
515 "Failed to register netdevice notifier, rc=%d\n", rc);
516 goto fail;
517 }
518
519 rc = efx->type->probe(efx);
520 if (rc)
521 goto fail;
522
523 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
524
525 return 0;
526
527 fail:
528 ef100_pci_remove(pci_dev);
529 return rc;
530 }
531
#ifdef CONFIG_SFC_SRIOV
/* .sriov_configure hook: delegate to the NIC type's handler if it has one.
 * Returns the number of VFs enabled on success, -ENOENT if SR-IOV is not
 * supported by this NIC type, or the handler's error code.
 */
static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	struct efx_nic *efx = pci_get_drvdata(dev);
	int rc;

	if (!efx->type->sriov_configure)
		return -ENOENT;

	rc = efx->type->sriov_configure(efx, num_vfs);
	return rc ? rc : num_vfs;
}
#endif
548
/* PCI device ID table */
static const struct pci_device_id ef100_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100),  /* Riverhead PF */
		.driver_data = (unsigned long) &ef100_pf_nic_type },
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100),  /* Riverhead VF */
		.driver_data = (unsigned long) &ef100_vf_nic_type },
	{0}  /* end of list */
};
557
/* PCI driver glue for EF100 NICs; registered by the module init code. */
struct pci_driver ef100_pci_driver = {
	.name = "sfc_ef100",
	.id_table = ef100_pci_table,
	.probe = ef100_pci_probe,
	.remove = ef100_pci_remove,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = ef100_pci_sriov_configure,
#endif
	.err_handler = &efx_err_handlers,
};

MODULE_DEVICE_TABLE(pci, ef100_pci_table);
570