// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pci_dn.c
 *
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * PCI manipulation via device_nodes.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/of.h>

#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>

/*
 * Find the firmware data of a specific PCI device attached to the
 * indicated PCI bus. For VFs, the firmware data is linked to that of
 * the PF's bridge. For other devices, it is linked to that of their
 * own bridge.
 */
static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus)
{
	struct pci_bus *pbus;
	struct device_node *dn;
	struct pci_dn *pdn;

	/*
	 * We may have a virtual bus which doesn't
	 * have an associated bridge.
	 */
	pbus = bus;
	while (pbus) {
		if (pci_is_root_bus(pbus) || pbus->self)
			break;

		pbus = pbus->parent;
	}

	/*
	 * Except for virtual buses, all PCI buses should
	 * have device nodes.
	 */
	dn = pci_bus_to_OF_node(pbus);
	pdn = dn ? PCI_DN(dn) : NULL;

	return pdn;
}

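/*
 * Look up the pci_dn for the function at @devfn on @bus: try the
 * cached pointer in the pci_dev first, then the device node, and
 * finally the parent pci_dn's child list (the only place VFs live).
 */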
struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
				    int devfn)
{
	struct device_node *dn = NULL;
	struct pci_dn *parent, *pdn;
	struct pci_dev *pdev = NULL;

	/* Fast path: fetch from PCI device */
	list_for_each_entry(pdev, &bus->devices, bus_list) {
		if (pdev->devfn == devfn) {
			if (pdev->dev.archdata.pci_data)
				return pdev->dev.archdata.pci_data;

			dn = pci_device_to_OF_node(pdev);
			break;
		}
	}

	/* Fast path: fetch from device node */
	pdn = dn ? PCI_DN(dn) : NULL;
	if (pdn)
		return pdn;

	/* Slow path: fetch from firmware data hierarchy */
	parent = pci_bus_to_pdn(bus);
	if (!parent)
		return NULL;

	list_for_each_entry(pdn, &parent->child_list, list) {
		if (pdn->busno == bus->number &&
		    pdn->devfn == devfn)
			return pdn;
	}

	return NULL;
}

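/*
 * Find the pci_dn associated with @pdev. The cached archdata pointer
 * and the device node are checked first; VFs, which have no device
 * node, are found in the child list of the bridge's pci_dn.
 */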
struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
{
	struct device_node *dn;
	struct pci_dn *parent, *pdn;

	/* Search device directly */
	if (pdev->dev.archdata.pci_data)
		return pdev->dev.archdata.pci_data;

	/* Check device node */
	dn = pci_device_to_OF_node(pdev);
	pdn = dn ? PCI_DN(dn) : NULL;
	if (pdn)
		return pdn;

	/*
	 * VFs don't have device nodes. We hook their
	 * firmware data to the PF's bridge.
	 */
	parent = pci_bus_to_pdn(pdev->bus);
	if (!parent)
		return NULL;

	list_for_each_entry(pdn, &parent->child_list, list) {
		if (pdn->busno == pdev->bus->number &&
		    pdn->devfn == pdev->devfn)
			return pdn;
	}

	return NULL;
}

#ifdef CONFIG_EEH
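/* Allocate an eeh_dev and bind it to the given pci_dn */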
static struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
{
	struct eeh_dev *edev;

	/* Allocate EEH device */
	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return NULL;

	/* Associate EEH device with the pci_dn */
	pdn->edev = edev;
	edev->pdn = pdn;
	edev->bdfn = (pdn->busno << 8) | pdn->devfn;
	edev->controller = pdn->phb;

	return edev;
}
#endif /* CONFIG_EEH */

#ifdef CONFIG_PCI_IOV
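/* Allocate a pci_dn for a single VF and link it under the parent pci_dn */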
static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent,
					   int busno, int devfn)
{
	struct pci_dn *pdn;

	/* Except for the PHB, we always have a parent */
	if (!parent)
		return NULL;

	pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
	if (!pdn)
		return NULL;

	pdn->phb = parent->phb;
	pdn->parent = parent;
	pdn->busno = busno;
	pdn->devfn = devfn;
	pdn->pe_number = IODA_INVALID_PE;
	INIT_LIST_HEAD(&pdn->child_list);
	INIT_LIST_HEAD(&pdn->list);
	list_add_tail(&pdn->list, &parent->child_list);

	return pdn;
}

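/*
 * Create firmware data (and, with CONFIG_EEH, an eeh_dev) for every
 * possible VF of the given PF.
 */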
struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev)
{
	struct pci_dn *parent, *pdn;
	int i;

	/* Only support IOV for now */
	if (WARN_ON(!pdev->is_physfn))
		return NULL;

	/* Check if VFs have been populated */
	pdn = pci_get_pdn(pdev);
	if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF))
		return NULL;

	pdn->flags |= PCI_DN_FLAG_IOV_VF;
	parent = pci_bus_to_pdn(pdev->bus);
	if (!parent)
		return NULL;

	for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
		struct eeh_dev *edev __maybe_unused;

		pdn = add_one_sriov_vf_pdn(parent,
					   pci_iov_virtfn_bus(pdev, i),
					   pci_iov_virtfn_devfn(pdev, i));
		if (!pdn) {
			dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n",
				 __func__, i);
			return NULL;
		}

#ifdef CONFIG_EEH
		/* Create the EEH device for the VF */
		edev = eeh_dev_init(pdn);
		BUG_ON(!edev);

		/* FIXME: these should probably be populated by the EEH probe */
		edev->physfn = pdev;
		edev->vf_index = i;
#endif /* CONFIG_EEH */
	}
	return pci_get_pdn(pdev);
}

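/*
 * Tear down the per-VF firmware data created by add_sriov_vf_pdns(),
 * including any associated EEH state.
 */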
void remove_sriov_vf_pdns(struct pci_dev *pdev)
{
	struct pci_dn *parent;
	struct pci_dn *pdn, *tmp;
	int i;

	/* Only support IOV PF for now */
	if (WARN_ON(!pdev->is_physfn))
		return;

	/* Check if VFs have been populated */
	pdn = pci_get_pdn(pdev);
	if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF))
		return;

	pdn->flags &= ~PCI_DN_FLAG_IOV_VF;
	parent = pci_bus_to_pdn(pdev->bus);
	if (!parent)
		return;

	/*
	 * We might introduce a flag to pci_dn in the future
	 * so that we can release the VFs' firmware data in
	 * batch mode.
	 */
	for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
		struct eeh_dev *edev __maybe_unused;

		list_for_each_entry_safe(pdn, tmp,
					 &parent->child_list, list) {
			if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
			    pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
				continue;

#ifdef CONFIG_EEH
			/*
			 * Release EEH state for this VF. The PCI core
			 * has already torn down the pci_dev for this VF, but
			 * we're responsible for removing the eeh_dev since it
			 * has the same lifetime as the pci_dn that spawned it.
			 */
			edev = pdn_to_eeh_dev(pdn);
			if (edev) {
				/*
				 * We allocate pci_dn's for the totalvfs count,
				 * but only the VFs that were activated
				 * have a configured PE.
				 */
				if (edev->pe)
					eeh_pe_tree_remove(edev);

				pdn->edev = NULL;
				kfree(edev);
			}
#endif /* CONFIG_EEH */

			if (!list_empty(&pdn->list))
				list_del(&pdn->list);

			kfree(pdn);
		}
	}
}
#endif /* CONFIG_PCI_IOV */

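/*
 * Allocate and initialise a pci_dn from the properties of the given
 * device node (bus/devfn from "reg", IDs from "vendor-id"/"device-id"/
 * "class-code") and attach it to the parent node's pci_dn, if any.
 */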
struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
					struct device_node *dn)
{
	const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
	const __be32 *regs;
	struct device_node *parent;
	struct pci_dn *pdn;
#ifdef CONFIG_EEH
	struct eeh_dev *edev;
#endif

	pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
	if (pdn == NULL)
		return NULL;
	dn->data = pdn;
	pdn->phb = hose;
	pdn->pe_number = IODA_INVALID_PE;
	regs = of_get_property(dn, "reg", NULL);
	if (regs) {
		u32 addr = of_read_number(regs, 1);

		/* First register entry is addr (00BBSS00) */
		pdn->busno = (addr >> 16) & 0xff;
		pdn->devfn = (addr >> 8) & 0xff;
	}

	/* vendor/device IDs and class code */
	regs = of_get_property(dn, "vendor-id", NULL);
	pdn->vendor_id = regs ? of_read_number(regs, 1) : 0;
	regs = of_get_property(dn, "device-id", NULL);
	pdn->device_id = regs ? of_read_number(regs, 1) : 0;
	regs = of_get_property(dn, "class-code", NULL);
	pdn->class_code = regs ? of_read_number(regs, 1) : 0;

	/* Extended config space */
	pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);

	/* Create EEH device */
#ifdef CONFIG_EEH
	edev = eeh_dev_init(pdn);
	if (!edev) {
		kfree(pdn);
		return NULL;
	}
#endif

	/* Attach to parent node */
	INIT_LIST_HEAD(&pdn->child_list);
	INIT_LIST_HEAD(&pdn->list);
	parent = of_get_parent(dn);
	pdn->parent = parent ? PCI_DN(parent) : NULL;
	if (pdn->parent)
		list_add_tail(&pdn->list, &pdn->parent->child_list);

	return pdn;
}
EXPORT_SYMBOL_GPL(pci_add_device_node_info);

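/*
 * Detach and release the pci_dn hanging off the given device node.
 * If a pci_dev instantiated from this pci_dn still exists, the pci_dn
 * is only marked dead and the final free is left to the pci_dev's
 * release path.
 */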
void pci_remove_device_node_info(struct device_node *dn)
{
	struct pci_dn *pdn = dn ? PCI_DN(dn) : NULL;
	struct device_node *parent;
	struct pci_dev *pdev;
#ifdef CONFIG_EEH
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (edev)
		edev->pdn = NULL;
#endif

	if (!pdn)
		return;

	WARN_ON(!list_empty(&pdn->child_list));
	list_del(&pdn->list);

	/* Drop the parent pci_dn's ref to our backing dt node */
	parent = of_get_parent(dn);
	if (parent)
		of_node_put(parent);

	/*
	 * At this point we *might* still have a pci_dev that was
	 * instantiated from this pci_dn. So defer free()ing it until
	 * the pci_dev's release function is called.
	 */
	pdev = pci_get_domain_bus_and_slot(pdn->phb->global_number,
					   pdn->busno, pdn->devfn);
	if (pdev) {
		/* NB: pdev has a ref to dn */
		pci_dbg(pdev, "marked pdn (from %pOF) as dead\n", dn);
		pdn->flags |= PCI_DN_FLAG_DEAD;
	} else {
		dn->data = NULL;
		kfree(pdn);
	}

	pci_dev_put(pdev);
}
EXPORT_SYMBOL_GPL(pci_remove_device_node_info);

/*
 * Traverse a device tree, stopping at each PCI device in the tree.
 * This is done depth first. As each node is processed, a "pre"
 * function is called and the children are processed recursively.
 *
 * The "pre" func returns a value. If non-zero is returned from
 * the "pre" func, the traversal stops and this value is returned.
 * This return value is useful when using traverse as a method of
 * finding a device.
 *
 * NOTE: we do not run the func for devices that do not appear to
 * be PCI, except for the start node, which we assume to be PCI
 * (this is good because the start node is often a PHB which may
 * be missing PCI properties).
 * We use the class-code as an indicator. If we run into one of
 * these nodes we also assume its siblings are non-PCI for
 * performance.
 */
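/*
 * Typical usage is a callback that allocates per-node data and returns
 * NULL to keep walking, or a non-NULL value to abort the walk early;
 * see add_pdn()/pci_devs_phb_init_dynamic() below for the in-tree
 * example:
 *
 *	pci_traverse_device_nodes(phb->dn, add_pdn, phb);
 */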
void *pci_traverse_device_nodes(struct device_node *start,
				void *(*fn)(struct device_node *, void *),
				void *data)
{
	struct device_node *dn, *nextdn;
	void *ret;

	/* We started with a phb, iterate over all children */
	for (dn = start->child; dn; dn = nextdn) {
		const __be32 *classp;
		u32 class = 0;

		nextdn = NULL;
		classp = of_get_property(dn, "class-code", NULL);
		if (classp)
			class = of_read_number(classp, 1);

		if (fn) {
			ret = fn(dn, data);
			if (ret)
				return ret;
		}

		/* If we are a PCI bridge, go down */
		if (dn->child && ((class >> 8) == PCI_CLASS_BRIDGE_PCI ||
				  (class >> 8) == PCI_CLASS_BRIDGE_CARDBUS))
			/* Depth first...do children */
			nextdn = dn->child;
		else if (dn->sibling)
			/* ok, try next sibling instead. */
			nextdn = dn->sibling;
		if (!nextdn) {
			/* Walk up to next valid sibling. */
			do {
				dn = dn->parent;
				if (dn == start)
					return NULL;
			} while (dn->sibling == NULL);
			nextdn = dn->sibling;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_traverse_device_nodes);

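/* Callback for pci_traverse_device_nodes(): create a pci_dn for one node */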
static void *add_pdn(struct device_node *dn, void *data)
{
	struct pci_controller *hose = data;
	struct pci_dn *pdn;

	pdn = pci_add_device_node_info(hose, dn);
	if (!pdn)
		return ERR_PTR(-ENOMEM);

	return NULL;
}

/**
 * pci_devs_phb_init_dynamic - setup pci devices under this PHB
 * @phb: pci-to-host bridge (top-level bridge connecting to cpu)
 *
 * This routine is called both during boot (before the memory
 * subsystem is set up, before kmalloc is valid) and during the
 * dynamic lpar operation of adding a PHB to a running system.
 */
void pci_devs_phb_init_dynamic(struct pci_controller *phb)
{
	struct device_node *dn = phb->dn;
	struct pci_dn *pdn;

	/* PHB nodes themselves must not match */
	pdn = pci_add_device_node_info(phb, dn);
	if (pdn) {
		pdn->devfn = pdn->busno = -1;
		pdn->vendor_id = pdn->device_id = pdn->class_code = 0;
		pdn->phb = phb;
		phb->pci_data = pdn;
	}

	/* Update dn->phb ptrs for new phb and child devices */
	pci_traverse_device_nodes(dn, add_pdn, phb);
}

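/*
 * Early fixup: cache the pci_dn pointer in pdev->dev.archdata so later
 * lookups via pci_get_pdn() can take the fast path.
 */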
static void pci_dev_pdn_setup(struct pci_dev *pdev)
{
	struct pci_dn *pdn;

	if (pdev->dev.archdata.pci_data)
		return;

	/* Setup the fast path */
	pdn = pci_get_pdn(pdev);
	pdev->dev.archdata.pci_data = pdn;
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);