// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay = dev->d3hot_delay;

        if (delay < pci_pm_d3hot_delay)
                delay = pci_pm_d3hot_delay;

        if (delay)
                msleep(delay);
}

bool pci_reset_supported(struct pci_dev *dev)
{
        return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MMIO_SIZE       (2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE  (2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE        1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value. Arch can override either
 * the default or actual value as it sees fit. Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
        return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
        if (!strcmp(str, "off"))
                pci_bridge_d3_disable = true;
        else if (!strcmp(str, "force"))
                pci_bridge_d3_force = true;
        return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct pci_bus *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each_entry(tmp, &bus->children, node) {
                n = pci_bus_max_busnr(tmp);
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
        u16 status;
        int ret;

        ret = pci_read_config_word(pdev, PCI_STATUS, &status);
        if (ret != PCIBIOS_SUCCESSFUL)
                return -EIO;

        status &= PCI_STATUS_ERROR_BITS;
        if (status)
                pci_write_config_word(pdev, PCI_STATUS, status);

        return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
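
/*
 * Illustrative sketch of how a caller might use the helper above; "pdev"
 * is assumed to come from the caller, e.g. a probe or error-handler
 * callback, and is not defined in this file:
 *
 *      int status = pci_status_get_and_clear_errors(pdev);
 *
 *      if (status < 0)
 *              return status;  -- config read failed
 *      if (status & PCI_STATUS_DETECTED_PARITY)
 *              pci_warn(pdev, "detected parity error\n");
 */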

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
                                            bool write_combine)
{
        struct resource *res = &pdev->resource[bar];
        resource_size_t start = res->start;
        resource_size_t size = resource_size(res);

        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
                pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
                return NULL;
        }

        if (write_combine)
                return ioremap_wc(start, size);

        return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
        return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
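
/*
 * Minimal usage sketch for pci_ioremap_bar(), assuming a driver probe
 * callback that provides "pdev", a memory BAR 0, and a hypothetical
 * device register at offset 0x10:
 *
 *      void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + 0x10);
 */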

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
                                  const char **endptr)
{
        int ret;
        unsigned int seg, bus, slot, func;
        char *wpath, *p;
        char end;

        *endptr = strchrnul(path, ';');

        wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
        if (!wpath)
                return -ENOMEM;

        while (1) {
                p = strrchr(wpath, '/');
                if (!p)
                        break;
                ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
                if (ret != 2) {
                        ret = -EINVAL;
                        goto free_and_exit;
                }

                if (dev->devfn != PCI_DEVFN(slot, func)) {
                        ret = 0;
                        goto free_and_exit;
                }

                /*
                 * Note: we don't need to get a reference to the upstream
                 * bridge because we hold a reference to the top level
                 * device which should hold a reference to the bridge,
                 * and so on.
                 */
                dev = pci_upstream_bridge(dev);
                if (!dev) {
                        ret = 0;
                        goto free_and_exit;
                }

                *p = 0;
        }

        ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
                     &func, &end);
        if (ret != 4) {
                seg = 0;
                ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
                if (ret != 3) {
                        ret = -EINVAL;
                        goto free_and_exit;
                }
        }

        ret = (seg == pci_domain_nr(dev->bus) &&
               bus == dev->bus->number &&
               dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
        kfree(wpath);
        return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes in kernel parameters. If the domain is
 * left unspecified, it is taken to be 0. In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device. The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
                             const char **endptr)
{
        int ret;
        int count;
        unsigned short vendor, device, subsystem_vendor, subsystem_device;

        if (strncmp(p, "pci:", 4) == 0) {
                /* PCI vendor/device (subvendor/subdevice) IDs are specified */
                p += 4;
                ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
                             &subsystem_vendor, &subsystem_device, &count);
                if (ret != 4) {
                        ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
                        if (ret != 2)
                                return -EINVAL;

                        subsystem_vendor = 0;
                        subsystem_device = 0;
                }

                p += count;

                if ((!vendor || vendor == dev->vendor) &&
                    (!device || device == dev->device) &&
                    (!subsystem_vendor ||
                     subsystem_vendor == dev->subsystem_vendor) &&
                    (!subsystem_device ||
                     subsystem_device == dev->subsystem_device))
                        goto found;
        } else {
                /*
                 * PCI Bus, Device, Function IDs are specified
                 * (optionally, may include a path of devfns following it)
                 */
                ret = pci_dev_str_match_path(dev, p, &p);
                if (ret < 0)
                        return ret;
                else if (ret)
                        goto found;
        }

        *endptr = p;
        return 0;

found:
        *endptr = p;
        return 1;
}
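
/*
 * For illustration, strings accepted by pci_dev_str_match(); the
 * addresses and IDs below are made-up examples:
 *
 *      "0000:03:00.0"          domain:bus:device.function
 *      "03:00.0/1f.2"          device 1f.2 behind the bridge at 03:00.0
 *      "pci:8086:1533"         any device with that vendor/device ID
 *      "pci:8086:0:17aa:0"     vendor 8086, any device, subvendor 17aa
 */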

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                  u8 pos, int cap, int *ttl)
{
        u8 id;
        u16 ent;

        pci_bus_read_config_byte(bus, devfn, pos, &pos);

        while ((*ttl)--) {
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_word(bus, devfn, pos, &ent);

                id = ent & 0xff;
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos = (ent >> 8);
        }
        return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                              u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
                                   unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        }

        return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
        u8 pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_find_capability);
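
/*
 * Example sketch: probe for the Power Management capability and read its
 * PMC register (assumes a valid "pdev" supplied by the caller):
 *
 *      u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *      u16 pmc;
 *
 *      if (pm)
 *              pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */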

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        u8 hdr_type, pos;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
        u32 header;
        int ttl;
        u16 pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
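
/*
 * Example sketch: locate the AER extended capability and read the
 * uncorrectable-error status register (assumes a valid "pdev"):
 *
 *      u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *      u32 status;
 *
 *      if (aer)
 *              pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS,
 *                                    &status);
 */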

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
        u32 dword;
        u64 dsn;
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
        if (!pos)
                return 0;

        /*
         * The Device Serial Number is two dwords offset 4 bytes from the
         * capability position. The specification says that the first dword is
         * the lower half, and the second dword is the upper half.
         */
        pos += 4;
        pci_read_config_dword(dev, pos, &dword);
        dsn = (u64)dword;
        pci_read_config_dword(dev, pos + 4, &dword);
        dsn |= ((u64)dword) << 32;

        return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
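
/*
 * Usage sketch: a driver might log the DSN as a stable board identifier
 * (assumes a valid "pdev"; purely illustrative):
 *
 *      u64 dsn = pci_get_dsn(pdev);
 *
 *      if (dsn)
 *              pci_info(pdev, "device serial number %016llx\n", dsn);
 */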

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        u8 pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
        u16 vsec = 0;
        u32 header;

        if (vendor != dev->vendor)
                return 0;

        while ((vsec = pci_find_next_ext_capability(dev, vsec,
                                                    PCI_EXT_CAP_ID_VNDR))) {
                if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
                                          &header) == PCIBIOS_SUCCESSFUL &&
                    PCI_VNDR_HEADER_ID(header) == cap)
                        return vsec;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
        if (!pos)
                return 0;

        while (pos) {
                u16 v, id;

                pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
                pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
                if (vendor == v && dvsec == id)
                        return pos;

                pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
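
/*
 * Illustrative sketch: look up a vendor's DVSEC instance. The vendor ID
 * and DVSEC ID below are placeholders, not real assignments:
 *
 *      u32 hdr;
 *      u16 pos = pci_find_dvsec_capability(pdev, 0x1234, 0x5678);
 *
 *      if (pos)
 *              pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
 */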

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *                            region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
                                          struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        struct resource *r;
        int i;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;
                if (resource_contains(r, res)) {

                        /*
                         * If the window is prefetchable but the BAR is
                         * not, the allocator made a mistake.
                         */
                        if (r->flags & IORESOURCE_PREFETCH &&
                            !(res->flags & IORESOURCE_PREFETCH))
                                return NULL;

                        /*
                         * If we're below a transparent bridge, there may
                         * be both a positively-decoded aperture and a
                         * subtractively-decoded region that contain the BAR.
                         * We want the positively-decoded one, so this depends
                         * on pci_bus_for_each_resource() giving us those
                         * first.
                         */
                        return r;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct resource *r = &dev->resource[i];

                if (r->start && resource_contains(r, res))
                        return r;
        }

        return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
        int i;

        /* Wait for Transaction Pending bit to clear */
        for (i = 0; i < 4; i++) {
                u16 status;
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pci_read_config_word(dev, pos, &status);
                if (!(status & mask))
                        return 1;
        }

        return 0;
}
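
/*
 * Sketch: waiting for a PCIe function's Transaction Pending bit before a
 * reset might look like this (assumes "pdev" is a PCIe device; the reset
 * paths elsewhere in this file use a similar pattern):
 *
 *      if (!pci_wait_for_pending(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
 *                                PCI_EXP_DEVSTA_TRPND))
 *              pci_warn(pdev, "transactions still pending\n");
 */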

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
        pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
        int ret = 0;
        const char *p;
        int pos;
        u16 ctrl;

        if (!disable_acs_redir_param)
                return;

        p = disable_acs_redir_param;
        while (*p) {
                ret = pci_dev_str_match(dev, p, &p);
                if (ret < 0) {
                        pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
                                     disable_acs_redir_param);

                        break;
                } else if (ret == 1) {
                        /* Found a match */
                        break;
                }

                if (*p != ';' && *p != ',') {
                        /* End of param or invalid format */
                        break;
                }
                p++;
        }

        if (ret != 1)
                return;

        if (!pci_dev_specific_disable_acs_redir(dev))
                return;

        pos = dev->acs_cap;
        if (!pos) {
                pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
                return;
        }

        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

        /* P2P Request & Completion Redirect */
        ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

        pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
        int pos;
        u16 cap;
        u16 ctrl;

        pos = dev->acs_cap;
        if (!pos)
                return;

        pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

        /* Source Validation */
        ctrl |= (cap & PCI_ACS_SV);

        /* P2P Request Redirect */
        ctrl |= (cap & PCI_ACS_RR);

        /* P2P Completion Redirect */
        ctrl |= (cap & PCI_ACS_CR);

        /* Upstream Forwarding */
        ctrl |= (cap & PCI_ACS_UF);

        /* Enable Translation Blocking for external devices and noats */
        if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
                ctrl |= (cap & PCI_ACS_TB);

        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
        if (!pci_acs_enable)
                goto disable_acs_redir;

        if (!pci_dev_specific_enable_acs(dev))
                goto disable_acs_redir;

        pci_std_enable_acs(dev);

disable_acs_redir:
        /*
         * Note: pci_disable_acs_redir() must be called even if ACS was not
         * enabled by the kernel because it may have been enabled by
         * platform firmware. So if we are told to disable it, we should
         * always disable it after setting the kernel's default
         * preferences.
         */
        pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return true;

        return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                               pci_power_t t)
{
        if (pci_use_mid_pm())
                return mid_pci_set_power_state(dev, t);

        return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return mid_pci_get_power_state(dev);

        return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
        if (!pci_use_mid_pm())
                acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return PCI_POWER_ERROR;

        return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
        if (pci_use_mid_pm())
                return PCI_POWER_ERROR;

        return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return false;

        return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return false;

        return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold. The platform firmware is therefore queried first
 * to detect accessibility of the register. In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (platform_pci_get_power_state(dev) == PCI_D3cold) {
                dev->current_state = PCI_D3cold;
        } else if (dev->pm_cap) {
                u16 pmcsr;

                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                if (PCI_POSSIBLE_ERROR(pmcsr)) {
                        dev->current_state = PCI_D3cold;
                        return;
                }
                dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        } else {
                dev->current_state = state;
        }
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
        platform_pci_refresh_power_state(dev);
        pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        error = platform_pci_set_power_state(dev, state);
        if (!error)
                pci_update_current_state(dev, state);
        else if (!dev->pm_cap) /* Fall back to PCI_D0 */
                dev->current_state = PCI_D0;

        return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
        pm_request_resume(&pci_dev->dev);
        return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
        int delay = 1;
        u32 id;

        /*
         * After reset, the device should not silently discard config
         * requests, but it may still indicate that it needs more time by
         * responding to them with CRS completions. The Root Port will
         * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
         * the read (except when CRS SV is enabled and the read was for the
         * Vendor ID; in that case it synthesizes 0x0001 data).
         *
         * Wait for the device to return a non-CRS completion. Read the
         * Command register instead of Vendor ID so we don't have to
         * contend with the CRS SV value.
         */
        pci_read_config_dword(dev, PCI_COMMAND, &id);
        while (PCI_POSSIBLE_ERROR(id)) {
                if (delay > timeout) {
                        pci_warn(dev, "not ready %dms after %s; giving up\n",
                                 delay - 1, reset_type);
                        return -ENOTTY;
                }

                if (delay > 1000)
                        pci_info(dev, "not ready %dms after %s; waiting\n",
                                 delay - 1, reset_type);

                msleep(delay);
                delay *= 2;
                pci_read_config_dword(dev, PCI_COMMAND, &id);
        }

        if (delay > 1000)
                pci_info(dev, "ready %dms after %s\n", delay - 1,
                         reset_type);

        return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 */
int pci_power_up(struct pci_dev *dev)
{
        bool need_restore;
        pci_power_t state;
        u16 pmcsr;

        platform_pci_set_power_state(dev, PCI_D0);

        if (!dev->pm_cap) {
                state = platform_pci_get_power_state(dev);
                if (state == PCI_UNKNOWN)
                        dev->current_state = PCI_D0;
                else
                        dev->current_state = state;

                if (state == PCI_D0)
                        return 0;

                return -EIO;
        }

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (PCI_POSSIBLE_ERROR(pmcsr)) {
                pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
                        pci_power_name(dev->current_state));
                dev->current_state = PCI_D3cold;
                return -EIO;
        }

        state = pmcsr & PCI_PM_CTRL_STATE_MASK;

        need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
                        !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

        if (state == PCI_D0)
                goto end;

        /*
         * Force the entire word to 0. This doesn't affect PME_Status, disables
         * PME_En, and sets PowerState to 0.
         */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

        /* Mandatory transition delays; see PCI PM 1.2. */
        if (state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

end:
        dev->current_state = PCI_D0;
        if (need_restore)
                return 1;

        return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev)
{
        u16 pmcsr;
        int ret;

        ret = pci_power_up(dev);
        if (ret < 0)
                return ret;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (dev->current_state != PCI_D0) {
                pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
                                     pci_power_name(dev->current_state));
        } else if (ret > 0) {
                /*
                 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
                 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
                 * from D3hot to D0 _may_ perform an internal reset, thereby
                 * going to "D0 Uninitialized" rather than "D0 Initialized".
                 * For example, at least some versions of the 3c905B and the
                 * 3c556B exhibit this behaviour.
                 *
                 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
                 * devices in a D3hot state at boot. Consequently, we need to
                 * restore at least the BARs so that the device will be
                 * accessible to its driver.
                 */
                pci_restore_bars(dev);
        }

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;

        if (!dev->pm_cap)
                return -EIO;

        /*
         * Validate transition: We can enter D0 from any state, but if
         * we're already in a low-power state, we can only go deeper. E.g.,
         * we can go from D1 to D3, but we can't go directly from D3 to D1;
         * we'd have to go from D3 to D0, then to D1.
         */
        if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
                pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                return -EINVAL;
        }

        /* Check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
            || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (PCI_POSSIBLE_ERROR(pmcsr)) {
                pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                dev->current_state = PCI_D3cold;
                return -EIO;
        }

        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pmcsr |= state;

        /* Enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays; see PCI PM 1.2. */
        if (state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (dev->current_state != state)
                pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
                                     pci_power_name(dev->current_state),
                                     pci_power_name(state));

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int error;

        /* Bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

                /*
                 * If the device or the parent bridge do not support PCI
                 * PM, ignore the request if we're doing anything other
                 * than putting it into D0 (which would only happen on
                 * boot).
                 */
                return 0;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        if (state == PCI_D0)
                return pci_set_full_power_state(dev);

        /*
         * This device is quirked not to be put into D3, so don't put it in
         * D3
         */
        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        if (state == PCI_D3cold) {
                /*
                 * To put the device in D3cold, put it into D3hot in the native
                 * way, then put it into D3cold using platform ops.
                 */
                error = pci_set_low_power_state(dev, PCI_D3hot);

                if (pci_platform_power_transition(dev, PCI_D3cold))
                        return error;

                /* Powering off a bridge may power off the whole hierarchy */
                if (dev->current_state == PCI_D3cold)
                        pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
        } else {
                error = pci_set_low_power_state(dev, state);

                if (pci_platform_power_transition(dev, state))
                        return error;
        }

        return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
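
/*
 * Sketch of the typical pairing in a driver's suspend/resume path
 * (assumes "pdev" from the PM callbacks; illustrative only):
 *
 *      suspend:
 *              pci_save_state(pdev);
 *              pci_set_power_state(pdev, PCI_D3hot);
 *      resume:
 *              pci_set_power_state(pdev, PCI_D0);
 *              pci_restore_state(pdev);
 */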
1472
1473 #define PCI_EXP_SAVE_REGS 7
1474
_pci_find_saved_cap(struct pci_dev * pci_dev,u16 cap,bool extended)1475 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1476 u16 cap, bool extended)
1477 {
1478 struct pci_cap_saved_state *tmp;
1479
1480 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1481 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1482 return tmp;
1483 }
1484 return NULL;
1485 }
1486
pci_find_saved_cap(struct pci_dev * dev,char cap)1487 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1488 {
1489 return _pci_find_saved_cap(dev, cap, false);
1490 }
1491
pci_find_saved_ext_cap(struct pci_dev * dev,u16 cap)1492 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1493 {
1494 return _pci_find_saved_cap(dev, cap, true);
1495 }
1496
pci_save_pcie_state(struct pci_dev * dev)1497 static int pci_save_pcie_state(struct pci_dev *dev)
1498 {
1499 int i = 0;
1500 struct pci_cap_saved_state *save_state;
1501 u16 *cap;
1502
1503 if (!pci_is_pcie(dev))
1504 return 0;
1505
1506 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1507 if (!save_state) {
1508 pci_err(dev, "buffer not found in %s\n", __func__);
1509 return -ENOMEM;
1510 }
1511
1512 cap = (u16 *)&save_state->cap.data[0];
1513 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1514 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1515 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1516 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
1517 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1518 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1519 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1520
1521 return 0;
1522 }
1523
pci_bridge_reconfigure_ltr(struct pci_dev * dev)1524 void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
1525 {
1526 #ifdef CONFIG_PCIEASPM
1527 struct pci_dev *bridge;
1528 u32 ctl;
1529
1530 bridge = pci_upstream_bridge(dev);
1531 if (bridge && bridge->ltr_path) {
1532 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1533 if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1534 pci_dbg(bridge, "re-enabling LTR\n");
1535 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1536 PCI_EXP_DEVCTL2_LTR_EN);
1537 }
1538 }
1539 #endif
1540 }
1541
pci_restore_pcie_state(struct pci_dev * dev)1542 static void pci_restore_pcie_state(struct pci_dev *dev)
1543 {
1544 int i = 0;
1545 struct pci_cap_saved_state *save_state;
1546 u16 *cap;
1547
1548 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1549 if (!save_state)
1550 return;
1551
1552 /*
1553 * Downstream ports reset the LTR enable bit when link goes down.
1554 * Check and re-configure the bit here before restoring device.
1555 * PCIe r5.0, sec 7.5.3.16.
1556 */
1557 pci_bridge_reconfigure_ltr(dev);
1558
1559 cap = (u16 *)&save_state->cap.data[0];
1560 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1561 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1562 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1563 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1564 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1565 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1566 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1567 }
1568
pci_save_pcix_state(struct pci_dev * dev)1569 static int pci_save_pcix_state(struct pci_dev *dev)
1570 {
1571 int pos;
1572 struct pci_cap_saved_state *save_state;
1573
1574 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1575 if (!pos)
1576 return 0;
1577
1578 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1579 if (!save_state) {
1580 pci_err(dev, "buffer not found in %s\n", __func__);
1581 return -ENOMEM;
1582 }
1583
1584 pci_read_config_word(dev, pos + PCI_X_CMD,
1585 (u16 *)save_state->cap.data);
1586
1587 return 0;
1588 }
1589
pci_restore_pcix_state(struct pci_dev * dev)1590 static void pci_restore_pcix_state(struct pci_dev *dev)
1591 {
1592 int i = 0, pos;
1593 struct pci_cap_saved_state *save_state;
1594 u16 *cap;
1595
1596 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1597 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1598 if (!save_state || !pos)
1599 return;
1600 cap = (u16 *)&save_state->cap.data[0];
1601
1602 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1603 }
1604
pci_save_ltr_state(struct pci_dev * dev)1605 static void pci_save_ltr_state(struct pci_dev *dev)
1606 {
1607 int ltr;
1608 struct pci_cap_saved_state *save_state;
1609 u32 *cap;
1610
1611 if (!pci_is_pcie(dev))
1612 return;
1613
1614 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1615 if (!ltr)
1616 return;
1617
1618 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1619 if (!save_state) {
1620 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1621 return;
1622 }
1623
1624 /* Some broken devices only support dword access to LTR */
1625 cap = &save_state->cap.data[0];
1626 pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
1627 }
1628
pci_restore_ltr_state(struct pci_dev * dev)1629 static void pci_restore_ltr_state(struct pci_dev *dev)
1630 {
1631 struct pci_cap_saved_state *save_state;
1632 int ltr;
1633 u32 *cap;
1634
1635 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1636 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1637 if (!save_state || !ltr)
1638 return;
1639
1640 /* Some broken devices only support dword access to LTR */
1641 cap = &save_state->cap.data[0];
1642 pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
1643 }
1644
1645 /**
1646 * pci_save_state - save the PCI configuration space of a device before
1647 * suspending
1648 * @dev: PCI device that we're dealing with
1649 */
pci_save_state(struct pci_dev * dev)1650 int pci_save_state(struct pci_dev *dev)
1651 {
1652 int i;
1653 /* XXX: 100% dword access ok here? */
1654 for (i = 0; i < 16; i++) {
1655 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1656 pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1657 i * 4, dev->saved_config_space[i]);
1658 }
1659 dev->state_saved = true;
1660
1661 i = pci_save_pcie_state(dev);
1662 if (i != 0)
1663 return i;
1664
1665 i = pci_save_pcix_state(dev);
1666 if (i != 0)
1667 return i;
1668
1669 pci_save_ltr_state(dev);
1670 pci_save_dpc_state(dev);
1671 pci_save_aer_state(dev);
1672 pci_save_ptm_state(dev);
1673 return pci_save_vc_state(dev);
1674 }
1675 EXPORT_SYMBOL(pci_save_state);
1676
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1677 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1678 u32 saved_val, int retry, bool force)
1679 {
1680 u32 val;
1681
1682 pci_read_config_dword(pdev, offset, &val);
1683 if (!force && val == saved_val)
1684 return;
1685
1686 for (;;) {
1687 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1688 offset, val, saved_val);
1689 pci_write_config_dword(pdev, offset, saved_val);
1690 if (retry-- <= 0)
1691 return;
1692
1693 pci_read_config_dword(pdev, offset, &val);
1694 if (val == saved_val)
1695 return;
1696
1697 mdelay(1);
1698 }
1699 }
1700
pci_restore_config_space_range(struct pci_dev * pdev,int start,int end,int retry,bool force)1701 static void pci_restore_config_space_range(struct pci_dev *pdev,
1702 int start, int end, int retry,
1703 bool force)
1704 {
1705 int index;
1706
1707 for (index = end; index >= start; index--)
1708 pci_restore_config_dword(pdev, 4 * index,
1709 pdev->saved_config_space[index],
1710 retry, force);
1711 }
1712
pci_restore_config_space(struct pci_dev * pdev)1713 static void pci_restore_config_space(struct pci_dev *pdev)
1714 {
1715 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1716 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1717 /* Restore BARs before the command register. */
1718 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1719 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1720 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1721 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1722
1723 /*
1724 * Force rewriting of prefetch registers to avoid S3 resume
1725 * issues on Intel PCI bridges that occur when these
1726 * registers are not explicitly written.
1727 */
1728 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1729 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1730 } else {
1731 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1732 }
1733 }
1734
pci_restore_rebar_state(struct pci_dev * pdev)1735 static void pci_restore_rebar_state(struct pci_dev *pdev)
1736 {
1737 unsigned int pos, nbars, i;
1738 u32 ctrl;
1739
1740 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1741 if (!pos)
1742 return;
1743
1744 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1745 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1746 PCI_REBAR_CTRL_NBAR_SHIFT;
1747
1748 for (i = 0; i < nbars; i++, pos += 8) {
1749 struct resource *res;
1750 int bar_idx, size;
1751
1752 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1753 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1754 res = pdev->resource + bar_idx;
1755 size = pci_rebar_bytes_to_size(resource_size(res));
1756 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1757 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1758 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1759 }
1760 }
1761
1762 /**
1763 * pci_restore_state - Restore the saved state of a PCI device
1764 * @dev: PCI device that we're dealing with
1765 */
pci_restore_state(struct pci_dev * dev)1766 void pci_restore_state(struct pci_dev *dev)
1767 {
1768 if (!dev->state_saved)
1769 return;
1770
1771 /*
1772 * Restore max latencies (in the LTR capability) before enabling
1773 * LTR itself (in the PCIe capability).
1774 */
1775 pci_restore_ltr_state(dev);
1776
1777 pci_restore_pcie_state(dev);
1778 pci_restore_pasid_state(dev);
1779 pci_restore_pri_state(dev);
1780 pci_restore_ats_state(dev);
1781 pci_restore_vc_state(dev);
1782 pci_restore_rebar_state(dev);
1783 pci_restore_dpc_state(dev);
1784 pci_restore_ptm_state(dev);
1785
1786 pci_aer_clear_status(dev);
1787 pci_restore_aer_state(dev);
1788
1789 pci_restore_config_space(dev);
1790
1791 pci_restore_pcix_state(dev);
1792 pci_restore_msi_state(dev);
1793
1794 /* Restore ACS and IOV configuration state */
1795 pci_enable_acs(dev);
1796 pci_restore_iov_state(dev);
1797
1798 dev->state_saved = false;
1799 }
1800 EXPORT_SYMBOL(pci_restore_state);
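/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver pairing pci_save_state() in its suspend callback with
 * pci_restore_state() on resume. The foo_* names are made up; only the
 * PCI core calls are real.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		int ret = pci_set_power_state(pdev, PCI_D0);
 *
 *		if (ret)
 *			return ret;
 *
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */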
1801
1802 struct pci_saved_state {
1803 u32 config_space[16];
1804 struct pci_cap_saved_data cap[];
1805 };
1806
1807 /**
1808 * pci_store_saved_state - Allocate and return an opaque struct containing
1809 * the device's saved state.
1810 * @dev: PCI device that we're dealing with
1811 *
1812 * Return NULL if the device has no saved state or if allocation fails.
1813 */
1814 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1815 {
1816 struct pci_saved_state *state;
1817 struct pci_cap_saved_state *tmp;
1818 struct pci_cap_saved_data *cap;
1819 size_t size;
1820
1821 if (!dev->state_saved)
1822 return NULL;
1823
1824 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1825
1826 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1827 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1828
1829 state = kzalloc(size, GFP_KERNEL);
1830 if (!state)
1831 return NULL;
1832
1833 memcpy(state->config_space, dev->saved_config_space,
1834 sizeof(state->config_space));
1835
1836 cap = state->cap;
1837 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1838 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1839 memcpy(cap, &tmp->cap, len);
1840 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1841 }
1842 /* Empty cap_save terminates list */
1843
1844 return state;
1845 }
1846 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1847
1848 /**
1849 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1850 * @dev: PCI device that we're dealing with
1851 * @state: Saved state returned from pci_store_saved_state()
1852 */
1853 int pci_load_saved_state(struct pci_dev *dev,
1854 struct pci_saved_state *state)
1855 {
1856 struct pci_cap_saved_data *cap;
1857
1858 dev->state_saved = false;
1859
1860 if (!state)
1861 return 0;
1862
1863 memcpy(dev->saved_config_space, state->config_space,
1864 sizeof(state->config_space));
1865
1866 cap = state->cap;
1867 while (cap->size) {
1868 struct pci_cap_saved_state *tmp;
1869
1870 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1871 if (!tmp || tmp->cap.size != cap->size)
1872 return -EINVAL;
1873
1874 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1875 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1876 sizeof(struct pci_cap_saved_data) + cap->size);
1877 }
1878
1879 dev->state_saved = true;
1880 return 0;
1881 }
1882 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1883
1884 /**
1885 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1886 * and free the memory allocated for it.
1887 * @dev: PCI device that we're dealing with
1888 * @state: Pointer to saved state returned from pci_store_saved_state()
1889 */
1890 int pci_load_and_free_saved_state(struct pci_dev *dev,
1891 struct pci_saved_state **state)
1892 {
1893 int ret = pci_load_saved_state(dev, *state);
1894 kfree(*state);
1895 *state = NULL;
1896 return ret;
1897 }
1898 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
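/*
 * Example (illustrative sketch): capturing a device's saved state once and
 * reloading it later, as callers such as VFIO do around operations that
 * clobber config space. Assumes pci_save_state() was called beforehand;
 * the surrounding context is hypothetical.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *
 *	... something that destroys the device's config space ...
 *
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */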
1899
1900 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1901 {
1902 return pci_enable_resources(dev, bars);
1903 }
1904
1905 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1906 {
1907 int err;
1908 struct pci_dev *bridge;
1909 u16 cmd;
1910 u8 pin;
1911
1912 err = pci_set_power_state(dev, PCI_D0);
1913 if (err < 0 && err != -EIO)
1914 return err;
1915
1916 bridge = pci_upstream_bridge(dev);
1917 if (bridge)
1918 pcie_aspm_powersave_config_link(bridge);
1919
1920 err = pcibios_enable_device(dev, bars);
1921 if (err < 0)
1922 return err;
1923 pci_fixup_device(pci_fixup_enable, dev);
1924
1925 if (dev->msi_enabled || dev->msix_enabled)
1926 return 0;
1927
1928 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1929 if (pin) {
1930 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1931 if (cmd & PCI_COMMAND_INTX_DISABLE)
1932 pci_write_config_word(dev, PCI_COMMAND,
1933 cmd & ~PCI_COMMAND_INTX_DISABLE);
1934 }
1935
1936 return 0;
1937 }
1938
1939 /**
1940 * pci_reenable_device - Resume abandoned device
1941 * @dev: PCI device to be resumed
1942 *
1943 * NOTE: This function is a backend of pci_default_resume() and is not supposed
1944 * to be called by normal code; write a proper resume handler and use it instead.
1945 */
1946 int pci_reenable_device(struct pci_dev *dev)
1947 {
1948 if (pci_is_enabled(dev))
1949 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1950 return 0;
1951 }
1952 EXPORT_SYMBOL(pci_reenable_device);
1953
1954 static void pci_enable_bridge(struct pci_dev *dev)
1955 {
1956 struct pci_dev *bridge;
1957 int retval;
1958
1959 bridge = pci_upstream_bridge(dev);
1960 if (bridge)
1961 pci_enable_bridge(bridge);
1962
1963 if (pci_is_enabled(dev)) {
1964 if (!dev->is_busmaster)
1965 pci_set_master(dev);
1966 return;
1967 }
1968
1969 retval = pci_enable_device(dev);
1970 if (retval)
1971 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1972 retval);
1973 pci_set_master(dev);
1974 }
1975
1976 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1977 {
1978 struct pci_dev *bridge;
1979 int err;
1980 int i, bars = 0;
1981
1982 /*
1983 * Power state could be unknown at this point, either due to a fresh
1984 * boot or a device removal call. So get the current power state
1985 * so that things like MSI message writing will behave as expected
1986 * (e.g. if the device really is in D0 at enable time).
1987 */
1988 pci_update_current_state(dev, dev->current_state);
1989
1990 if (atomic_inc_return(&dev->enable_cnt) > 1)
1991 return 0; /* already enabled */
1992
1993 bridge = pci_upstream_bridge(dev);
1994 if (bridge)
1995 pci_enable_bridge(bridge);
1996
1997 	/* Include all resources except the SR-IOV BARs */
1998 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1999 if (dev->resource[i].flags & flags)
2000 bars |= (1 << i);
2001 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2002 if (dev->resource[i].flags & flags)
2003 bars |= (1 << i);
2004
2005 err = do_pci_enable_device(dev, bars);
2006 if (err < 0)
2007 atomic_dec(&dev->enable_cnt);
2008 return err;
2009 }
2010
2011 /**
2012 * pci_enable_device_io - Initialize a device for use with IO space
2013 * @dev: PCI device to be initialized
2014 *
2015 * Initialize device before it's used by a driver. Ask low-level code
2016 * to enable I/O resources. Wake up the device if it was suspended.
2017 * Beware, this function can fail.
2018 */
2019 int pci_enable_device_io(struct pci_dev *dev)
2020 {
2021 return pci_enable_device_flags(dev, IORESOURCE_IO);
2022 }
2023 EXPORT_SYMBOL(pci_enable_device_io);
2024
2025 /**
2026 * pci_enable_device_mem - Initialize a device for use with Memory space
2027 * @dev: PCI device to be initialized
2028 *
2029 * Initialize device before it's used by a driver. Ask low-level code
2030 * to enable Memory resources. Wake up the device if it was suspended.
2031 * Beware, this function can fail.
2032 */
2033 int pci_enable_device_mem(struct pci_dev *dev)
2034 {
2035 return pci_enable_device_flags(dev, IORESOURCE_MEM);
2036 }
2037 EXPORT_SYMBOL(pci_enable_device_mem);
2038
2039 /**
2040 * pci_enable_device - Initialize device before it's used by a driver.
2041 * @dev: PCI device to be initialized
2042 *
2043 * Initialize device before it's used by a driver. Ask low-level code
2044 * to enable I/O and memory. Wake up the device if it was suspended.
2045 * Beware, this function can fail.
2046 *
2047 * Note we don't actually enable the device many times if we call
2048 * this function repeatedly (we just increment the count).
2049 */
2050 int pci_enable_device(struct pci_dev *dev)
2051 {
2052 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2053 }
2054 EXPORT_SYMBOL(pci_enable_device);
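/*
 * Example (illustrative sketch): the usual enable sequence in a driver's
 * probe() callback. foo_probe() and DRV_NAME are hypothetical; the PCI
 * core calls are real.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_regions(pdev, DRV_NAME);
 *		if (err)
 *			goto err_disable;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */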
2055
2056 /*
2057 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
2058 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
2059 * there's no need to track it separately. pci_devres is initialized
2060 * when a device is enabled using managed PCI device enable interface.
2061 */
2062 struct pci_devres {
2063 unsigned int enabled:1;
2064 unsigned int pinned:1;
2065 unsigned int orig_intx:1;
2066 unsigned int restore_intx:1;
2067 unsigned int mwi:1;
2068 u32 region_mask;
2069 };
2070
2071 static void pcim_release(struct device *gendev, void *res)
2072 {
2073 struct pci_dev *dev = to_pci_dev(gendev);
2074 struct pci_devres *this = res;
2075 int i;
2076
2077 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2078 if (this->region_mask & (1 << i))
2079 pci_release_region(dev, i);
2080
2081 if (this->mwi)
2082 pci_clear_mwi(dev);
2083
2084 if (this->restore_intx)
2085 pci_intx(dev, this->orig_intx);
2086
2087 if (this->enabled && !this->pinned)
2088 pci_disable_device(dev);
2089 }
2090
2091 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2092 {
2093 struct pci_devres *dr, *new_dr;
2094
2095 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2096 if (dr)
2097 return dr;
2098
2099 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2100 if (!new_dr)
2101 return NULL;
2102 return devres_get(&pdev->dev, new_dr, NULL, NULL);
2103 }
2104
2105 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2106 {
2107 if (pci_is_managed(pdev))
2108 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2109 return NULL;
2110 }
2111
2112 /**
2113 * pcim_enable_device - Managed pci_enable_device()
2114 * @pdev: PCI device to be initialized
2115 *
2116 * Managed pci_enable_device(). The device is automatically disabled on driver detach.
2117 */
2118 int pcim_enable_device(struct pci_dev *pdev)
2119 {
2120 struct pci_devres *dr;
2121 int rc;
2122
2123 dr = get_pci_dr(pdev);
2124 if (unlikely(!dr))
2125 return -ENOMEM;
2126 if (dr->enabled)
2127 return 0;
2128
2129 rc = pci_enable_device(pdev);
2130 if (!rc) {
2131 pdev->is_managed = 1;
2132 dr->enabled = 1;
2133 }
2134 return rc;
2135 }
2136 EXPORT_SYMBOL(pcim_enable_device);
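/*
 * Example (illustrative sketch): the managed variant of the probe pattern
 * above. With pcim_enable_device() there is no explicit disable in the
 * error path or in remove(); devres undoes the enable on driver detach.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */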
2137
2138 /**
2139 * pcim_pin_device - Pin managed PCI device
2140 * @pdev: PCI device to pin
2141 *
2142 * Pin managed PCI device @pdev. Pinned device won't be disabled on
2143 * driver detach. @pdev must have been enabled with
2144 * pcim_enable_device().
2145 */
2146 void pcim_pin_device(struct pci_dev *pdev)
2147 {
2148 struct pci_devres *dr;
2149
2150 dr = find_pci_dr(pdev);
2151 WARN_ON(!dr || !dr->enabled);
2152 if (dr)
2153 dr->pinned = 1;
2154 }
2155 EXPORT_SYMBOL(pcim_pin_device);
2156
2157 /**
2158 * pcibios_device_add - provide arch specific hooks when adding device dev
2159 * @dev: the PCI device being added
2160 *
2161 * Permits the platform to provide architecture specific functionality when
2162 * devices are added. This is the default implementation. Architecture
2163 * implementations can override this.
2164 */
2165 int __weak pcibios_device_add(struct pci_dev *dev)
2166 {
2167 return 0;
2168 }
2169
2170 /**
2171 * pcibios_release_device - provide arch specific hooks when releasing
2172 * device dev
2173 * @dev: the PCI device being released
2174 *
2175 * Permits the platform to provide architecture specific functionality when
2176 * devices are released. This is the default implementation. Architecture
2177 * implementations can override this.
2178 */
2179 void __weak pcibios_release_device(struct pci_dev *dev) {}
2180
2181 /**
2182 * pcibios_disable_device - disable arch specific PCI resources for device dev
2183 * @dev: the PCI device to disable
2184 *
2185 * Disables architecture specific PCI resources for the device. This
2186 * is the default implementation. Architecture implementations can
2187 * override this.
2188 */
2189 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2190
2191 /**
2192 * pcibios_penalize_isa_irq - penalize an ISA IRQ
2193 * @irq: ISA IRQ to penalize
2194 * @active: IRQ active or not
2195 *
2196 * Permits the platform to provide architecture-specific functionality when
2197 * penalizing ISA IRQs. This is the default implementation. Architecture
2198 * implementations can override this.
2199 */
2200 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2201
2202 static void do_pci_disable_device(struct pci_dev *dev)
2203 {
2204 u16 pci_command;
2205
2206 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2207 if (pci_command & PCI_COMMAND_MASTER) {
2208 pci_command &= ~PCI_COMMAND_MASTER;
2209 pci_write_config_word(dev, PCI_COMMAND, pci_command);
2210 }
2211
2212 pcibios_disable_device(dev);
2213 }
2214
2215 /**
2216 * pci_disable_enabled_device - Disable device without updating enable_cnt
2217 * @dev: PCI device to disable
2218 *
2219 * NOTE: This function is a backend of PCI power management routines and is
2220 * not supposed to be called by drivers.
2221 */
2222 void pci_disable_enabled_device(struct pci_dev *dev)
2223 {
2224 if (pci_is_enabled(dev))
2225 do_pci_disable_device(dev);
2226 }
2227
2228 /**
2229 * pci_disable_device - Disable PCI device after use
2230 * @dev: PCI device to be disabled
2231 *
2232 * Signal to the system that the PCI device is not in use by the system
2233 * anymore. This only involves disabling PCI bus-mastering, if active.
2234 *
2235 * Note we don't actually disable the device until all callers of
2236 * pci_enable_device() have called pci_disable_device().
2237 */
2238 void pci_disable_device(struct pci_dev *dev)
2239 {
2240 struct pci_devres *dr;
2241
2242 dr = find_pci_dr(dev);
2243 if (dr)
2244 dr->enabled = 0;
2245
2246 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2247 "disabling already-disabled device");
2248
2249 if (atomic_dec_return(&dev->enable_cnt) != 0)
2250 return;
2251
2252 do_pci_disable_device(dev);
2253
2254 dev->is_busmaster = 0;
2255 }
2256 EXPORT_SYMBOL(pci_disable_device);
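/*
 * Example (illustrative sketch): the teardown matching the unmanaged
 * probe pattern shown earlier. Each successful pci_enable_device() must
 * be balanced by one pci_disable_device(); the device is only actually
 * disabled when the last user calls it.
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_release_regions(pdev);
 *		pci_disable_device(pdev);
 *	}
 */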
2257
2258 /**
2259 * pcibios_set_pcie_reset_state - set reset state for device dev
2260 * @dev: the PCIe device to reset
2261 * @state: Reset state to enter into
2262 *
2263 * Set the PCIe reset state for the device. This is the default
2264 * implementation. Architecture implementations can override this.
2265 */
2266 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2267 enum pcie_reset_state state)
2268 {
2269 return -EINVAL;
2270 }
2271
2272 /**
2273 * pci_set_pcie_reset_state - set reset state for device dev
2274 * @dev: the PCIe device to reset
2275 * @state: Reset state to enter into
2276 *
2277 * Sets the PCI reset state for the device.
2278 */
2279 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2280 {
2281 return pcibios_set_pcie_reset_state(dev, state);
2282 }
2283 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2284
2285 #ifdef CONFIG_PCIEAER
2286 void pcie_clear_device_status(struct pci_dev *dev)
2287 {
2288 u16 sta;
2289
2290 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2291 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2292 }
2293 #endif
2294
2295 /**
2296 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2297 * @dev: PCIe root port or event collector.
2298 */
2299 void pcie_clear_root_pme_status(struct pci_dev *dev)
2300 {
2301 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2302 }
2303
2304 /**
2305 * pci_check_pme_status - Check if given device has generated PME.
2306 * @dev: Device to check.
2307 *
2308 * Check the PME status of the device and if set, clear it and clear PME enable
2309 * (if set). Return 'true' if PME status and PME enable were both set or
2310 * 'false' otherwise.
2311 */
2312 bool pci_check_pme_status(struct pci_dev *dev)
2313 {
2314 int pmcsr_pos;
2315 u16 pmcsr;
2316 bool ret = false;
2317
2318 if (!dev->pm_cap)
2319 return false;
2320
2321 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2322 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2323 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2324 return false;
2325
2326 /* Clear PME status. */
2327 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2328 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2329 /* Disable PME to avoid interrupt flood. */
2330 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2331 ret = true;
2332 }
2333
2334 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2335
2336 return ret;
2337 }
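/*
 * Note on the write-back above: PME_Status in PMCSR is a "write 1 to
 * clear" bit, so leaving it set in the value written back clears it,
 * while clearing PME_Enable in the same write stops further PMEs.
 */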
2338
2339 /**
2340 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2341 * @dev: Device to handle.
2342 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2343 *
2344 * Check if @dev has generated PME and queue a resume request for it in that
2345 * case.
2346 */
2347 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2348 {
2349 if (pme_poll_reset && dev->pme_poll)
2350 dev->pme_poll = false;
2351
2352 if (pci_check_pme_status(dev)) {
2353 pci_wakeup_event(dev);
2354 pm_request_resume(&dev->dev);
2355 }
2356 return 0;
2357 }
2358
2359 /**
2360 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2361 * @bus: Top bus of the subtree to walk.
2362 */
2363 void pci_pme_wakeup_bus(struct pci_bus *bus)
2364 {
2365 if (bus)
2366 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2367 }
2368
2370 /**
2371 * pci_pme_capable - check the capability of PCI device to generate PME#
2372 * @dev: PCI device to handle.
2373 * @state: PCI state from which device will issue PME#.
2374 */
2375 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2376 {
2377 if (!dev->pm_cap)
2378 return false;
2379
2380 return !!(dev->pme_support & (1 << state));
2381 }
2382 EXPORT_SYMBOL(pci_pme_capable);
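/*
 * dev->pme_support caches the PME_Support field of the PMC register, one
 * bit per power state (bit 0 is D0, bit 4 is D3cold). For example, a
 * device with pme_support == 0x19 (0b11001) can signal PME# from D0,
 * D3hot and D3cold, but not from D1 or D2.
 */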
2383
2384 static void pci_pme_list_scan(struct work_struct *work)
2385 {
2386 struct pci_pme_device *pme_dev, *n;
2387
2388 mutex_lock(&pci_pme_list_mutex);
2389 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2390 if (pme_dev->dev->pme_poll) {
2391 struct pci_dev *bridge;
2392
2393 bridge = pme_dev->dev->bus->self;
2394 /*
2395 * If bridge is in low power state, the
2396 * configuration space of subordinate devices
2397 			 * may not be accessible
2398 */
2399 if (bridge && bridge->current_state != PCI_D0)
2400 continue;
2401 /*
2402 * If the device is in D3cold it should not be
2403 * polled either.
2404 */
2405 if (pme_dev->dev->current_state == PCI_D3cold)
2406 continue;
2407
2408 pci_pme_wakeup(pme_dev->dev, NULL);
2409 } else {
2410 list_del(&pme_dev->list);
2411 kfree(pme_dev);
2412 }
2413 }
2414 if (!list_empty(&pci_pme_list))
2415 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2416 msecs_to_jiffies(PME_TIMEOUT));
2417 mutex_unlock(&pci_pme_list_mutex);
2418 }
2419
2420 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2421 {
2422 u16 pmcsr;
2423
2424 if (!dev->pme_support)
2425 return;
2426
2427 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2428 /* Clear PME_Status by writing 1 to it and enable PME# */
2429 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2430 if (!enable)
2431 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2432
2433 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2434 }
2435
2436 /**
2437 * pci_pme_restore - Restore PME configuration after config space restore.
2438 * @dev: PCI device to update.
2439 */
2440 void pci_pme_restore(struct pci_dev *dev)
2441 {
2442 u16 pmcsr;
2443
2444 if (!dev->pme_support)
2445 return;
2446
2447 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2448 if (dev->wakeup_prepared) {
2449 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2450 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2451 } else {
2452 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2453 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2454 }
2455 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2456 }
2457
2458 /**
2459 * pci_pme_active - enable or disable PCI device's PME# function
2460 * @dev: PCI device to handle.
2461 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2462 *
2463 * The caller must verify that the device is capable of generating PME# before
2464 * calling this function with @enable equal to 'true'.
2465 */
2466 void pci_pme_active(struct pci_dev *dev, bool enable)
2467 {
2468 __pci_pme_active(dev, enable);
2469
2470 /*
2471 * PCI (as opposed to PCIe) PME requires that the device have
2472 * its PME# line hooked up correctly. Not all hardware vendors
2473 * do this, so the PME never gets delivered and the device
2474 * remains asleep. The easiest way around this is to
2475 * periodically walk the list of suspended devices and check
2476 * whether any have their PME flag set. The assumption is that
2477 * we'll wake up often enough anyway that this won't be a huge
2478 * hit, and the power savings from the devices will still be a
2479 * win.
2480 *
2481 	 * Although PCIe uses an in-band PME message instead of the PME# line
2482 	 * to report PME, PME does not work reliably for some PCIe devices in
2483 	 * practice. For example, there are devices that set their PME
2484 * status bits, but don't really bother to send a PME message;
2485 * there are PCI Express Root Ports that don't bother to
2486 * trigger interrupts when they receive PME messages from the
2487 * devices below. So PME poll is used for PCIe devices too.
2488 */
2489
2490 if (dev->pme_poll) {
2491 struct pci_pme_device *pme_dev;
2492 if (enable) {
2493 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2494 GFP_KERNEL);
2495 if (!pme_dev) {
2496 pci_warn(dev, "can't enable PME#\n");
2497 return;
2498 }
2499 pme_dev->dev = dev;
2500 mutex_lock(&pci_pme_list_mutex);
2501 list_add(&pme_dev->list, &pci_pme_list);
2502 if (list_is_singular(&pci_pme_list))
2503 queue_delayed_work(system_freezable_wq,
2504 &pci_pme_work,
2505 msecs_to_jiffies(PME_TIMEOUT));
2506 mutex_unlock(&pci_pme_list_mutex);
2507 } else {
2508 mutex_lock(&pci_pme_list_mutex);
2509 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2510 if (pme_dev->dev == dev) {
2511 list_del(&pme_dev->list);
2512 kfree(pme_dev);
2513 break;
2514 }
2515 }
2516 mutex_unlock(&pci_pme_list_mutex);
2517 }
2518 }
2519
2520 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2521 }
2522 EXPORT_SYMBOL(pci_pme_active);
2523
2524 /**
2525 * __pci_enable_wake - enable PCI device as wakeup event source
2526 * @dev: PCI device affected
2527 * @state: PCI state from which device will issue wakeup events
2528 * @enable: True to enable event generation; false to disable
2529 *
2530 * This enables the device as a wakeup event source, or disables it.
2531 * When such events involve platform-specific hooks, those hooks are
2532 * called automatically by this routine.
2533 *
2534 * Devices with legacy power management (no standard PCI PM capabilities)
2535 * always require such platform hooks.
2536 *
2537 * RETURN VALUE:
2538 * 0 is returned on success
2539 * -EINVAL is returned if device is not supposed to wake up the system
2540 * Error code depending on the platform is returned if both the platform and
2541 * the native mechanism fail to enable the generation of wake-up events
2542 */
2543 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2544 {
2545 int ret = 0;
2546
2547 /*
2548 * Bridges that are not power-manageable directly only signal
2549 	 * wakeup on behalf of subordinate devices, which is set up
2550 * elsewhere, so skip them. However, bridges that are
2551 * power-manageable may signal wakeup for themselves (for example,
2552 * on a hotplug event) and they need to be covered here.
2553 */
2554 if (!pci_power_manageable(dev))
2555 return 0;
2556
2557 /* Don't do the same thing twice in a row for one device. */
2558 if (!!enable == !!dev->wakeup_prepared)
2559 return 0;
2560
2561 /*
2562 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2563 * Anderson we should be doing PME# wake enable followed by ACPI wake
2564 * enable. To disable wake-up we call the platform first, for symmetry.
2565 */
2566
2567 if (enable) {
2568 int error;
2569
2570 /*
2571 * Enable PME signaling if the device can signal PME from
2572 * D3cold regardless of whether or not it can signal PME from
2573 * the current target state, because that will allow it to
2574 * signal PME when the hierarchy above it goes into D3cold and
2575 * the device itself ends up in D3cold as a result of that.
2576 */
2577 if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2578 pci_pme_active(dev, true);
2579 else
2580 ret = 1;
2581 error = platform_pci_set_wakeup(dev, true);
2582 if (ret)
2583 ret = error;
2584 if (!ret)
2585 dev->wakeup_prepared = true;
2586 } else {
2587 platform_pci_set_wakeup(dev, false);
2588 pci_pme_active(dev, false);
2589 dev->wakeup_prepared = false;
2590 }
2591
2592 return ret;
2593 }
2594
2595 /**
2596 * pci_enable_wake - change wakeup settings for a PCI device
2597 * @pci_dev: Target device
2598 * @state: PCI state from which device will issue wakeup events
2599 * @enable: Whether or not to enable event generation
2600 *
2601 * If @enable is set, check device_may_wakeup() for the device before calling
2602 * __pci_enable_wake() for it.
2603 */
2604 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2605 {
2606 if (enable && !device_may_wakeup(&pci_dev->dev))
2607 return -EINVAL;
2608
2609 return __pci_enable_wake(pci_dev, state, enable);
2610 }
2611 EXPORT_SYMBOL(pci_enable_wake);
2612
2613 /**
2614 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2615 * @dev: PCI device to prepare
2616 * @enable: True to enable wake-up event generation; false to disable
2617 *
2618 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2619 * and this function allows them to set that up cleanly - pci_enable_wake()
2620 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2621 * ordering constraints.
2622 *
2623 * This function only returns error code if the device is not allowed to wake
2624 * up the system from sleep or it is not capable of generating PME# from both
2625 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2626 */
2627 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2628 {
2629 return pci_pme_capable(dev, PCI_D3cold) ?
2630 pci_enable_wake(dev, PCI_D3cold, enable) :
2631 pci_enable_wake(dev, PCI_D3hot, enable);
2632 }
2633 EXPORT_SYMBOL(pci_wake_from_d3);
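/*
 * Example (illustrative sketch): a hypothetical network driver arming
 * Wake-on-LAN in its suspend path. Whether wakeup is user-enabled is
 * checked via device_may_wakeup(); the foo_* name is made up.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		if (device_may_wakeup(dev))
 *			pci_wake_from_d3(pdev, true);
 *
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */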
2634
2635 /**
2636 * pci_target_state - find an appropriate low power state for a given PCI dev
2637 * @dev: PCI device
2638 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2639 *
2640 * Use underlying platform code to find a supported low power state for @dev.
2641 * If the platform can't manage @dev, return the deepest state from which it
2642 * can generate wake events, based on any available PME info.
2643 */
2644 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2645 {
2646 if (platform_pci_power_manageable(dev)) {
2647 /*
2648 * Call the platform to find the target state for the device.
2649 */
2650 pci_power_t state = platform_pci_choose_state(dev);
2651
2652 switch (state) {
2653 case PCI_POWER_ERROR:
2654 case PCI_UNKNOWN:
2655 return PCI_D3hot;
2656
2657 case PCI_D1:
2658 case PCI_D2:
2659 if (pci_no_d1d2(dev))
2660 return PCI_D3hot;
2661 }
2662
2663 return state;
2664 }
2665
2666 /*
2667 * If the device is in D3cold even though it's not power-manageable by
2668 * the platform, it may have been powered down by non-standard means.
2669 * Best to let it slumber.
2670 */
2671 if (dev->current_state == PCI_D3cold)
2672 return PCI_D3cold;
2673 else if (!dev->pm_cap)
2674 return PCI_D0;
2675
2676 if (wakeup && dev->pme_support) {
2677 pci_power_t state = PCI_D3hot;
2678
2679 /*
2680 * Find the deepest state from which the device can generate
2681 * PME#.
2682 */
2683 while (state && !(dev->pme_support & (1 << state)))
2684 state--;
2685
2686 if (state)
2687 return state;
2688 else if (dev->pme_support & 1)
2689 return PCI_D0;
2690 }
2691
2692 return PCI_D3hot;
2693 }
2694
2695 /**
2696 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2697 * into a sleep state
2698 * @dev: Device to handle.
2699 *
2700 * Choose the power state appropriate for the device depending on whether
2701 * it can wake up the system and/or is power manageable by the platform
2702 * (PCI_D3hot is the default) and put the device into that state.
2703 */
2704 int pci_prepare_to_sleep(struct pci_dev *dev)
2705 {
2706 bool wakeup = device_may_wakeup(&dev->dev);
2707 pci_power_t target_state = pci_target_state(dev, wakeup);
2708 int error;
2709
2710 if (target_state == PCI_POWER_ERROR)
2711 return -EIO;
2712
2713 /*
2714 * There are systems (for example, Intel mobile chips since Coffee
2715 * Lake) where the power drawn while suspended can be significantly
2716 * reduced by disabling PTM on PCIe root ports as this allows the
2717 * port to enter a lower-power PM state and the SoC to reach a
2718 * lower-power idle state as a whole.
2719 */
2720 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2721 pci_disable_ptm(dev);
2722
2723 pci_enable_wake(dev, target_state, wakeup);
2724
2725 error = pci_set_power_state(dev, target_state);
2726
2727 if (error) {
2728 pci_enable_wake(dev, target_state, false);
2729 pci_restore_ptm_state(dev);
2730 }
2731
2732 return error;
2733 }
2734 EXPORT_SYMBOL(pci_prepare_to_sleep);
2735
2736 /**
2737 * pci_back_from_sleep - turn PCI device on during system-wide transition
2738 * into working state
2739 * @dev: Device to handle.
2740 *
2741 * Disable device's system wake-up capability and put it into D0.
2742 */
2743 int pci_back_from_sleep(struct pci_dev *dev)
2744 {
2745 int ret = pci_set_power_state(dev, PCI_D0);
2746
2747 if (ret)
2748 return ret;
2749
2750 pci_enable_wake(dev, PCI_D0, false);
2751 return 0;
2752 }
2753 EXPORT_SYMBOL(pci_back_from_sleep);
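/*
 * Example (illustrative sketch): letting the PCI core pick the target
 * state instead of hard-coding one. pci_prepare_to_sleep() chooses the
 * state and arms wakeup; pci_back_from_sleep() undoes both on resume.
 * The foo_* callbacks are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		int ret = pci_back_from_sleep(pdev);
 *
 *		if (ret)
 *			return ret;
 *
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */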
2754
2755 /**
2756 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2757 * @dev: PCI device being suspended.
2758 *
2759 * Prepare @dev to generate wake-up events at run time and put it into a low
2760 * power state.
2761 */
2762 int pci_finish_runtime_suspend(struct pci_dev *dev)
2763 {
2764 pci_power_t target_state;
2765 int error;
2766
2767 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2768 if (target_state == PCI_POWER_ERROR)
2769 return -EIO;
2770
2771 /*
2772 * There are systems (for example, Intel mobile chips since Coffee
2773 * Lake) where the power drawn while suspended can be significantly
2774 * reduced by disabling PTM on PCIe root ports as this allows the
2775 * port to enter a lower-power PM state and the SoC to reach a
2776 * lower-power idle state as a whole.
2777 */
2778 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2779 pci_disable_ptm(dev);
2780
2781 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2782
2783 error = pci_set_power_state(dev, target_state);
2784
2785 if (error) {
2786 pci_enable_wake(dev, target_state, false);
2787 pci_restore_ptm_state(dev);
2788 }
2789
2790 return error;
2791 }
2792
2793 /**
2794 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2795 * @dev: Device to check.
2796 *
2797 * Return true if the device itself is capable of generating wake-up events
2798 * (through the platform or using the native PCIe PME) or if the device supports
2799 * PME and one of its upstream bridges can generate wake-up events.
2800 */
2801 bool pci_dev_run_wake(struct pci_dev *dev)
2802 {
2803 struct pci_bus *bus = dev->bus;
2804
2805 if (!dev->pme_support)
2806 return false;
2807
2808 /* PME-capable in principle, but not from the target power state */
2809 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2810 return false;
2811
2812 if (device_can_wakeup(&dev->dev))
2813 return true;
2814
2815 while (bus->parent) {
2816 struct pci_dev *bridge = bus->self;
2817
2818 if (device_can_wakeup(&bridge->dev))
2819 return true;
2820
2821 bus = bus->parent;
2822 }
2823
2824 /* We have reached the root bus. */
2825 if (bus->bridge)
2826 return device_can_wakeup(bus->bridge);
2827
2828 return false;
2829 }
2830 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2831
2832 /**
2833 * pci_dev_need_resume - Check if it is necessary to resume the device.
2834 * @pci_dev: Device to check.
2835 *
2836 * Return 'true' if the device is not runtime-suspended or it has to be
2837 * reconfigured due to wakeup settings difference between system and runtime
2838 * suspend, or the current power state of it is not suitable for the upcoming
2839 * (system-wide) transition.
2840 */
2841 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2842 {
2843 struct device *dev = &pci_dev->dev;
2844 pci_power_t target_state;
2845
2846 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2847 return true;
2848
2849 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2850
2851 /*
2852 * If the earlier platform check has not triggered, D3cold is just power
2853 * removal on top of D3hot, so no need to resume the device in that
2854 * case.
2855 */
2856 return target_state != pci_dev->current_state &&
2857 target_state != PCI_D3cold &&
2858 pci_dev->current_state != PCI_D3hot;
2859 }
2860
2861 /**
2862 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2863 * @pci_dev: Device to check.
2864 *
2865 * If the device is suspended and it is not configured for system wakeup,
2866 * disable PME for it to prevent it from waking up the system unnecessarily.
2867 *
2868 * Note that if the device's power state is D3cold and the platform check in
2869 * pci_dev_need_resume() has not triggered, the device's configuration need not
2870 * be changed.
2871 */
2872 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2873 {
2874 struct device *dev = &pci_dev->dev;
2875
2876 spin_lock_irq(&dev->power.lock);
2877
2878 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2879 pci_dev->current_state < PCI_D3cold)
2880 __pci_pme_active(pci_dev, false);
2881
2882 spin_unlock_irq(&dev->power.lock);
2883 }
2884
2885 /**
2886 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2887 * @pci_dev: Device to handle.
2888 *
2889 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2890 * it might have been disabled during the prepare phase of system suspend if
2891 * the device was not configured for system wakeup.
2892 */
2893 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2894 {
2895 struct device *dev = &pci_dev->dev;
2896
2897 if (!pci_dev_run_wake(pci_dev))
2898 return;
2899
2900 spin_lock_irq(&dev->power.lock);
2901
2902 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2903 __pci_pme_active(pci_dev, true);
2904
2905 spin_unlock_irq(&dev->power.lock);
2906 }
2907
2908 /**
2909 * pci_choose_state - Choose the power state of a PCI device.
2910 * @dev: Target PCI device.
2911 * @state: Target state for the whole system.
2912 *
2913 * Returns PCI power state suitable for @dev and @state.
2914 */
2915 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2916 {
2917 if (state.event == PM_EVENT_ON)
2918 return PCI_D0;
2919
2920 return pci_target_state(dev, false);
2921 }
2922 EXPORT_SYMBOL(pci_choose_state);
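/*
 * Example (illustrative sketch): a legacy-style .suspend callback mapping
 * the system pm_message_t to a device power state. foo_suspend() is
 * hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *	}
 */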
2923
2924 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2925 {
2926 struct device *dev = &pdev->dev;
2927 struct device *parent = dev->parent;
2928
2929 if (parent)
2930 pm_runtime_get_sync(parent);
2931 pm_runtime_get_noresume(dev);
2932 /*
2933 * pdev->current_state is set to PCI_D3cold while the device is suspending,
2934 * so wait until the suspend completes
2935 */
2936 pm_runtime_barrier(dev);
2937 /*
2938 * Only need to resume devices in D3cold, because config
2939 * registers are still accessible for devices suspended but
2940 * not in D3cold.
2941 */
2942 if (pdev->current_state == PCI_D3cold)
2943 pm_runtime_resume(dev);
2944 }
2945
2946 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2947 {
2948 struct device *dev = &pdev->dev;
2949 struct device *parent = dev->parent;
2950
2951 pm_runtime_put(dev);
2952 if (parent)
2953 pm_runtime_put_sync(parent);
2954 }
2955
2956 static const struct dmi_system_id bridge_d3_blacklist[] = {
2957 #ifdef CONFIG_X86
2958 {
2959 /*
2960 		 * Gigabyte X299 root port is not marked as hotplug capable,
2961 * which allows Linux to power manage it. However, this
2962 * confuses the BIOS SMI handler so don't power manage root
2963 * ports on that system.
2964 */
2965 .ident = "X299 DESIGNARE EX-CF",
2966 .matches = {
2967 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2968 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2969 },
2970 },
2971 {
2972 /*
2973 * Downstream device is not accessible after putting a root port
2974 * into D3cold and back into D0 on Elo i2.
2975 */
2976 .ident = "Elo i2",
2977 .matches = {
2978 DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
2979 DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
2980 DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
2981 },
2982 },
2983 #endif
2984 { }
2985 };
2986
2987 /**
2988 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2989 * @bridge: Bridge to check
2990 *
2991 * This function checks if it is possible to move the bridge to D3.
2992 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2993 */
2994 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2995 {
2996 if (!pci_is_pcie(bridge))
2997 return false;
2998
2999 switch (pci_pcie_type(bridge)) {
3000 case PCI_EXP_TYPE_ROOT_PORT:
3001 case PCI_EXP_TYPE_UPSTREAM:
3002 case PCI_EXP_TYPE_DOWNSTREAM:
3003 if (pci_bridge_d3_disable)
3004 return false;
3005
3006 /*
3007 * Hotplug ports handled by firmware in System Management Mode
3008 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3009 */
3010 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3011 return false;
3012
3013 if (pci_bridge_d3_force)
3014 return true;
3015
3016 /* Even the oldest 2010 Thunderbolt controller supports D3. */
3017 if (bridge->is_thunderbolt)
3018 return true;
3019
3020 		/* The platform may know better whether the bridge supports D3 */
3021 if (platform_pci_bridge_d3(bridge))
3022 return true;
3023
3024 /*
3025 * Hotplug ports handled natively by the OS were not validated
3026 * by vendors for runtime D3 at least until 2018 because there
3027 * was no OS support.
3028 */
3029 if (bridge->is_hotplug_bridge)
3030 return false;
3031
3032 if (dmi_check_system(bridge_d3_blacklist))
3033 return false;
3034
3035 /*
3036 * It should be safe to put PCIe ports from 2015 or newer
3037 		 * into D3.
3038 */
3039 if (dmi_get_bios_year() >= 2015)
3040 return true;
3041 break;
3042 }
3043
3044 return false;
3045 }
3046
3047 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3048 {
3049 bool *d3cold_ok = data;
3050
3051 if (/* The device needs to be allowed to go D3cold ... */
3052 dev->no_d3cold || !dev->d3cold_allowed ||
3053
3054 /* ... and if it is wakeup capable to do so from D3cold. */
3055 (device_may_wakeup(&dev->dev) &&
3056 !pci_pme_capable(dev, PCI_D3cold)) ||
3057
3058 /* If it is a bridge it must be allowed to go to D3. */
3059 !pci_power_manageable(dev))
3060
3061 *d3cold_ok = false;
3062
3063 return !*d3cold_ok;
3064 }
3065
3066 /**
3067 * pci_bridge_d3_update - Update bridge D3 capabilities
3068 * @dev: PCI device which is changed
3069 *
3070 * Update upstream bridge PM capabilities depending on whether the
3071 * device's PM configuration was changed or the device is being removed. The
3072 * change is also propagated upstream.
3073 */
3074 void pci_bridge_d3_update(struct pci_dev *dev)
3075 {
3076 bool remove = !device_is_registered(&dev->dev);
3077 struct pci_dev *bridge;
3078 bool d3cold_ok = true;
3079
3080 bridge = pci_upstream_bridge(dev);
3081 if (!bridge || !pci_bridge_d3_possible(bridge))
3082 return;
3083
3084 /*
3085 * If D3 is currently allowed for the bridge, removing one of its
3086 * children won't change that.
3087 */
3088 if (remove && bridge->bridge_d3)
3089 return;
3090
3091 /*
3092 * If D3 is currently allowed for the bridge and a child is added or
3093 * changed, disallowance of D3 can only be caused by that child, so
3094 * we only need to check that single device, not any of its siblings.
3095 *
3096 * If D3 is currently not allowed for the bridge, checking the device
3097 * first may allow us to skip checking its siblings.
3098 */
3099 if (!remove)
3100 pci_dev_check_d3cold(dev, &d3cold_ok);
3101
3102 /*
3103 * If D3 is currently not allowed for the bridge, this may be caused
3104 * either by the device being changed/removed or any of its siblings,
3105 * so we need to go through all children to find out if one of them
3106 * continues to block D3.
3107 */
3108 if (d3cold_ok && !bridge->bridge_d3)
3109 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3110 &d3cold_ok);
3111
3112 if (bridge->bridge_d3 != d3cold_ok) {
3113 bridge->bridge_d3 = d3cold_ok;
3114 /* Propagate change to upstream bridges */
3115 pci_bridge_d3_update(bridge);
3116 }
3117 }
3118
3119 /**
3120 * pci_d3cold_enable - Enable D3cold for device
3121 * @dev: PCI device to handle
3122 *
3123 * This function can be used in drivers to enable D3cold for the device
3124 * they handle. It also updates upstream PCI bridge PM capabilities
3125 * accordingly.
3126 */
3127 void pci_d3cold_enable(struct pci_dev *dev)
3128 {
3129 if (dev->no_d3cold) {
3130 dev->no_d3cold = false;
3131 pci_bridge_d3_update(dev);
3132 }
3133 }
3134 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3135
3136 /**
3137 * pci_d3cold_disable - Disable D3cold for device
3138 * @dev: PCI device to handle
3139 *
3140 * This function can be used in drivers to disable D3cold for the device
3141 * they handle. It also updates upstream PCI bridge PM capabilities
3142 * accordingly.
3143 */
3144 void pci_d3cold_disable(struct pci_dev *dev)
3145 {
3146 if (!dev->no_d3cold) {
3147 dev->no_d3cold = true;
3148 pci_bridge_d3_update(dev);
3149 }
3150 }
3151 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
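/*
 * Example (illustrative sketch): a driver opting its device out of D3cold
 * from probe() because the hardware does not survive the power removal.
 * The quirk flag is hypothetical.
 *
 *	if (quirk_device_loses_state_in_d3cold)
 *		pci_d3cold_disable(pdev);
 */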
3152
3153 /**
3154 * pci_pm_init - Initialize PM functions of given PCI device
3155 * @dev: PCI device to handle.
3156 */
3157 void pci_pm_init(struct pci_dev *dev)
3158 {
3159 int pm;
3160 u16 status;
3161 u16 pmc;
3162
3163 pm_runtime_forbid(&dev->dev);
3164 pm_runtime_set_active(&dev->dev);
3165 pm_runtime_enable(&dev->dev);
3166 device_enable_async_suspend(&dev->dev);
3167 dev->wakeup_prepared = false;
3168
3169 dev->pm_cap = 0;
3170 dev->pme_support = 0;
3171
3172 /* find PCI PM capability in list */
3173 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3174 if (!pm)
3175 return;
3176 /* Check device's ability to generate PME# */
3177 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3178
3179 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3180 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3181 pmc & PCI_PM_CAP_VER_MASK);
3182 return;
3183 }
3184
3185 dev->pm_cap = pm;
3186 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3187 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3188 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3189 dev->d3cold_allowed = true;
3190
3191 dev->d1_support = false;
3192 dev->d2_support = false;
3193 if (!pci_no_d1d2(dev)) {
3194 if (pmc & PCI_PM_CAP_D1)
3195 dev->d1_support = true;
3196 if (pmc & PCI_PM_CAP_D2)
3197 dev->d2_support = true;
3198
3199 if (dev->d1_support || dev->d2_support)
3200 pci_info(dev, "supports%s%s\n",
3201 dev->d1_support ? " D1" : "",
3202 dev->d2_support ? " D2" : "");
3203 }
3204
3205 pmc &= PCI_PM_CAP_PME_MASK;
3206 if (pmc) {
3207 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3208 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3209 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3210 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3211 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3212 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3213 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3214 dev->pme_poll = true;
3215 /*
3216 * Make device's PM flags reflect the wake-up capability, but
3217 	 * let user space enable it to wake up the system as needed.
3218 */
3219 device_set_wakeup_capable(&dev->dev, true);
3220 /* Disable the PME# generation functionality */
3221 pci_pme_active(dev, false);
3222 }
3223
3224 pci_read_config_word(dev, PCI_STATUS, &status);
3225 if (status & PCI_STATUS_IMM_READY)
3226 dev->imm_ready = 1;
3227 }
3228
3229 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3230 {
3231 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3232
3233 switch (prop) {
3234 case PCI_EA_P_MEM:
3235 case PCI_EA_P_VF_MEM:
3236 flags |= IORESOURCE_MEM;
3237 break;
3238 case PCI_EA_P_MEM_PREFETCH:
3239 case PCI_EA_P_VF_MEM_PREFETCH:
3240 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3241 break;
3242 case PCI_EA_P_IO:
3243 flags |= IORESOURCE_IO;
3244 break;
3245 default:
3246 return 0;
3247 }
3248
3249 return flags;
3250 }
3251
3252 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3253 u8 prop)
3254 {
3255 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3256 return &dev->resource[bei];
3257 #ifdef CONFIG_PCI_IOV
3258 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3259 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3260 return &dev->resource[PCI_IOV_RESOURCES +
3261 bei - PCI_EA_BEI_VF_BAR0];
3262 #endif
3263 else if (bei == PCI_EA_BEI_ROM)
3264 return &dev->resource[PCI_ROM_RESOURCE];
3265 else
3266 return NULL;
3267 }
3268
3269 /* Read an Enhanced Allocation (EA) entry */
3270 static int pci_ea_read(struct pci_dev *dev, int offset)
3271 {
3272 struct resource *res;
3273 int ent_size, ent_offset = offset;
3274 resource_size_t start, end;
3275 unsigned long flags;
3276 u32 dw0, bei, base, max_offset;
3277 u8 prop;
3278 bool support_64 = (sizeof(resource_size_t) >= 8);
3279
3280 pci_read_config_dword(dev, ent_offset, &dw0);
3281 ent_offset += 4;
3282
3283 /* Entry size field indicates DWORDs after 1st */
3284 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3285
3286 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3287 goto out;
3288
3289 bei = (dw0 & PCI_EA_BEI) >> 4;
3290 prop = (dw0 & PCI_EA_PP) >> 8;
3291
3292 /*
3293 * If the Property is in the reserved range, try the Secondary
3294 * Property instead.
3295 */
3296 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3297 prop = (dw0 & PCI_EA_SP) >> 16;
3298 if (prop > PCI_EA_P_BRIDGE_IO)
3299 goto out;
3300
3301 res = pci_ea_get_resource(dev, bei, prop);
3302 if (!res) {
3303 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3304 goto out;
3305 }
3306
3307 flags = pci_ea_flags(dev, prop);
3308 if (!flags) {
3309 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3310 goto out;
3311 }
3312
3313 /* Read Base */
3314 pci_read_config_dword(dev, ent_offset, &base);
3315 start = (base & PCI_EA_FIELD_MASK);
3316 ent_offset += 4;
3317
3318 /* Read MaxOffset */
3319 pci_read_config_dword(dev, ent_offset, &max_offset);
3320 ent_offset += 4;
3321
3322 /* Read Base MSBs (if 64-bit entry) */
3323 if (base & PCI_EA_IS_64) {
3324 u32 base_upper;
3325
3326 pci_read_config_dword(dev, ent_offset, &base_upper);
3327 ent_offset += 4;
3328
3329 flags |= IORESOURCE_MEM_64;
3330
3331 /* entry starts above 32-bit boundary, can't use */
3332 if (!support_64 && base_upper)
3333 goto out;
3334
3335 if (support_64)
3336 start |= ((u64)base_upper << 32);
3337 }
3338
3339 end = start + (max_offset | 0x03);
3340
3341 /* Read MaxOffset MSBs (if 64-bit entry) */
3342 if (max_offset & PCI_EA_IS_64) {
3343 u32 max_offset_upper;
3344
3345 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3346 ent_offset += 4;
3347
3348 flags |= IORESOURCE_MEM_64;
3349
3350 /* entry too big, can't use */
3351 if (!support_64 && max_offset_upper)
3352 goto out;
3353
3354 if (support_64)
3355 end += ((u64)max_offset_upper << 32);
3356 }
3357
3358 if (end < start) {
3359 pci_err(dev, "EA Entry crosses address boundary\n");
3360 goto out;
3361 }
3362
3363 if (ent_size != ent_offset - offset) {
3364 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3365 ent_size, ent_offset - offset);
3366 goto out;
3367 }
3368
3369 res->name = pci_name(dev);
3370 res->start = start;
3371 res->end = end;
3372 res->flags = flags;
3373
3374 if (bei <= PCI_EA_BEI_BAR5)
3375 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3376 bei, res, prop);
3377 else if (bei == PCI_EA_BEI_ROM)
3378 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3379 res, prop);
3380 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3381 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3382 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3383 else
3384 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3385 bei, res, prop);
3386
3387 out:
3388 return offset + ent_size;
3389 }
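/*
 * Worked example (values are made up): dw0 = 0x80000012 decodes as ENABLE
 * set, ES = 2 (so ent_size = (2 + 1) << 2 = 12 bytes: DW0, Base and
 * MaxOffset), BEI = 1 (BAR 1) and prop = 0 (non-prefetchable memory).
 * With Base = 0xfe000000 and MaxOffset = 0x000ffffc, neither 64-bit flag
 * is set, so start = 0xfe000000 and end = start + (0x000ffffc | 0x3) =
 * 0xfe0fffff, i.e. a 1 MB region.
 */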
3390
3391 /* Enhanced Allocation Initialization */
3392 void pci_ea_init(struct pci_dev *dev)
3393 {
3394 int ea;
3395 u8 num_ent;
3396 int offset;
3397 int i;
3398
3399 /* find PCI EA capability in list */
3400 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3401 if (!ea)
3402 return;
3403
3404 /* determine the number of entries */
3405 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3406 &num_ent);
3407 num_ent &= PCI_EA_NUM_ENT_MASK;
3408
3409 offset = ea + PCI_EA_FIRST_ENT;
3410
3411 /* Skip DWORD 2 for type 1 functions */
3412 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3413 offset += 4;
3414
3415 /* parse each EA entry */
3416 for (i = 0; i < num_ent; ++i)
3417 offset = pci_ea_read(dev, offset);
3418 }
3419
3420 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3421 struct pci_cap_saved_state *new_cap)
3422 {
3423 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3424 }
3425
3426 /**
3427 * _pci_add_cap_save_buffer - allocate buffer for saving given
3428 * capability registers
3429 * @dev: the PCI device
3430 * @cap: the capability to allocate the buffer for
3431 * @extended: Standard or Extended capability ID
3432 * @size: requested size of the buffer
3433 */
3434 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3435 bool extended, unsigned int size)
3436 {
3437 int pos;
3438 struct pci_cap_saved_state *save_state;
3439
3440 if (extended)
3441 pos = pci_find_ext_capability(dev, cap);
3442 else
3443 pos = pci_find_capability(dev, cap);
3444
3445 if (!pos)
3446 return 0;
3447
3448 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3449 if (!save_state)
3450 return -ENOMEM;
3451
3452 save_state->cap.cap_nr = cap;
3453 save_state->cap.cap_extended = extended;
3454 save_state->cap.size = size;
3455 pci_add_saved_cap(dev, save_state);
3456
3457 return 0;
3458 }
3459
3460 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3461 {
3462 return _pci_add_cap_save_buffer(dev, cap, false, size);
3463 }
3464
3465 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3466 {
3467 return _pci_add_cap_save_buffer(dev, cap, true, size);
3468 }
3469
3470 /**
3471 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3472 * @dev: the PCI device
3473 */
3474 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3475 {
3476 int error;
3477
3478 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3479 PCI_EXP_SAVE_REGS * sizeof(u16));
3480 if (error)
3481 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3482
3483 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3484 if (error)
3485 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3486
3487 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3488 2 * sizeof(u16));
3489 if (error)
3490 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3491
3492 pci_allocate_vc_save_buffers(dev);
3493 }
3494
3495 void pci_free_cap_save_buffers(struct pci_dev *dev)
3496 {
3497 struct pci_cap_saved_state *tmp;
3498 struct hlist_node *n;
3499
3500 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3501 kfree(tmp);
3502 }
3503
3504 /**
3505 * pci_configure_ari - enable or disable ARI forwarding
3506 * @dev: the PCI device
3507 *
3508 * If @dev and its upstream bridge both support ARI, enable ARI in the
3509 * bridge. Otherwise, disable ARI in the bridge.
3510 */
3511 void pci_configure_ari(struct pci_dev *dev)
3512 {
3513 u32 cap;
3514 struct pci_dev *bridge;
3515
3516 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3517 return;
3518
3519 bridge = dev->bus->self;
3520 if (!bridge)
3521 return;
3522
3523 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3524 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3525 return;
3526
3527 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3528 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3529 PCI_EXP_DEVCTL2_ARI);
3530 bridge->ari_enabled = 1;
3531 } else {
3532 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3533 PCI_EXP_DEVCTL2_ARI);
3534 bridge->ari_enabled = 0;
3535 }
3536 }
3537
3538 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3539 {
3540 int pos;
3541 u16 cap, ctrl;
3542
3543 pos = pdev->acs_cap;
3544 if (!pos)
3545 return false;
3546
3547 /*
3548 * Except for egress control, capabilities are either required
3549 * or only required if controllable. Features missing from the
3550 * capability field can therefore be assumed as hard-wired enabled.
3551 */
3552 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3553 acs_flags &= (cap | PCI_ACS_EC);
3554
3555 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3556 return (ctrl & acs_flags) == acs_flags;
3557 }
3558
3559 /**
3560 * pci_acs_enabled - test ACS against required flags for a given device
3561 * @pdev: device to test
3562 * @acs_flags: required PCI ACS flags
3563 *
3564 * Return true if the device supports the provided flags. Automatically
3565 * filters out flags that are not implemented on multifunction devices.
3566 *
3567 * Note that this interface checks the effective ACS capabilities of the
3568 * device rather than the actual capabilities. For instance, most single
3569 * function endpoints are not required to support ACS because they have no
3570 * opportunity for peer-to-peer access. We therefore return 'true'
3571 * regardless of whether the device exposes an ACS capability. This makes
3572 * it much easier for callers of this function to ignore the actual type
3573 * or topology of the device when testing ACS support.
3574 */
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)3575 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3576 {
3577 int ret;
3578
3579 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3580 if (ret >= 0)
3581 return ret > 0;
3582
3583 /*
3584 * Conventional PCI and PCI-X devices never support ACS, either
3585 * effectively or actually. The shared bus topology implies that
3586 * any device on the bus can receive or snoop DMA.
3587 */
3588 if (!pci_is_pcie(pdev))
3589 return false;
3590
3591 switch (pci_pcie_type(pdev)) {
3592 /*
3593 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3594 * but since their primary interface is PCI/X, we conservatively
3595 * handle them as we would a non-PCIe device.
3596 */
3597 case PCI_EXP_TYPE_PCIE_BRIDGE:
3598 /*
3599 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3600 * applicable... must never implement an ACS Extended Capability...".
3601 * This seems arbitrary, but we take a conservative interpretation
3602 * of this statement.
3603 */
3604 case PCI_EXP_TYPE_PCI_BRIDGE:
3605 case PCI_EXP_TYPE_RC_EC:
3606 return false;
3607 /*
3608 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3609 * implement ACS in order to indicate their peer-to-peer capabilities,
3610 * regardless of whether they are single- or multi-function devices.
3611 */
3612 case PCI_EXP_TYPE_DOWNSTREAM:
3613 case PCI_EXP_TYPE_ROOT_PORT:
3614 return pci_acs_flags_enabled(pdev, acs_flags);
3615 /*
3616 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3617 * implemented by the remaining PCIe types to indicate peer-to-peer
3618 * capabilities, but only when they are part of a multifunction
3619 * device. The footnote for section 6.12 indicates the specific
3620 * PCIe types included here.
3621 */
3622 case PCI_EXP_TYPE_ENDPOINT:
3623 case PCI_EXP_TYPE_UPSTREAM:
3624 case PCI_EXP_TYPE_LEG_END:
3625 case PCI_EXP_TYPE_RC_END:
3626 if (!pdev->multifunction)
3627 break;
3628
3629 return pci_acs_flags_enabled(pdev, acs_flags);
3630 }
3631
3632 /*
3633 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3634 * to single function devices with the exception of downstream ports.
3635 */
3636 return true;
3637 }
3638
3639 /**
3640 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3641 * @start: starting downstream device
3642 * @end: ending upstream device or NULL to search to the root bus
3643 * @acs_flags: required flags
3644 *
3645 * Walk up a device tree from start to end testing PCI ACS support. If
3646 * any step along the way does not support the required flags, return false.
3647 */
pci_acs_path_enabled(struct pci_dev * start,struct pci_dev * end,u16 acs_flags)3648 bool pci_acs_path_enabled(struct pci_dev *start,
3649 struct pci_dev *end, u16 acs_flags)
3650 {
3651 struct pci_dev *pdev, *parent = start;
3652
3653 do {
3654 pdev = parent;
3655
3656 if (!pci_acs_enabled(pdev, acs_flags))
3657 return false;
3658
3659 if (pci_is_root_bus(pdev->bus))
3660 return (end == NULL);
3661
3662 parent = pdev->bus->self;
3663 } while (pdev != end);
3664
3665 return true;
3666 }
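
/*
 * Example (an illustrative sketch, not code from this file): an IOMMU or
 * VFIO-style caller might use pci_acs_path_enabled() to check that
 * peer-to-peer TLPs from @pdev are always routed up to the root before
 * treating the device as isolated. The flag set below is one common
 * choice, not the only valid one::
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;
 *
 *	if (pci_acs_path_enabled(pdev, NULL, flags))
 *		; // full path to the root bus enforces ACS redirection
 */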

/**
 * pci_acs_init - Initialize ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_acs_init(struct pci_dev *dev)
{
        dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);

        /*
         * Attempt to enable ACS regardless of capability because some Root
         * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
         * the standard ACS capability but still support ACS via those
         * quirks.
         */
        pci_enable_acs(dev);
}

/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */
static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
{
        unsigned int pos, nbars, i;
        u32 ctrl;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
        if (!pos)
                return -ENOTSUPP;

        pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
        nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
                PCI_REBAR_CTRL_NBAR_SHIFT;

        for (i = 0; i < nbars; i++, pos += 8) {
                int bar_idx;

                pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
                bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
                if (bar_idx == bar)
                        return pos;
        }

        return -ENOENT;
}

/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
 */
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
{
        int pos;
        u32 cap;

        pos = pci_rebar_find_pos(pdev, bar);
        if (pos < 0)
                return 0;

        pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
        cap &= PCI_REBAR_CAP_SIZES;

        /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
        if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
            bar == 0 && cap == 0x7000)
                cap = 0x3f000;

        return cap >> 4;
}
EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
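
/*
 * Example (illustrative sketch): the returned bitmask encodes each
 * supported size as 1MB << bit, so a caller can walk it with the usual
 * bitops helpers::
 *
 *	unsigned long sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *	int bit;
 *
 *	for_each_set_bit(bit, &sizes, 20)
 *		; // supported size is (1ULL << bit) MB; bit 8 means 256MB
 */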

/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Read the size of a BAR from the resizable BAR config.
 * Returns size if found or negative error code.
 */
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
{
        int pos;
        u32 ctrl;

        pos = pci_rebar_find_pos(pdev, bar);
        if (pos < 0)
                return pos;

        pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
        return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
}

/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 19=512GB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
{
        int pos;
        u32 ctrl;

        pos = pci_rebar_find_pos(pdev, bar);
        if (pos < 0)
                return pos;

        pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
        ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
        ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
        pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
        return 0;
}
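
/*
 * Example (illustrative sketch of the low-level call sequence; real
 * drivers normally go through pci_resize_resource(), which also handles
 * releasing and re-assigning the BAR): pick the largest supported size
 * and program it::
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *
 *	if (sizes)
 *		pci_rebar_set_size(pdev, bar, fls(sizes) - 1);
 */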

/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or a negative error code otherwise.
 */
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
{
        struct pci_bus *bus = dev->bus;
        struct pci_dev *bridge;
        u32 cap, ctl2;

        /*
         * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
         * in Device Control 2 is reserved in VFs and the PF value applies
         * to all associated VFs.
         */
        if (dev->is_virtfn)
                return -EINVAL;

        if (!pci_is_pcie(dev))
                return -EINVAL;

        /*
         * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
         * AtomicOp requesters. For now, we only support endpoints as
         * requesters and root ports as completers. No endpoints as
         * completers, and no peer-to-peer.
         */

        switch (pci_pcie_type(dev)) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
                break;
        default:
                return -EINVAL;
        }

        while (bus->parent) {
                bridge = bus->self;

                pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);

                switch (pci_pcie_type(bridge)) {
                /* Ensure switch ports support AtomicOp routing */
                case PCI_EXP_TYPE_UPSTREAM:
                case PCI_EXP_TYPE_DOWNSTREAM:
                        if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
                                return -EINVAL;
                        break;

                /* Ensure root port supports all the sizes we care about */
                case PCI_EXP_TYPE_ROOT_PORT:
                        if ((cap & cap_mask) != cap_mask)
                                return -EINVAL;
                        break;
                }

                /* Ensure upstream ports don't block AtomicOps on egress */
                if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
                        pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
                                                   &ctl2);
                        if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
                                return -EINVAL;
                }

                bus = bus->parent;
        }

        pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
                                 PCI_EXP_DEVCTL2_ATOMIC_REQ);
        return 0;
}
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
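
/*
 * Example (illustrative sketch): a driver that wants to issue 64-bit
 * AtomicOps to host memory would typically call this once at probe time
 * and fall back to another completion mechanism on failure::
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_warn(&pdev->dev, "AtomicOps not routed to root port\n");
 */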

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge. This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards. For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1).
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
        int slot;

        if (pci_ari_enabled(dev->bus))
                slot = 0;
        else
                slot = PCI_SLOT(dev->devfn);

        return (((pin - 1) + slot) % 4) + 1;
}
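
/*
 * Worked example: a device in slot 3 asserting INTB (pin 2) swizzles to
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the bridge's primary side.
 */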

int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
        u8 pin;

        pin = dev->pin;
        if (!pin)
                return -1;

        while (!pci_is_root_bus(dev->bus)) {
                pin = pci_swizzle_interrupt_pin(dev, pin);
                dev = dev->bus->self;
        }
        *bridge = dev;
        return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
        u8 pin = *pinp;

        while (!pci_is_root_bus(dev->bus)) {
                pin = pci_swizzle_interrupt_pin(dev, pin);
                dev = dev->bus->self;
        }
        *pinp = pin;
        return PCI_SLOT(dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_common_swizzle);

/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region(). Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
        struct pci_devres *dr;

        if (pci_resource_len(pdev, bar) == 0)
                return;
        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                release_region(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                release_mem_region(pci_resource_start(pdev, bar),
                                   pci_resource_len(pdev, bar));

        dr = find_pci_dr(pdev);
        if (dr)
                dr->region_mask &= ~(1 << bar);
}
EXPORT_SYMBOL(pci_release_region);

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar,
                                const char *res_name, int exclusive)
{
        struct pci_devres *dr;

        if (pci_resource_len(pdev, bar) == 0)
                return 0;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
                if (!request_region(pci_resource_start(pdev, bar),
                                    pci_resource_len(pdev, bar), res_name))
                        goto err_out;
        } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                if (!__request_mem_region(pci_resource_start(pdev, bar),
                                          pci_resource_len(pdev, bar),
                                          res_name, exclusive))
                        goto err_out;
        }

        dr = find_pci_dr(pdev);
        if (dr)
                dr->region_mask |= 1 << bar;

        return 0;

err_out:
        pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
                 &pdev->resource[bar]);
        return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
        return __pci_request_region(pdev, bar, res_name, 0);
}
EXPORT_SYMBOL(pci_request_region);
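
/*
 * Example (illustrative sketch of typical driver usage; "my_driver" is a
 * placeholder owner name): request a BAR before touching it and release
 * it once the device is no longer used::
 *
 *	err = pci_request_region(pdev, 0, "my_driver");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_region(pdev, 0);
 */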

/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++)
                if (bars & (1 << i))
                        pci_release_region(pdev, i);
}
EXPORT_SYMBOL(pci_release_selected_regions);

static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                          const char *res_name, int excl)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++)
                if (bars & (1 << i))
                        if (__pci_request_region(pdev, i, res_name, excl))
                                goto err_out;
        return 0;

err_out:
        while (--i >= 0)
                if (bars & (1 << i))
                        pci_release_region(pdev, i);

        return -EBUSY;
}

/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                 const char *res_name)
{
        return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);

int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
                                           const char *res_name)
{
        return __pci_request_selected_regions(pdev, bars, res_name,
                                              IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_regions()
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions(). Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
        pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
}
EXPORT_SYMBOL(pci_release_regions);

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
        return pci_request_selected_regions(pdev,
                        ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions);

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name. Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
 * and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error. A warning message is also
 * printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
        return pci_request_selected_regions_exclusive(pdev,
                        ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);

/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise
 */
int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
                          resource_size_t size)
{
        int ret = 0;
#ifdef PCI_IOBASE
        struct logic_pio_hwaddr *range;

        if (!size || addr + size < addr)
                return -EINVAL;

        range = kzalloc(sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        range->fwnode = fwnode;
        range->size = size;
        range->hw_start = addr;
        range->flags = LOGIC_PIO_CPU_MMIO;

        ret = logic_pio_register_range(range);
        if (ret)
                kfree(range);

        /* Ignore duplicates due to deferred probing */
        if (ret == -EEXIST)
                ret = 0;
#endif

        return ret;
}

phys_addr_t pci_pio_to_address(unsigned long pio)
{
        phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;

#ifdef PCI_IOBASE
        if (pio >= MMIO_UPPER_LIMIT)
                return address;

        address = logic_pio_to_hwaddr(pio);
#endif

        return address;
}
EXPORT_SYMBOL_GPL(pci_pio_to_address);

unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
#ifdef PCI_IOBASE
        return logic_pio_trans_cpuaddr(address);
#else
        if (address > IO_SPACE_LIMIT)
                return (unsigned long)-1;

        return (unsigned long) address;
#endif
}

/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space. Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
#ifndef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
        unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

        if (!(res->flags & IORESOURCE_IO))
                return -EINVAL;

        if (res->end > IO_SPACE_LIMIT)
                return -EINVAL;

        return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
                                  pgprot_device(PAGE_KERNEL));
#else
        /*
         * This architecture does not have memory mapped I/O space,
         * so this function should never be called.
         */
        WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
        return -ENODEV;
#endif
}
EXPORT_SYMBOL(pci_remap_iospace);
#endif

/**
 * pci_unmap_iospace - Unmap the memory mapped I/O space
 * @res: resource to be unmapped
 *
 * Unmap the CPU virtual address @res from virtual address space. Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
void pci_unmap_iospace(struct resource *res)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
        unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

        vunmap_range(vaddr, vaddr + resource_size(res));
#endif
}
EXPORT_SYMBOL(pci_unmap_iospace);

static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
        struct resource **res = ptr;

        pci_unmap_iospace(*res);
}

/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
 * detach.
 */
int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
                           phys_addr_t phys_addr)
{
        const struct resource **ptr;
        int error;

        ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        error = pci_remap_iospace(res, phys_addr);
        if (error) {
                devres_free(ptr);
        } else {
                *ptr = res;
                devres_add(dev, ptr);
        }

        return error;
}
EXPORT_SYMBOL(devm_pci_remap_iospace);
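
/*
 * Example (illustrative sketch): a host bridge driver that has parsed an
 * I/O window into @res and its CPU physical base into @phys would map it
 * with::
 *
 *	err = devm_pci_remap_iospace(dev, res, phys);
 *	if (err)
 *		return err;
 *
 * The mapping is then torn down automatically on driver detach.
 */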

/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
                                      resource_size_t offset,
                                      resource_size_t size)
{
        void __iomem **ptr, *addr;

        ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        addr = pci_remap_cfgspace(offset, size);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_pci_remap_cfgspace);

/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
                                          struct resource *res)
{
        resource_size_t size;
        const char *name;
        void __iomem *dest_ptr;

        BUG_ON(!dev);

        if (!res || resource_type(res) != IORESOURCE_MEM) {
                dev_err(dev, "invalid resource\n");
                return IOMEM_ERR_PTR(-EINVAL);
        }

        size = resource_size(res);

        if (res->name)
                name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
                                      res->name);
        else
                name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
        if (!name)
                return IOMEM_ERR_PTR(-ENOMEM);

        if (!devm_request_mem_region(dev, res->start, size, name)) {
                dev_err(dev, "can't request region for resource %pR\n", res);
                return IOMEM_ERR_PTR(-EBUSY);
        }

        dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
        if (!dest_ptr) {
                dev_err(dev, "ioremap failed for resource %pR\n", res);
                devm_release_mem_region(dev, res->start, size);
                dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
        }

        return dest_ptr;
}
EXPORT_SYMBOL(devm_pci_remap_cfg_resource);

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
        u16 old_cmd, cmd;

        pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
        if (enable)
                cmd = old_cmd | PCI_COMMAND_MASTER;
        else
                cmd = old_cmd & ~PCI_COMMAND_MASTER;
        if (cmd != old_cmd) {
                pci_dbg(dev, "%s bus mastering\n",
                        enable ? "enabling" : "disabling");
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        dev->is_busmaster = enable;
}

/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments. This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
char * __weak __init pcibios_setup(char *str)
{
        return str;
}

/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device. This is the default
 * implementation. Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
        u8 lat;

        /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
        if (pci_is_pcie(dev))
                return;

        pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
        if (lat < 16)
                lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
        else if (lat > pcibios_max_latency)
                lat = pcibios_max_latency;
        else
                return;

        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
        __pci_set_master(dev, true);
        pcibios_set_master(dev);
}
EXPORT_SYMBOL(pci_set_master);
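
/*
 * Example (illustrative sketch): DMA-capable drivers usually pair this
 * with device enable in their probe path::
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);	// required before the device may DMA
 */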

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
        __pci_set_master(dev, false);
}
EXPORT_SYMBOL(pci_clear_master);

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
        u8 cacheline_size;

        if (!pci_cache_line_size)
                return -EINVAL;

        /*
         * Validate current setting: the PCI_CACHE_LINE_SIZE must be
         * equal to or a multiple of the right value.
         */
        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
        if (cacheline_size >= pci_cache_line_size &&
            (cacheline_size % pci_cache_line_size) == 0)
                return 0;

        /* Write the correct value. */
        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
        /* Read it back. */
        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
        if (cacheline_size == pci_cache_line_size)
                return 0;

        pci_dbg(dev, "cache line size of %d is not supported\n",
                pci_cache_line_size << 2);

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_mwi(struct pci_dev *dev)
{
#ifdef PCI_DISABLE_MWI
        return 0;
#else
        int rc;
        u16 cmd;

        rc = pci_set_cacheline_size(dev);
        if (rc)
                return rc;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (!(cmd & PCI_COMMAND_INVALIDATE)) {
                pci_dbg(dev, "enabling Mem-Wr-Inval\n");
                cmd |= PCI_COMMAND_INVALIDATE;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        return 0;
#endif
}
EXPORT_SYMBOL(pci_set_mwi);

/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi().
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pcim_set_mwi(struct pci_dev *dev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(dev);
        if (!dr)
                return -ENOMEM;

        dr->mwi = 1;
        return pci_set_mwi(dev);
}
EXPORT_SYMBOL(pcim_set_mwi);

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
#ifdef PCI_DISABLE_MWI
        return 0;
#else
        return pci_set_mwi(dev);
#endif
}
EXPORT_SYMBOL(pci_try_set_mwi);
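
/*
 * Example (illustrative sketch): since MWI is purely a performance hint,
 * drivers typically use the _try_ variant and ignore the result::
 *
 *	pci_try_set_mwi(pdev);	// best effort; failure is harmless
 */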

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void pci_clear_mwi(struct pci_dev *dev)
{
#ifndef PCI_DISABLE_MWI
        u16 cmd;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (cmd & PCI_COMMAND_INVALIDATE) {
                cmd &= ~PCI_COMMAND_INVALIDATE;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
#endif
}
EXPORT_SYMBOL(pci_clear_mwi);

/**
 * pci_disable_parity - disable parity checking for device
 * @dev: the PCI device to operate on
 *
 * Disable parity checking for device @dev
 */
void pci_disable_parity(struct pci_dev *dev)
{
        u16 cmd;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (cmd & PCI_COMMAND_PARITY) {
                cmd &= ~PCI_COMMAND_PARITY;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
}

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */
void pci_intx(struct pci_dev *pdev, int enable)
{
        u16 pci_command, new;

        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

        if (enable)
                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
        else
                new = pci_command | PCI_COMMAND_INTX_DISABLE;

        if (new != pci_command) {
                struct pci_devres *dr;

                pci_write_config_word(pdev, PCI_COMMAND, new);

                dr = find_pci_dr(pdev);
                if (dr && !dr->restore_intx) {
                        dr->restore_intx = 1;
                        dr->orig_intx = !enable;
                }
        }
}
EXPORT_SYMBOL_GPL(pci_intx);

static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
        struct pci_bus *bus = dev->bus;
        bool mask_updated = true;
        u32 cmd_status_dword;
        u16 origcmd, newcmd;
        unsigned long flags;
        bool irq_pending;

        /*
         * We do a single dword read to retrieve both command and status.
         * Document assumptions that make this possible.
         */
        BUILD_BUG_ON(PCI_COMMAND % 4);
        BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

        raw_spin_lock_irqsave(&pci_lock, flags);

        bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

        irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

        /*
         * Check interrupt status register to see whether our device
         * triggered the interrupt (when masking) or the next IRQ is
         * already pending (when unmasking).
         */
        if (mask != irq_pending) {
                mask_updated = false;
                goto done;
        }

        origcmd = cmd_status_dword;
        newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
        if (mask)
                newcmd |= PCI_COMMAND_INTX_DISABLE;
        if (newcmd != origcmd)
                bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return mask_updated;
}

/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and return
 * true in that case. False is returned if no interrupt was pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
        return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
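
/*
 * Example (illustrative sketch; my_handler() is a hypothetical name): a
 * driver exposing INTx to userspace (uio/vfio style) can use this in a
 * shared interrupt handler to both detect and silence its device::
 *
 *	static irqreturn_t my_handler(int irq, void *arg)
 *	{
 *		struct pci_dev *pdev = arg;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;	// not our interrupt
 *
 *		// ...notify userspace; it unmasks later via
 *		// pci_check_and_unmask_intx()
 *		return IRQ_HANDLED;
 *	}
 */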

/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not and
 * return true. False is returned and the mask remains active if there was
 * still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
        return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);

/**
 * pci_wait_for_pending_transaction - wait for pending transaction
 * @dev: the PCI device to operate on
 *
 * Return 0 if a transaction is pending, 1 otherwise.
 */
int pci_wait_for_pending_transaction(struct pci_dev *dev)
{
        if (!pci_is_pcie(dev))
                return 1;

        return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
                                    PCI_EXP_DEVSTA_TRPND);
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);

/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset unconditionally on @dev without
 * checking any flags or the DEVCAP register.
 */
int pcie_flr(struct pci_dev *dev)
{
        if (!pci_wait_for_pending_transaction(dev))
                pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");

        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

        if (dev->imm_ready)
                return 0;

        /*
         * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
         * 100ms, but may silently discard requests while the FLR is in
         * progress. Wait 100ms before trying to access the device.
         */
        msleep(100);

        return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pcie_flr);

/**
 * pcie_reset_flr - initiate a PCIe function level reset
 * @dev: device to reset
 * @probe: if true, return 0 if device can be reset this way
 *
 * Initiate a function level reset on @dev.
 */
int pcie_reset_flr(struct pci_dev *dev, bool probe)
{
        if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
                return -ENOTTY;

        if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        return pcie_flr(dev);
}
EXPORT_SYMBOL_GPL(pcie_reset_flr);
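
/*
 * Example (illustrative sketch): a caller can probe for FLR support
 * first and only then perform the actual reset::
 *
 *	if (pcie_reset_flr(pdev, PCI_RESET_PROBE) == 0)
 *		err = pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
 */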

static int pci_af_flr(struct pci_dev *dev, bool probe)
{
        int pos;
        u8 cap;

        pos = pci_find_capability(dev, PCI_CAP_ID_AF);
        if (!pos)
                return -ENOTTY;

        if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
                return -ENOTTY;

        pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        /*
         * Wait for Transaction Pending bit to clear. A word-aligned test
         * is used, so we use the control offset rather than status and shift
         * the test bit to match.
         */
        if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
                                  PCI_AF_STATUS_TP << 8))
                pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");

        pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);

        if (dev->imm_ready)
                return 0;

        /*
         * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
         * updated 27 July 2006; a device must complete an FLR within
         * 100ms, but may silently discard requests while the FLR is in
         * progress. Wait 100ms before trying to access the device.
         */
        msleep(100);

        return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
}

/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: if true, return 0 if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0. If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, bool probe)
{
        u16 csr;

        if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
                return -ENOTTY;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
        if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
                return -ENOTTY;

        if (probe)
                return 0;

        if (dev->current_state != PCI_D0)
                return -EINVAL;

        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D3hot;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D0;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
}

/**
 * pcie_wait_for_link_delay - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 * @delay: Delay to wait after link has become active (in ms)
 *
 * Use this to wait till link becomes active or inactive.
 */
static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
                                     int delay)
{
        int timeout = 1000;
        bool ret;
        u16 lnk_status;

        /*
         * Some controllers might not implement link active reporting. In this
         * case, we wait for 1000 ms + any delay requested by the caller.
         */
        if (!pdev->link_active_reporting) {
                msleep(timeout + delay);
                return true;
        }

        /*
         * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
         * 20ms, after which we should expect the link to become active if the
         * reset was successful. If so, software must wait a minimum of 100 ms
         * before sending configuration requests to devices downstream of
         * this port.
         *
         * If the link fails to activate, either the device was physically
         * removed or the link is permanently failed.
         */
        if (active)
                msleep(20);
        for (;;) {
                pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
                ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
                if (ret == active)
                        break;
                if (timeout <= 0)
                        break;
                msleep(10);
                timeout -= 10;
        }
        if (active && ret)
                msleep(delay);

        return ret == active;
}

/**
 * pcie_wait_for_link - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 *
 * Use this to wait till link becomes active or inactive.
 */
bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
{
        return pcie_wait_for_link_delay(pdev, active, 100);
}

/*
 * Find maximum D3cold delay required by all the devices on the bus. The
 * spec says 100 ms, but firmware can lower it and we allow drivers to
 * increase it as well.
 *
 * Called with @pci_bus_sem locked for reading.
 */
static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
{
        const struct pci_dev *pdev;
        int min_delay = 100;
        int max_delay = 0;

        list_for_each_entry(pdev, &bus->devices, bus_list) {
                if (pdev->d3cold_delay < min_delay)
                        min_delay = pdev->d3cold_delay;
                if (pdev->d3cold_delay > max_delay)
                        max_delay = pdev->d3cold_delay;
        }

        return max(min_delay, max_delay);
}

/**
 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
 * @dev: PCI bridge
 *
 * Handle necessary delays before access to the devices on the secondary
 * side of the bridge are permitted after D3cold to D0 transition.
 *
 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
 * 4.3.2.
 */
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
{
        struct pci_dev *child;
        int delay;

        if (pci_dev_is_disconnected(dev))
                return;

        if (!pci_is_bridge(dev) || !dev->bridge_d3)
                return;

        down_read(&pci_bus_sem);

        /*
         * We only deal with devices that are present currently on the bus.
         * For any hot-added devices the access delay is handled in pciehp
         * board_added(). In case of ACPI hotplug the firmware is expected
         * to configure the devices before OS is notified.
         */
        if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
                up_read(&pci_bus_sem);
                return;
        }

        /* Take d3cold_delay requirements into account */
        delay = pci_bus_max_d3cold_delay(dev->subordinate);
        if (!delay) {
                up_read(&pci_bus_sem);
                return;
        }

        child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
                                 bus_list);
        up_read(&pci_bus_sem);

        /*
         * For conventional PCI and PCI-X, we need to wait Tpvrh + Trhfa
         * before accessing the device after reset (that is 1000 ms + 100 ms).
         * In practice this should not be needed because we don't do power
         * management for them (see pci_bridge_d3_possible()).
         */
        if (!pci_is_pcie(dev)) {
                pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
                msleep(1000 + delay);
                return;
        }

        /*
         * PCIe downstream and root ports that do not support link speeds
         * greater than 5 GT/s need to wait a minimum of 100 ms. For higher
         * speeds (gen3) we need to wait first for the data link layer to
         * become active.
         *
         * However, 100 ms is the minimum and the PCIe spec says the
         * software must allow at least 1s before it can determine that the
         * device that did not respond is a broken device. There is
         * evidence that 100 ms is not always enough; for example, a certain
         * Titan Ridge xHCI controller does not always respond to
         * configuration requests if we only wait for 100 ms (see
         * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
         *
         * Therefore we wait for 100 ms and check for the device presence.
         * If it is still not present, give it an additional 100 ms.
         */
        if (!pcie_downstream_port(dev))
                return;

        if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
                pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
                msleep(delay);
        } else {
                pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
                        delay);
                if (!pcie_wait_for_link_delay(dev, true, delay)) {
                        /* Did not train, no need to wait any further */
                        pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
                        return;
                }
        }

        if (!pci_device_is_present(child)) {
                pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
                msleep(delay);
        }
}

void pci_reset_secondary_bus(struct pci_dev *dev)
{
        u16 ctrl;

        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

        /*
         * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
         * this to 2ms to ensure that we meet the minimum requirement.
         */
        msleep(2);

        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

        /*
         * Trhfa for conventional PCI is 2^25 clock cycles.
         * Assuming a minimum 33MHz clock this results in a 1s
         * delay before we can consider subordinate devices to
         * be re-initialized. PCIe has some ways to shorten this,
         * but we don't make use of them yet.
         */
        ssleep(1);
}

void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
{
        pci_reset_secondary_bus(dev);
}

/**
 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
        pcibios_reset_secondary_bus(dev);

        return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);

static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
{
        struct pci_dev *pdev;

        if (pci_is_root_bus(dev->bus) || dev->subordinate ||
            !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
                return -ENOTTY;

        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
                if (pdev != dev)
                        return -ENOTTY;

        if (probe)
                return 0;

        return pci_bridge_secondary_bus_reset(dev->bus->self);
}

static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
{
        int rc = -ENOTTY;

        if (!hotplug || !try_module_get(hotplug->owner))
                return rc;

        if (hotplug->ops->reset_slot)
                rc = hotplug->ops->reset_slot(hotplug, probe);

        module_put(hotplug->owner);

        return rc;
}

static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
{
        if (dev->multifunction || dev->subordinate || !dev->slot ||
            dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
                return -ENOTTY;

        return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}

static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
        int rc;

        rc = pci_dev_reset_slot_function(dev, probe);
        if (rc != -ENOTTY)
                return rc;
        return pci_parent_bus_reset(dev, probe);
}

void pci_dev_lock(struct pci_dev *dev)
{
        /* block PM suspend, driver probe, etc. */
        device_lock(&dev->dev);
        pci_cfg_access_lock(dev);
}
EXPORT_SYMBOL_GPL(pci_dev_lock);

/* Return 1 on successful lock, 0 on contention */
int pci_dev_trylock(struct pci_dev *dev)
{
        if (device_trylock(&dev->dev)) {
                if (pci_cfg_access_trylock(dev))
                        return 1;
                device_unlock(&dev->dev);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_dev_trylock);

void pci_dev_unlock(struct pci_dev *dev)
{
        pci_cfg_access_unlock(dev);
        device_unlock(&dev->dev);
}
EXPORT_SYMBOL_GPL(pci_dev_unlock);
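
/*
 * Example (illustrative sketch): code that must keep a device stable
 * across config space accesses, e.g. around a reset, brackets the work
 * with these helpers::
 *
 *	pci_dev_lock(pdev);
 *	// config accesses and driver state are now protected
 *	pci_dev_unlock(pdev);
 */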

static void pci_dev_save_and_disable(struct pci_dev *dev)
{
        const struct pci_error_handlers *err_handler =
                dev->driver ? dev->driver->err_handler : NULL;

        /*
         * dev->driver->err_handler->reset_prepare() is protected against
         * races with ->remove() by the device lock, which must be held by
         * the caller.
         */
        if (err_handler && err_handler->reset_prepare)
                err_handler->reset_prepare(dev);

        /*
         * Wake-up device prior to save. PM registers default to D0 after
         * reset and a simple register restore doesn't reliably return
         * to a non-D0 state anyway.
         */
        pci_set_power_state(dev, PCI_D0);

        pci_save_state(dev);
        /*
         * Disable the device by clearing the Command register, except for
         * INTx-disable which is set. This not only disables MMIO and I/O port
         * BARs, but also prevents the device from being Bus Master, preventing
         * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
         * compliant devices, INTx-disable prevents legacy interrupts.
         */
        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}

static void pci_dev_restore(struct pci_dev *dev)
{
        const struct pci_error_handlers *err_handler =
                dev->driver ? dev->driver->err_handler : NULL;

        pci_restore_state(dev);

        /*
         * dev->driver->err_handler->reset_done() is protected against
         * races with ->remove() by the device lock, which must be held by
         * the caller.
         */
        if (err_handler && err_handler->reset_done)
                err_handler->reset_done(dev);
}

/* dev->reset_methods[] is a 0-terminated list of indices into this array */
static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
        { },
        { pci_dev_specific_reset, .name = "device_specific" },
        { pci_dev_acpi_reset, .name = "acpi" },
        { pcie_reset_flr, .name = "flr" },
        { pci_af_flr, .name = "af_flr" },
        { pci_pm_reset, .name = "pm" },
        { pci_reset_bus_function, .name = "bus" },
};

static ssize_t reset_method_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        ssize_t len = 0;
        int i, m;

        for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
                m = pdev->reset_methods[i];
                if (!m)
                        break;

                len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
                                     pci_reset_fn_methods[m].name);
        }

        if (len)
                len += sysfs_emit_at(buf, len, "\n");

        return len;
}

static int reset_method_lookup(const char *name)
{
        int m;

        for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
                if (sysfs_streq(name, pci_reset_fn_methods[m].name))
                        return m;
        }

        return 0;	/* not found */
}

static ssize_t reset_method_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        char *options, *name;
        int m, n;
        u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };

        if (sysfs_streq(buf, "")) {
                pdev->reset_methods[0] = 0;
                pci_warn(pdev, "All device reset methods disabled by user");
                return count;
        }

        if (sysfs_streq(buf, "default")) {
                pci_init_reset_methods(pdev);
                return count;
        }

        options = kstrndup(buf, count, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        n = 0;
        while ((name = strsep(&options, " ")) != NULL) {
                if (sysfs_streq(name, ""))
                        continue;

                name = strim(name);

                m = reset_method_lookup(name);
                if (!m) {
                        pci_err(pdev, "Invalid reset method '%s'", name);
                        goto error;
                }

                if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
                        pci_err(pdev, "Unsupported reset method '%s'", name);
                        goto error;
                }

                if (n == PCI_NUM_RESET_METHODS - 1) {
                        pci_err(pdev, "Too many reset methods\n");
                        goto error;
                }

                reset_methods[n++] = m;
        }

        reset_methods[n] = 0;

        /* Warn if dev-specific supported but not highest priority */
        if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
            reset_methods[0] != 1)
                pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
        memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
        kfree(options);
        return count;

error:
        /* Leave previous methods unchanged */
        kfree(options);
        return -EINVAL;
}
static DEVICE_ATTR_RW(reset_method);
5349
5350 static struct attribute *pci_dev_reset_method_attrs[] = {
5351 &dev_attr_reset_method.attr,
5352 NULL,
5353 };
5354
5355 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5356 struct attribute *a, int n)
5357 {
5358 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5359
5360 if (!pci_reset_supported(pdev))
5361 return 0;
5362
5363 return a->mode;
5364 }
5365
5366 const struct attribute_group pci_dev_reset_method_attr_group = {
5367 .attrs = pci_dev_reset_method_attrs,
5368 .is_visible = pci_dev_reset_method_attr_is_visible,
5369 };
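
/*
 * Illustrative sketch, not part of this file: "reset_method" above is a
 * space-separated, ordered list, writable from userspace. A hypothetical
 * userspace C snippet reordering it (device address and method list are
 * made up; error handling is minimal):
 *
 *	int fd = open("/sys/bus/pci/devices/0000:01:00.0/reset_method",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		// Prefer FLR, fall back to a bus reset; writing "default"
 *		// would restore the kernel-probed ordering instead.
 *		write(fd, "flr bus", strlen("flr bus"));
 *		close(fd);
 *	}
 */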
5370
5371 /**
5372 * __pci_reset_function_locked - reset a PCI device function while holding
5373 * the @dev mutex lock.
5374 * @dev: PCI device to reset
5375 *
5376 * Some devices allow an individual function to be reset without affecting
5377 * other functions in the same device. The PCI device must be responsive
5378 * to PCI config space in order to use this function.
5379 *
5380 * The device function is presumed to be unused and the caller is holding
5381 * the device mutex lock when this function is called.
5382 *
5383 * Resetting the device will make the contents of PCI configuration space
5384 * random, so any caller of this must be prepared to reinitialise the
5385 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5386 * etc.
5387 *
5388 * Returns 0 if the device function was successfully reset or negative if the
5389 * device doesn't support resetting a single function.
5390 */
5391 int __pci_reset_function_locked(struct pci_dev *dev)
5392 {
5393 int i, m, rc;
5394
5395 might_sleep();
5396
5397 /*
5398 * A reset method returns -ENOTTY if it doesn't support this device and
5399 * we should try the next method.
5400 *
5401 * If it returns 0 (success), we're finished. If it returns any other
5402 * error, we're also finished: this indicates that further reset
5403 * mechanisms might be broken on the device.
5404 */
5405 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5406 m = dev->reset_methods[i];
5407 if (!m)
5408 return -ENOTTY;
5409
5410 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5411 if (!rc)
5412 return 0;
5413 if (rc != -ENOTTY)
5414 return rc;
5415 }
5416
5417 return -ENOTTY;
5418 }
5419 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5420
5421 /**
5422 * pci_init_reset_methods - check whether device can be safely reset
5423 * and store supported reset mechanisms.
5424 * @dev: PCI device to check for reset mechanisms
5425 *
5426 * Some devices allow an individual function to be reset without affecting
5427 * other functions in the same device. The PCI device must be in D0-D3hot
5428 * state.
5429 *
5430 * Stores reset mechanisms supported by device in reset_methods byte array
5431 * which is a member of struct pci_dev.
5432 */
5433 void pci_init_reset_methods(struct pci_dev *dev)
5434 {
5435 int m, i, rc;
5436
5437 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5438
5439 might_sleep();
5440
5441 i = 0;
5442 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5443 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5444 if (!rc)
5445 dev->reset_methods[i++] = m;
5446 else if (rc != -ENOTTY)
5447 break;
5448 }
5449
5450 dev->reset_methods[i] = 0;
5451 }
5452
5453 /**
5454 * pci_reset_function - quiesce and reset a PCI device function
5455 * @dev: PCI device to reset
5456 *
5457 * Some devices allow an individual function to be reset without affecting
5458 * other functions in the same device. The PCI device must be responsive
5459 * to PCI config space in order to use this function.
5460 *
5461 * This function does not just reset the PCI portion of a device, but
5462 * clears all the state associated with the device. This function differs
5463 * from __pci_reset_function_locked() in that it saves and restores device state
5464 * over the reset and takes the PCI device lock.
5465 *
5466 * Returns 0 if the device function was successfully reset or negative if the
5467 * device doesn't support resetting a single function.
5468 */
5469 int pci_reset_function(struct pci_dev *dev)
5470 {
5471 int rc;
5472
5473 if (!pci_reset_supported(dev))
5474 return -ENOTTY;
5475
5476 pci_dev_lock(dev);
5477 pci_dev_save_and_disable(dev);
5478
5479 rc = __pci_reset_function_locked(dev);
5480
5481 pci_dev_restore(dev);
5482 pci_dev_unlock(dev);
5483
5484 return rc;
5485 }
5486 EXPORT_SYMBOL_GPL(pci_reset_function);
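
/*
 * Illustrative sketch, not part of this file: a driver might call
 * pci_reset_function() to recover a wedged device and then redo its own
 * setup; "mydrv_reinit_hw" is a hypothetical helper.
 *
 *	static int mydrv_recover(struct pci_dev *pdev)
 *	{
 *		int rc = pci_reset_function(pdev);
 *
 *		if (rc)
 *			return rc;
 *
 *		// Config space was saved/restored around the reset;
 *		// only device-private programming needs to be redone.
 *		return mydrv_reinit_hw(pdev);
 *	}
 */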
5487
5488 /**
5489 * pci_reset_function_locked - quiesce and reset a PCI device function
5490 * @dev: PCI device to reset
5491 *
5492 * Some devices allow an individual function to be reset without affecting
5493 * other functions in the same device. The PCI device must be responsive
5494 * to PCI config space in order to use this function.
5495 *
5496 * This function does not just reset the PCI portion of a device, but
5497 * clears all the state associated with the device. This function differs
5498 * from __pci_reset_function_locked() in that it saves and restores device state
5499 * over the reset. It also differs from pci_reset_function() in that it
5500 * requires the PCI device lock to be held.
5501 *
5502 * Returns 0 if the device function was successfully reset or negative if the
5503 * device doesn't support resetting a single function.
5504 */
5505 int pci_reset_function_locked(struct pci_dev *dev)
5506 {
5507 int rc;
5508
5509 if (!pci_reset_supported(dev))
5510 return -ENOTTY;
5511
5512 pci_dev_save_and_disable(dev);
5513
5514 rc = __pci_reset_function_locked(dev);
5515
5516 pci_dev_restore(dev);
5517
5518 return rc;
5519 }
5520 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5521
5522 /**
5523 * pci_try_reset_function - quiesce and reset a PCI device function
5524 * @dev: PCI device to reset
5525 *
5526 * Same as above, except return -EAGAIN if unable to lock device.
5527 */
5528 int pci_try_reset_function(struct pci_dev *dev)
5529 {
5530 int rc;
5531
5532 if (!pci_reset_supported(dev))
5533 return -ENOTTY;
5534
5535 if (!pci_dev_trylock(dev))
5536 return -EAGAIN;
5537
5538 pci_dev_save_and_disable(dev);
5539 rc = __pci_reset_function_locked(dev);
5540 pci_dev_restore(dev);
5541 pci_dev_unlock(dev);
5542
5543 return rc;
5544 }
5545 EXPORT_SYMBOL_GPL(pci_try_reset_function);
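
/*
 * Illustrative sketch, not part of this file: -EAGAIN from
 * pci_try_reset_function() only means the locks were contended, so a
 * bounded retry loop is a reasonable caller policy (retry count and
 * delay are made up):
 *
 *	int i, rc = -EAGAIN;
 *
 *	for (i = 0; i < 5 && rc == -EAGAIN; i++) {
 *		rc = pci_try_reset_function(pdev);
 *		if (rc == -EAGAIN)
 *			msleep(20);	// lock contended; back off briefly
 *	}
 */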
5546
5547 /* Do any devices on or below this bus prevent a bus reset? */
5548 static bool pci_bus_resetable(struct pci_bus *bus)
5549 {
5550 struct pci_dev *dev;
5551
5553 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5554 return false;
5555
5556 list_for_each_entry(dev, &bus->devices, bus_list) {
5557 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5558 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5559 return false;
5560 }
5561
5562 return true;
5563 }
5564
5565 /* Lock devices from the top of the tree down */
5566 static void pci_bus_lock(struct pci_bus *bus)
5567 {
5568 struct pci_dev *dev;
5569
5570 list_for_each_entry(dev, &bus->devices, bus_list) {
5571 pci_dev_lock(dev);
5572 if (dev->subordinate)
5573 pci_bus_lock(dev->subordinate);
5574 }
5575 }
5576
5577 /* Unlock devices from the bottom of the tree up */
5578 static void pci_bus_unlock(struct pci_bus *bus)
5579 {
5580 struct pci_dev *dev;
5581
5582 list_for_each_entry(dev, &bus->devices, bus_list) {
5583 if (dev->subordinate)
5584 pci_bus_unlock(dev->subordinate);
5585 pci_dev_unlock(dev);
5586 }
5587 }
5588
5589 /* Return 1 on successful lock, 0 on contention */
5590 static int pci_bus_trylock(struct pci_bus *bus)
5591 {
5592 struct pci_dev *dev;
5593
5594 list_for_each_entry(dev, &bus->devices, bus_list) {
5595 if (!pci_dev_trylock(dev))
5596 goto unlock;
5597 if (dev->subordinate) {
5598 if (!pci_bus_trylock(dev->subordinate)) {
5599 pci_dev_unlock(dev);
5600 goto unlock;
5601 }
5602 }
5603 }
5604 return 1;
5605
5606 unlock:
5607 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5608 if (dev->subordinate)
5609 pci_bus_unlock(dev->subordinate);
5610 pci_dev_unlock(dev);
5611 }
5612 return 0;
5613 }
5614
5615 /* Do any devices on or below this slot prevent a bus reset? */
5616 static bool pci_slot_resetable(struct pci_slot *slot)
5617 {
5618 struct pci_dev *dev;
5619
5620 if (slot->bus->self &&
5621 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5622 return false;
5623
5624 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5625 if (!dev->slot || dev->slot != slot)
5626 continue;
5627 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5628 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5629 return false;
5630 }
5631
5632 return true;
5633 }
5634
5635 /* Lock devices from the top of the tree down */
5636 static void pci_slot_lock(struct pci_slot *slot)
5637 {
5638 struct pci_dev *dev;
5639
5640 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5641 if (!dev->slot || dev->slot != slot)
5642 continue;
5643 pci_dev_lock(dev);
5644 if (dev->subordinate)
5645 pci_bus_lock(dev->subordinate);
5646 }
5647 }
5648
5649 /* Unlock devices from the bottom of the tree up */
5650 static void pci_slot_unlock(struct pci_slot *slot)
5651 {
5652 struct pci_dev *dev;
5653
5654 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5655 if (!dev->slot || dev->slot != slot)
5656 continue;
5657 if (dev->subordinate)
5658 pci_bus_unlock(dev->subordinate);
5659 pci_dev_unlock(dev);
5660 }
5661 }
5662
5663 /* Return 1 on successful lock, 0 on contention */
5664 static int pci_slot_trylock(struct pci_slot *slot)
5665 {
5666 struct pci_dev *dev;
5667
5668 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5669 if (!dev->slot || dev->slot != slot)
5670 continue;
5671 if (!pci_dev_trylock(dev))
5672 goto unlock;
5673 if (dev->subordinate) {
5674 if (!pci_bus_trylock(dev->subordinate)) {
5675 pci_dev_unlock(dev);
5676 goto unlock;
5677 }
5678 }
5679 }
5680 return 1;
5681
5682 unlock:
5683 list_for_each_entry_continue_reverse(dev,
5684 &slot->bus->devices, bus_list) {
5685 if (!dev->slot || dev->slot != slot)
5686 continue;
5687 if (dev->subordinate)
5688 pci_bus_unlock(dev->subordinate);
5689 pci_dev_unlock(dev);
5690 }
5691 return 0;
5692 }
5693
5694 /*
5695 * Save and disable devices from the top of the tree down while holding
5696 * the @dev mutex lock for the entire tree.
5697 */
5698 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5699 {
5700 struct pci_dev *dev;
5701
5702 list_for_each_entry(dev, &bus->devices, bus_list) {
5703 pci_dev_save_and_disable(dev);
5704 if (dev->subordinate)
5705 pci_bus_save_and_disable_locked(dev->subordinate);
5706 }
5707 }
5708
5709 /*
5710 * Restore devices from top of the tree down while holding @dev mutex lock
5711 * for the entire tree. Parent bridges need to be restored before we can
5712 * get to subordinate devices.
5713 */
5714 static void pci_bus_restore_locked(struct pci_bus *bus)
5715 {
5716 struct pci_dev *dev;
5717
5718 list_for_each_entry(dev, &bus->devices, bus_list) {
5719 pci_dev_restore(dev);
5720 if (dev->subordinate)
5721 pci_bus_restore_locked(dev->subordinate);
5722 }
5723 }
5724
5725 /*
5726 * Save and disable devices from the top of the tree down while holding
5727 * the @dev mutex lock for the entire tree.
5728 */
5729 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5730 {
5731 struct pci_dev *dev;
5732
5733 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5734 if (!dev->slot || dev->slot != slot)
5735 continue;
5736 pci_dev_save_and_disable(dev);
5737 if (dev->subordinate)
5738 pci_bus_save_and_disable_locked(dev->subordinate);
5739 }
5740 }
5741
5742 /*
5743 * Restore devices from top of the tree down while holding @dev mutex lock
5744 * for the entire tree. Parent bridges need to be restored before we can
5745 * get to subordinate devices.
5746 */
5747 static void pci_slot_restore_locked(struct pci_slot *slot)
5748 {
5749 struct pci_dev *dev;
5750
5751 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5752 if (!dev->slot || dev->slot != slot)
5753 continue;
5754 pci_dev_restore(dev);
5755 if (dev->subordinate)
5756 pci_bus_restore_locked(dev->subordinate);
5757 }
5758 }
5759
5760 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5761 {
5762 int rc;
5763
5764 if (!slot || !pci_slot_resetable(slot))
5765 return -ENOTTY;
5766
5767 if (!probe)
5768 pci_slot_lock(slot);
5769
5770 might_sleep();
5771
5772 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5773
5774 if (!probe)
5775 pci_slot_unlock(slot);
5776
5777 return rc;
5778 }
5779
5780 /**
5781 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5782 * @slot: PCI slot to probe
5783 *
5784 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5785 */
5786 int pci_probe_reset_slot(struct pci_slot *slot)
5787 {
5788 return pci_slot_reset(slot, PCI_RESET_PROBE);
5789 }
5790 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5791
5792 /**
5793 * __pci_reset_slot - Try to reset a PCI slot
5794 * @slot: PCI slot to reset
5795 *
5796 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5797 * independent of other slots. For instance, some slots may support slot power
5798 * control. In the case of a 1:1 bus to slot architecture, this function may
5799 * wrap the bus reset to avoid spurious slot related events such as hotplug.
5800 * Generally a slot reset should be attempted before a bus reset. All of the
5801 * functions of the slot and any subordinate buses behind the slot are reset
5802 * through this function. PCI config space of all devices in the slot and
5803 * behind the slot is saved before and restored after reset.
5804 *
5805 * Same as above except return -EAGAIN if the slot cannot be locked
5806 */
5807 static int __pci_reset_slot(struct pci_slot *slot)
5808 {
5809 int rc;
5810
5811 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5812 if (rc)
5813 return rc;
5814
5815 if (pci_slot_trylock(slot)) {
5816 pci_slot_save_and_disable_locked(slot);
5817 might_sleep();
5818 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5819 pci_slot_restore_locked(slot);
5820 pci_slot_unlock(slot);
5821 } else
5822 rc = -EAGAIN;
5823
5824 return rc;
5825 }
5826
5827 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5828 {
5829 int ret;
5830
5831 if (!bus->self || !pci_bus_resetable(bus))
5832 return -ENOTTY;
5833
5834 if (probe)
5835 return 0;
5836
5837 pci_bus_lock(bus);
5838
5839 might_sleep();
5840
5841 ret = pci_bridge_secondary_bus_reset(bus->self);
5842
5843 pci_bus_unlock(bus);
5844
5845 return ret;
5846 }
5847
5848 /**
5849 * pci_bus_error_reset - reset the bridge's subordinate bus
5850 * @bridge: The parent device that connects to the bus to reset
5851 *
5852 * This function will first try to reset the slots on this bus if the method is
5853 * available. If slot reset fails or is not available, this will fall back to a
5854 * secondary bus reset.
5855 */
5856 int pci_bus_error_reset(struct pci_dev *bridge)
5857 {
5858 struct pci_bus *bus = bridge->subordinate;
5859 struct pci_slot *slot;
5860
5861 if (!bus)
5862 return -ENOTTY;
5863
5864 mutex_lock(&pci_slot_mutex);
5865 if (list_empty(&bus->slots))
5866 goto bus_reset;
5867
5868 list_for_each_entry(slot, &bus->slots, list)
5869 if (pci_probe_reset_slot(slot))
5870 goto bus_reset;
5871
5872 list_for_each_entry(slot, &bus->slots, list)
5873 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5874 goto bus_reset;
5875
5876 mutex_unlock(&pci_slot_mutex);
5877 return 0;
5878 bus_reset:
5879 mutex_unlock(&pci_slot_mutex);
5880 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5881 }
5882
5883 /**
5884 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5885 * @bus: PCI bus to probe
5886 *
5887 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5888 */
5889 int pci_probe_reset_bus(struct pci_bus *bus)
5890 {
5891 return pci_bus_reset(bus, PCI_RESET_PROBE);
5892 }
5893 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5894
5895 /**
5896 * __pci_reset_bus - Try to reset a PCI bus
5897 * @bus: top level PCI bus to reset
5898 *
5899 * Same as above except return -EAGAIN if the bus cannot be locked
5900 */
5901 static int __pci_reset_bus(struct pci_bus *bus)
5902 {
5903 int rc;
5904
5905 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5906 if (rc)
5907 return rc;
5908
5909 if (pci_bus_trylock(bus)) {
5910 pci_bus_save_and_disable_locked(bus);
5911 might_sleep();
5912 rc = pci_bridge_secondary_bus_reset(bus->self);
5913 pci_bus_restore_locked(bus);
5914 pci_bus_unlock(bus);
5915 } else
5916 rc = -EAGAIN;
5917
5918 return rc;
5919 }
5920
5921 /**
5922 * pci_reset_bus - Try to reset a PCI bus
5923 * @pdev: top level PCI device to reset via slot/bus
5924 *
5925 * Same as above except return -EAGAIN if the bus cannot be locked
5926 */
5927 int pci_reset_bus(struct pci_dev *pdev)
5928 {
5929 return (!pci_probe_reset_slot(pdev->slot)) ?
5930 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5931 }
5932 EXPORT_SYMBOL_GPL(pci_reset_bus);
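
/*
 * Illustrative sketch, not part of this file: callers that own every
 * function on the bus/slot (VFIO-style drivers are the typical case)
 * might use pci_reset_bus() as a last-resort recovery; the messages are
 * made up.
 *
 *	rc = pci_reset_bus(pdev);
 *	if (rc == -EAGAIN)
 *		pci_warn(pdev, "bus/slot locked, retrying reset later\n");
 *	else if (rc)
 *		pci_err(pdev, "bus reset failed: %d\n", rc);
 */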
5933
5934 /**
5935 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5936 * @dev: PCI device to query
5937 *
5938 * Returns mmrbc: maximum designed memory read count in bytes or
5939 * appropriate error value.
5940 */
5941 int pcix_get_max_mmrbc(struct pci_dev *dev)
5942 {
5943 int cap;
5944 u32 stat;
5945
5946 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5947 if (!cap)
5948 return -EINVAL;
5949
5950 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5951 return -EINVAL;
5952
5953 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5954 }
5955 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5956
5957 /**
5958 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5959 * @dev: PCI device to query
5960 *
5961 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5962 * value.
5963 */
5964 int pcix_get_mmrbc(struct pci_dev *dev)
5965 {
5966 int cap;
5967 u16 cmd;
5968
5969 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5970 if (!cap)
5971 return -EINVAL;
5972
5973 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5974 return -EINVAL;
5975
5976 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5977 }
5978 EXPORT_SYMBOL(pcix_get_mmrbc);
5979
5980 /**
5981 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5982 * @dev: PCI device to set
5983 * @mmrbc: maximum memory read count in bytes
5984 * valid values are 512, 1024, 2048, 4096
5985 *
5986 * If possible, set the maximum memory read byte count; some bridges have
5987 * errata that prevent this.
5988 */
5989 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5990 {
5991 int cap;
5992 u32 stat, v, o;
5993 u16 cmd;
5994
5995 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5996 return -EINVAL;
5997
5998 v = ffs(mmrbc) - 10;
5999
6000 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6001 if (!cap)
6002 return -EINVAL;
6003
6004 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
6005 return -EINVAL;
6006
6007 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
6008 return -E2BIG;
6009
6010 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6011 return -EINVAL;
6012
6013 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
6014 if (o != v) {
6015 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
6016 return -EIO;
6017
6018 cmd &= ~PCI_X_CMD_MAX_READ;
6019 cmd |= v << 2;
6020 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6021 return -EIO;
6022 }
6023 return 0;
6024 }
6025 EXPORT_SYMBOL(pcix_set_mmrbc);
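
/*
 * Worked example of the PCI-X MMRBC encoding used above (illustrative
 * arithmetic only): the register field encodes 512 << v bytes, so
 * v = 0, 1, 2, 3 maps to 512, 1024, 2048, 4096. Going the other way,
 * pcix_set_mmrbc() computes v = ffs(mmrbc) - 10; e.g. for
 * mmrbc = 2048 = 1 << 11, ffs() returns 12, giving v = 2 and
 * 512 << 2 == 2048 back again.
 */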
6026
6027 /**
6028 * pcie_get_readrq - get PCI Express read request size
6029 * @dev: PCI device to query
6030 *
6031 * Returns maximum memory read request in bytes or appropriate error value.
6032 */
6033 int pcie_get_readrq(struct pci_dev *dev)
6034 {
6035 u16 ctl;
6036
6037 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6038
6039 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6040 }
6041 EXPORT_SYMBOL(pcie_get_readrq);
6042
6043 /**
6044 * pcie_set_readrq - set PCI Express maximum memory read request
6045 * @dev: PCI device to set
6046 * @rq: maximum memory read count in bytes
6047 * valid values are 128, 256, 512, 1024, 2048, 4096
6048 *
6049 * If possible, set the maximum memory read request size in bytes.
6050 */
6051 int pcie_set_readrq(struct pci_dev *dev, int rq)
6052 {
6053 u16 v;
6054 int ret;
6055
6056 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6057 return -EINVAL;
6058
6059 /*
6060 * If using the "performance" PCIe config, we clamp the read rq
6061 * size to the max packet size to keep the host bridge from
6062 * generating requests larger than we can cope with.
6063 */
6064 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6065 int mps = pcie_get_mps(dev);
6066
6067 if (mps < rq)
6068 rq = mps;
6069 }
6070
6071 v = (ffs(rq) - 8) << 12;
6072
6073 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6074 PCI_EXP_DEVCTL_READRQ, v);
6075
6076 return pcibios_err_to_errno(ret);
6077 }
6078 EXPORT_SYMBOL(pcie_set_readrq);
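
/*
 * Illustrative sketch, not part of this file: a driver issuing large
 * DMA reads might raise MRRS at probe time, tolerating the clamping
 * described above; the chosen size and message are made up.
 *
 *	int rq = pcie_get_readrq(pdev);
 *
 *	if (rq < 512 && pcie_set_readrq(pdev, 512))
 *		pci_warn(pdev, "failed to raise MRRS, still %d bytes\n", rq);
 */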
6079
6080 /**
6081 * pcie_get_mps - get PCI Express maximum payload size
6082 * @dev: PCI device to query
6083 *
6084 * Returns maximum payload size in bytes
6085 */
6086 int pcie_get_mps(struct pci_dev *dev)
6087 {
6088 u16 ctl;
6089
6090 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6091
6092 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6093 }
6094 EXPORT_SYMBOL(pcie_get_mps);
6095
6096 /**
6097 * pcie_set_mps - set PCI Express maximum payload size
6098 * @dev: PCI device to set
6099 * @mps: maximum payload size in bytes
6100 * valid values are 128, 256, 512, 1024, 2048, 4096
6101 *
6102 * If possible, set the maximum payload size.
6103 */
6104 int pcie_set_mps(struct pci_dev *dev, int mps)
6105 {
6106 u16 v;
6107 int ret;
6108
6109 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6110 return -EINVAL;
6111
6112 v = ffs(mps) - 8;
6113 if (v > dev->pcie_mpss)
6114 return -EINVAL;
6115 v <<= 5;
6116
6117 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6118 PCI_EXP_DEVCTL_PAYLOAD, v);
6119
6120 return pcibios_err_to_errno(ret);
6121 }
6122 EXPORT_SYMBOL(pcie_set_mps);
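
/*
 * Worked example of the MPS encoding used above (illustrative
 * arithmetic only): the field encodes 128 << v bytes, so
 * mps = 256 = 1 << 8 gives v = ffs(mps) - 8 = 1. If v exceeded
 * dev->pcie_mpss, the device's advertised capability, pcie_set_mps()
 * would reject it with -EINVAL before touching the Device Control
 * register.
 */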
6123
6124 /**
6125 * pcie_bandwidth_available - determine minimum link settings of a PCIe
6126 * device and its bandwidth limitation
6127 * @dev: PCI device to query
6128 * @limiting_dev: storage for device causing the bandwidth limitation
6129 * @speed: storage for speed of limiting device
6130 * @width: storage for width of limiting device
6131 *
6132 * Walk up the PCI device chain and find the point where the minimum
6133 * bandwidth is available. Return the bandwidth available there and (if
6134 * limiting_dev, speed, and width pointers are supplied) information about
6135 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
6136 * raw bandwidth.
6137 */
6138 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6139 enum pci_bus_speed *speed,
6140 enum pcie_link_width *width)
6141 {
6142 u16 lnksta;
6143 enum pci_bus_speed next_speed;
6144 enum pcie_link_width next_width;
6145 u32 bw, next_bw;
6146
6147 if (speed)
6148 *speed = PCI_SPEED_UNKNOWN;
6149 if (width)
6150 *width = PCIE_LNK_WIDTH_UNKNOWN;
6151
6152 bw = 0;
6153
6154 while (dev) {
6155 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6156
6157 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
6158 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
6159 PCI_EXP_LNKSTA_NLW_SHIFT;
6160
6161 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6162
6163 /* Check if current device limits the total bandwidth */
6164 if (!bw || next_bw <= bw) {
6165 bw = next_bw;
6166
6167 if (limiting_dev)
6168 *limiting_dev = dev;
6169 if (speed)
6170 *speed = next_speed;
6171 if (width)
6172 *width = next_width;
6173 }
6174
6175 dev = pci_upstream_bridge(dev);
6176 }
6177
6178 return bw;
6179 }
6180 EXPORT_SYMBOL(pcie_bandwidth_available);
6181
6182 /**
6183 * pcie_get_speed_cap - query for the PCI device's link speed capability
6184 * @dev: PCI device to query
6185 *
6186 * Query the PCI device speed capability. Return the maximum link speed
6187 * supported by the device.
6188 */
6189 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6190 {
6191 u32 lnkcap2, lnkcap;
6192
6193 /*
6194 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
6195 * implementation note there recommends using the Supported Link
6196 * Speeds Vector in Link Capabilities 2 when supported.
6197 *
6198 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6199 * should use the Supported Link Speeds field in Link Capabilities,
6200 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6201 */
6202 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6203
6204 /* PCIe r3.0-compliant */
6205 if (lnkcap2)
6206 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6207
6208 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6209 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6210 return PCIE_SPEED_5_0GT;
6211 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6212 return PCIE_SPEED_2_5GT;
6213
6214 return PCI_SPEED_UNKNOWN;
6215 }
6216 EXPORT_SYMBOL(pcie_get_speed_cap);
6217
6218 /**
6219 * pcie_get_width_cap - query for the PCI device's link width capability
6220 * @dev: PCI device to query
6221 *
6222 * Query the PCI device width capability. Return the maximum link width
6223 * supported by the device.
6224 */
6225 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6226 {
6227 u32 lnkcap;
6228
6229 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6230 if (lnkcap)
6231 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
6232
6233 return PCIE_LNK_WIDTH_UNKNOWN;
6234 }
6235 EXPORT_SYMBOL(pcie_get_width_cap);
6236
6237 /**
6238 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6239 * @dev: PCI device
6240 * @speed: storage for link speed
6241 * @width: storage for link width
6242 *
6243 * Calculate a PCI device's link bandwidth by querying for its link speed
6244 * and width, multiplying them, and applying encoding overhead. The result
6245 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6246 */
6247 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6248 enum pcie_link_width *width)
6249 {
6250 *speed = pcie_get_speed_cap(dev);
6251 *width = pcie_get_width_cap(dev);
6252
6253 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6254 return 0;
6255
6256 return *width * PCIE_SPEED2MBS_ENC(*speed);
6257 }
6258
6259 /**
6260 * __pcie_print_link_status - Report the PCI device's link speed and width
6261 * @dev: PCI device to query
6262 * @verbose: Print info even when enough bandwidth is available
6263 *
6264 * If the available bandwidth at the device is less than the device is
6265 * capable of, report the device's maximum possible bandwidth and the
6266 * upstream link that limits its performance. If @verbose, always print
6267 * the available bandwidth, even if the device isn't constrained.
6268 */
6269 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6270 {
6271 enum pcie_link_width width, width_cap;
6272 enum pci_bus_speed speed, speed_cap;
6273 struct pci_dev *limiting_dev = NULL;
6274 u32 bw_avail, bw_cap;
6275
6276 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6277 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6278
6279 if (bw_avail >= bw_cap && verbose)
6280 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6281 bw_cap / 1000, bw_cap % 1000,
6282 pci_speed_string(speed_cap), width_cap);
6283 else if (bw_avail < bw_cap)
6284 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6285 bw_avail / 1000, bw_avail % 1000,
6286 pci_speed_string(speed), width,
6287 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6288 bw_cap / 1000, bw_cap % 1000,
6289 pci_speed_string(speed_cap), width_cap);
6290 }
6291
6292 /**
6293 * pcie_print_link_status - Report the PCI device's link speed and width
6294 * @dev: PCI device to query
6295 *
6296 * Report the available bandwidth at the device.
6297 */
6298 void pcie_print_link_status(struct pci_dev *dev)
6299 {
6300 __pcie_print_link_status(dev, true);
6301 }
6302 EXPORT_SYMBOL(pcie_print_link_status);
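
/*
 * Illustrative sketch, not part of this file: bandwidth-sensitive
 * drivers (high-speed NICs, for instance) often call this once from
 * probe so a constrained slot shows up in the log; "mydrv" is made up.
 *
 *	static int mydrv_probe(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pcie_print_link_status(pdev);	// warn if link limits us
 *		return 0;
 *	}
 */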
6303
6304 /**
6305 * pci_select_bars - Make BAR mask from the type of resource
6306 * @dev: the PCI device for which BAR mask is made
6307 * @flags: resource type mask to be selected
6308 *
6309 * This helper routine makes a BAR mask from the type of resource.
6310 */
6311 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6312 {
6313 int i, bars = 0;
6314 for (i = 0; i < PCI_NUM_RESOURCES; i++)
6315 if (pci_resource_flags(dev, i) & flags)
6316 bars |= (1 << i);
6317 return bars;
6318 }
6319 EXPORT_SYMBOL(pci_select_bars);
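
/*
 * Illustrative sketch, not part of this file: pci_select_bars() is
 * commonly paired with pci_request_selected_regions() to claim only the
 * memory BARs; "mydrv" is a hypothetical driver name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int rc = pci_request_selected_regions(pdev, bars, "mydrv");
 *
 *	if (rc)
 *		return rc;
 */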
6320
6321 /* Some architectures require additional programming to enable VGA */
6322 static arch_set_vga_state_t arch_set_vga_state;
6323
6324 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6325 {
6326 arch_set_vga_state = func; /* NULL disables */
6327 }
6328
6329 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6330 unsigned int command_bits, u32 flags)
6331 {
6332 if (arch_set_vga_state)
6333 return arch_set_vga_state(dev, decode, command_bits,
6334 flags);
6335 return 0;
6336 }
6337
6338 /**
6339 * pci_set_vga_state - set VGA decode state on device and parents if requested
6340 * @dev: the PCI device
6341 * @decode: true = enable decoding, false = disable decoding
6342 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6343 * @flags: traverse ancestors and change bridges
6344 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6345 */
6346 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6347 unsigned int command_bits, u32 flags)
6348 {
6349 struct pci_bus *bus;
6350 struct pci_dev *bridge;
6351 u16 cmd;
6352 int rc;
6353
6354 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6355
6356 /* ARCH specific VGA enables */
6357 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6358 if (rc)
6359 return rc;
6360
6361 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6362 pci_read_config_word(dev, PCI_COMMAND, &cmd);
6363 if (decode)
6364 cmd |= command_bits;
6365 else
6366 cmd &= ~command_bits;
6367 pci_write_config_word(dev, PCI_COMMAND, cmd);
6368 }
6369
6370 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6371 return 0;
6372
6373 bus = dev->bus;
6374 while (bus) {
6375 bridge = bus->self;
6376 if (bridge) {
6377 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6378 &cmd);
6379 if (decode)
6380 cmd |= PCI_BRIDGE_CTL_VGA;
6381 else
6382 cmd &= ~PCI_BRIDGE_CTL_VGA;
6383 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6384 cmd);
6385 }
6386 bus = bus->parent;
6387 }
6388 return 0;
6389 }
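
/*
 * Illustrative sketch, not part of this file: the VGA arbiter is the
 * typical caller of pci_set_vga_state(); routing legacy VGA accesses to
 * one card while updating bridges along the way might look like this
 * (the surrounding policy is made up):
 *
 *	pci_set_vga_state(pdev, true,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */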
6390
6391 #ifdef CONFIG_ACPI
6392 bool pci_pr3_present(struct pci_dev *pdev)
6393 {
6394 struct acpi_device *adev;
6395
6396 if (acpi_disabled)
6397 return false;
6398
6399 adev = ACPI_COMPANION(&pdev->dev);
6400 if (!adev)
6401 return false;
6402
6403 return adev->power.flags.power_resources &&
6404 acpi_has_method(adev->handle, "_PR3");
6405 }
6406 EXPORT_SYMBOL_GPL(pci_pr3_present);
6407 #endif
6408
6409 /**
6410 * pci_add_dma_alias - Add a DMA devfn alias for a device
6411 * @dev: the PCI device for which alias is added
6412 * @devfn_from: alias slot and function
6413 * @nr_devfns: number of subsequent devfns to alias
6414 *
6415 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6416 * which is used to program permissible bus-devfn source addresses for DMA
6417 * requests in an IOMMU. These aliases factor into IOMMU group creation
6418 * and are useful for devices generating DMA requests beyond or different
6419 * from their logical bus-devfn. Examples include device quirks where the
6420 * device simply uses the wrong devfn, as well as non-transparent bridges
6421 * where the alias may be a proxy for devices in another domain.
6422 *
6423 * IOMMU group creation is performed during device discovery or addition,
6424 * prior to any potential DMA mapping and therefore prior to driver probing
6425 * (especially for userspace assigned devices where IOMMU group definition
6426 * cannot be left as a userspace activity). DMA aliases should therefore
6427 * be configured via quirks, such as the PCI fixup header quirk.
6428 */
6429 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6430 unsigned int nr_devfns)
6431 {
6432 int devfn_to;
6433
6434 nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6435 devfn_to = devfn_from + nr_devfns - 1;
6436
6437 if (!dev->dma_alias_mask)
6438 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6439 if (!dev->dma_alias_mask) {
6440 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6441 return;
6442 }
6443
6444 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6445
6446 if (nr_devfns == 1)
6447 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6448 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6449 else if (nr_devfns > 1)
6450 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6451 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6452 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6453 }
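
/*
 * Illustrative sketch, not part of this file: as the kernel-doc above
 * notes, aliases are set up from quirks. A hypothetical fixup for a
 * device that issues DMA as function 0 (vendor/device IDs made up):
 *
 *	static void quirk_mydev_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev,
 *				  PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_mydev_dma_alias);
 */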
6454
6455 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6456 {
6457 return (dev1->dma_alias_mask &&
6458 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6459 (dev2->dma_alias_mask &&
6460 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6461 pci_real_dma_dev(dev1) == dev2 ||
6462 pci_real_dma_dev(dev2) == dev1;
6463 }
6464
6465 bool pci_device_is_present(struct pci_dev *pdev)
6466 {
6467 u32 v;
6468
6469 if (pci_dev_is_disconnected(pdev))
6470 return false;
6471 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6472 }
6473 EXPORT_SYMBOL_GPL(pci_device_is_present);
6474
6475 void pci_ignore_hotplug(struct pci_dev *dev)
6476 {
6477 struct pci_dev *bridge = dev->bus->self;
6478
6479 dev->ignore_hotplug = 1;
6480 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6481 if (bridge)
6482 bridge->ignore_hotplug = 1;
6483 }
6484 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6485
6486 /**
6487 * pci_real_dma_dev - Get PCI DMA device for PCI device
6488 * @dev: the PCI device that may have a PCI DMA alias
6489 *
6490 * Permits the platform to provide architecture-specific functionality to
6491 * devices needing to alias DMA to another PCI device on another PCI bus. If
6492 * the PCI device is on the same bus, it is recommended to use
6493 * pci_add_dma_alias(). This is the default implementation. Architecture
6494 * implementations can override this.
6495 */
6496 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6497 {
6498 return dev;
6499 }
6500
6501 resource_size_t __weak pcibios_default_alignment(void)
6502 {
6503 return 0;
6504 }
6505
6506 /*
6507 * Arches that don't want to expose struct resource to userland as-is in
6508 * sysfs and /proc can implement their own pci_resource_to_user().
6509 */
6510 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6511 const struct resource *rsrc,
6512 resource_size_t *start, resource_size_t *end)
6513 {
6514 *start = rsrc->start;
6515 *end = rsrc->end;
6516 }
6517
6518 static char *resource_alignment_param;
6519 static DEFINE_SPINLOCK(resource_alignment_lock);
6520
6521 /**
6522 * pci_specified_resource_alignment - get resource alignment specified by user.
6523 * @dev: the PCI device to get
6524 * @resize: whether or not to change resources' size when reassigning alignment
6525 *
6526 * RETURNS: Resource alignment if it is specified.
6527 * Zero if it is not specified.
6528 */
6529 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6530 bool *resize)
6531 {
6532 int align_order, count;
6533 resource_size_t align = pcibios_default_alignment();
6534 const char *p;
6535 int ret;
6536
6537 spin_lock(&resource_alignment_lock);
6538 p = resource_alignment_param;
6539 if (!p || !*p)
6540 goto out;
6541 if (pci_has_flag(PCI_PROBE_ONLY)) {
6542 align = 0;
6543 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6544 goto out;
6545 }
6546
6547 while (*p) {
6548 count = 0;
6549 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6550 p[count] == '@') {
6551 p += count + 1;
6552 if (align_order > 63) {
6553 pr_err("PCI: Invalid requested alignment (order %d)\n",
6554 align_order);
6555 align_order = PAGE_SHIFT;
6556 }
6557 } else {
6558 align_order = PAGE_SHIFT;
6559 }
6560
6561 ret = pci_dev_str_match(dev, p, &p);
6562 if (ret == 1) {
6563 *resize = true;
6564 align = 1ULL << align_order;
6565 break;
6566 } else if (ret < 0) {
6567 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6568 p);
6569 break;
6570 }
6571
6572 if (*p != ';' && *p != ',') {
6573 /* End of param or invalid format */
6574 break;
6575 }
6576 p++;
6577 }
6578 out:
6579 spin_unlock(&resource_alignment_lock);
6580 return align;
6581 }
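
/*
 * Illustrative examples of the parameter format parsed above (values
 * made up): each entry is "[<order>@]<device match>" and entries are
 * separated by ';' or ','. "pci=resource_alignment=20@0000:01:00.0"
 * requests 1ULL << 20 (1 MiB) alignment for that device; without the
 * "<order>@" prefix the order defaults to PAGE_SHIFT, i.e. one page.
 */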
6582
6583 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6584 resource_size_t align, bool resize)
6585 {
6586 struct resource *r = &dev->resource[bar];
6587 resource_size_t size;
6588
6589 if (!(r->flags & IORESOURCE_MEM))
6590 return;
6591
6592 if (r->flags & IORESOURCE_PCI_FIXED) {
6593 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6594 bar, r, (unsigned long long)align);
6595 return;
6596 }
6597
6598 size = resource_size(r);
6599 if (size >= align)
6600 return;
6601
6602 /*
6603 * Increase the alignment of the resource. There are two ways we
6604 * can do this:
6605 *
6606 * 1) Increase the size of the resource. BARs are aligned on their
6607 * size, so when we reallocate space for this resource, we'll
6608 * allocate it with the larger alignment. This also prevents
6609 * assignment of any other BARs inside the alignment region, so
6610 * if we're requesting page alignment, this means no other BARs
6611 * will share the page.
6612 *
6613 * The disadvantage is that this makes the resource larger than
6614 * the hardware BAR, which may break drivers that compute things
6615 * based on the resource size, e.g., to find registers at a
6616 * fixed offset before the end of the BAR.
6617 *
6618 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6619 * set r->start to the desired alignment. By itself this
6620 * doesn't prevent other BARs being put inside the alignment
6621 * region, but if we realign *every* resource of every device in
6622 * the system, none of them will share an alignment region.
6623 *
6624 * When the user has requested alignment for only some devices via
6625 * the "pci=resource_alignment" argument, "resize" is true and we
6626 * use the first method. Otherwise we assume we're aligning all
6627 * devices and we use the second.
6628 */
6629
6630 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6631 bar, r, (unsigned long long)align);
6632
6633 if (resize) {
6634 r->start = 0;
6635 r->end = align - 1;
6636 } else {
6637 r->flags &= ~IORESOURCE_SIZEALIGN;
6638 r->flags |= IORESOURCE_STARTALIGN;
6639 r->start = align;
6640 r->end = r->start + size - 1;
6641 }
6642 r->flags |= IORESOURCE_UNSET;
6643 }
6644
6645 /*
6646 * This function disables memory decoding and releases memory resources
6647 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
6648 * It also rounds up the size to the specified alignment.
6649 * Later on, the kernel will assign page-aligned memory resource back
6650 * to the device.
6651 */
6652 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6653 {
6654 int i;
6655 struct resource *r;
6656 resource_size_t align;
6657 u16 command;
6658 bool resize = false;
6659
6660 /*
6661 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6662 * 3.4.1.11. Their resources are allocated from the space
6663 * described by the VF BARx register in the PF's SR-IOV capability.
6664 * We can't influence their alignment here.
6665 */
6666 if (dev->is_virtfn)
6667 return;
6668
6669 /* Check whether this device is a reassignment target specified by the user */
6670 align = pci_specified_resource_alignment(dev, &resize);
6671 if (!align)
6672 return;
6673
6674 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6675 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6676 pci_warn(dev, "Can't reassign resources to host bridge\n");
6677 return;
6678 }
6679
6680 pci_read_config_word(dev, PCI_COMMAND, &command);
6681 command &= ~PCI_COMMAND_MEMORY;
6682 pci_write_config_word(dev, PCI_COMMAND, command);
6683
6684 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6685 pci_request_resource_alignment(dev, i, align, resize);
6686
6687 /*
6688 * We need to disable the bridge's resource windows so the kernel
6689 * can reassign new resource windows to it later on.
6691 */
6692 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6693 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6694 r = &dev->resource[i];
6695 if (!(r->flags & IORESOURCE_MEM))
6696 continue;
6697 r->flags |= IORESOURCE_UNSET;
6698 r->end = resource_size(r) - 1;
6699 r->start = 0;
6700 }
6701 pci_disable_bridge_window(dev);
6702 }
6703 }
6704
6705 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6706 {
6707 size_t count = 0;
6708
6709 spin_lock(&resource_alignment_lock);
6710 if (resource_alignment_param)
6711 count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6712 spin_unlock(&resource_alignment_lock);
6713
6714 return count;
6715 }
6716
6717 static ssize_t resource_alignment_store(struct bus_type *bus,
6718 const char *buf, size_t count)
6719 {
6720 char *param, *old, *end;
6721
6722 if (count >= (PAGE_SIZE - 1))
6723 return -EINVAL;
6724
6725 param = kstrndup(buf, count, GFP_KERNEL);
6726 if (!param)
6727 return -ENOMEM;
6728
6729 end = strchr(param, '\n');
6730 if (end)
6731 *end = '\0';
6732
6733 spin_lock(&resource_alignment_lock);
6734 old = resource_alignment_param;
6735 if (strlen(param)) {
6736 resource_alignment_param = param;
6737 } else {
6738 kfree(param);
6739 resource_alignment_param = NULL;
6740 }
6741 spin_unlock(&resource_alignment_lock);
6742
6743 kfree(old);
6744
6745 return count;
6746 }
6747
6748 static BUS_ATTR_RW(resource_alignment);
6749
6750 static int __init pci_resource_alignment_sysfs_init(void)
6751 {
6752 return bus_create_file(&pci_bus_type,
6753 &bus_attr_resource_alignment);
6754 }
6755 late_initcall(pci_resource_alignment_sysfs_init);
6756
6757 static void pci_no_domains(void)
6758 {
6759 #ifdef CONFIG_PCI_DOMAINS
6760 pci_domains_supported = 0;
6761 #endif
6762 }
6763
6764 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6765 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6766
6767 static int pci_get_new_domain_nr(void)
6768 {
6769 return atomic_inc_return(&__domain_nr);
6770 }
6771
6772 static int of_pci_bus_find_domain_nr(struct device *parent)
6773 {
6774 static int use_dt_domains = -1;
6775 int domain = -1;
6776
6777 if (parent)
6778 domain = of_get_pci_domain_nr(parent->of_node);
6779
6780 /*
6781 * Check DT domain and use_dt_domains values.
6782 *
6783 * If DT domain property is valid (domain >= 0) and
6784 * use_dt_domains != 0, the DT assignment is valid since this means
6785 * we have not previously allocated a domain number by using
6786 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6787 * 1, to indicate that we have just assigned a domain number from
6788 * DT.
6789 *
6790 * If the DT domain property value is not valid (i.e., domain < 0), and we
6791 * have not previously assigned a domain number from DT
6792 * (use_dt_domains != 1) we should assign a domain number by
6793 * using the:
6794 *
6795 * pci_get_new_domain_nr()
6796 *
6797 * API and update the use_dt_domains value to keep track of method we
6798 * are using to assign domain numbers (use_dt_domains = 0).
6799 *
6800 * All other combinations imply we have a platform that is trying
6801 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6802 * which is a recipe for domain mishandling and it is prevented by
6803 * invalidating the domain value (domain = -1) and printing a
6804 * corresponding error.
6805 */
6806 if (domain >= 0 && use_dt_domains) {
6807 use_dt_domains = 1;
6808 } else if (domain < 0 && use_dt_domains != 1) {
6809 use_dt_domains = 0;
6810 domain = pci_get_new_domain_nr();
6811 } else {
6812 if (parent)
6813 pr_err("Node %pOF has ", parent->of_node);
6814 pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6815 domain = -1;
6816 }
6817
6818 return domain;
6819 }
6820
6821 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6822 {
6823 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6824 acpi_pci_bus_find_domain_nr(bus);
6825 }
6826 #endif
6827
6828 /**
6829 * pci_ext_cfg_avail - can we access extended PCI config space?
6830 *
6831 * Returns 1 if we can access PCI extended config space (offsets
6832 * greater than 0xff). This is the default implementation. Architecture
6833 * implementations can override this.
6834 */
6835 int __weak pci_ext_cfg_avail(void)
6836 {
6837 return 1;
6838 }
6839
6840 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6841 {
6842 }
6843 EXPORT_SYMBOL(pci_fixup_cardbus);
6844
6845 static int __init pci_setup(char *str)
6846 {
6847 while (str) {
6848 char *k = strchr(str, ',');
6849 if (k)
6850 *k++ = 0;
6851 if (*str && (str = pcibios_setup(str)) && *str) {
6852 if (!strcmp(str, "nomsi")) {
6853 pci_no_msi();
6854 } else if (!strncmp(str, "noats", 5)) {
6855 pr_info("PCIe: ATS is disabled\n");
6856 pcie_ats_disabled = true;
6857 } else if (!strcmp(str, "noaer")) {
6858 pci_no_aer();
6859 } else if (!strcmp(str, "earlydump")) {
6860 pci_early_dump = true;
6861 } else if (!strncmp(str, "realloc=", 8)) {
6862 pci_realloc_get_opt(str + 8);
6863 } else if (!strncmp(str, "realloc", 7)) {
6864 pci_realloc_get_opt("on");
6865 } else if (!strcmp(str, "nodomains")) {
6866 pci_no_domains();
6867 } else if (!strncmp(str, "noari", 5)) {
6868 pcie_ari_disabled = true;
6869 } else if (!strncmp(str, "cbiosize=", 9)) {
6870 pci_cardbus_io_size = memparse(str + 9, &str);
6871 } else if (!strncmp(str, "cbmemsize=", 10)) {
6872 pci_cardbus_mem_size = memparse(str + 10, &str);
6873 } else if (!strncmp(str, "resource_alignment=", 19)) {
6874 resource_alignment_param = str + 19;
6875 } else if (!strncmp(str, "ecrc=", 5)) {
6876 pcie_ecrc_get_policy(str + 5);
6877 } else if (!strncmp(str, "hpiosize=", 9)) {
6878 pci_hotplug_io_size = memparse(str + 9, &str);
6879 } else if (!strncmp(str, "hpmmiosize=", 11)) {
6880 pci_hotplug_mmio_size = memparse(str + 11, &str);
6881 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6882 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6883 } else if (!strncmp(str, "hpmemsize=", 10)) {
6884 pci_hotplug_mmio_size = memparse(str + 10, &str);
6885 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6886 } else if (!strncmp(str, "hpbussize=", 10)) {
6887 pci_hotplug_bus_size =
6888 simple_strtoul(str + 10, &str, 0);
6889 if (pci_hotplug_bus_size > 0xff)
6890 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6891 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6892 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6893 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6894 pcie_bus_config = PCIE_BUS_SAFE;
6895 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6896 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6897 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6898 pcie_bus_config = PCIE_BUS_PEER2PEER;
6899 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6900 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6901 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6902 disable_acs_redir_param = str + 18;
6903 } else {
6904 pr_err("PCI: Unknown option `%s'\n", str);
6905 }
6906 }
6907 str = k;
6908 }
6909 return 0;
6910 }
6911 early_param("pci", pci_setup);
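
/*
 * Illustrative example of combining the options parsed above (values
 * made up): booting with
 *
 *	pci=hpmemsize=128M,hpbussize=8,pcie_bus_safe
 *
 * reserves 128M for both the non-prefetchable and prefetchable MMIO
 * windows of each hotplug bridge, reserves 8 bus numbers per hotplug
 * bridge, and selects the "safe" MPS strategy; options are
 * comma-separated, as the strchr(str, ',') loop above expects.
 */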
6912
6913 /*
6914 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6915 * in pci_setup(), above, to point to data in the __initdata section which
6916 * will be freed after the init sequence is complete. We can't allocate memory
6917 * in pci_setup() because some architectures do not have any memory allocation
6918 * service available during an early_param() call. So we allocate memory and
6919 * copy the variable here before the init section is freed.
6920 *
6921 */
6922 static int __init pci_realloc_setup_params(void)
6923 {
6924 resource_alignment_param = kstrdup(resource_alignment_param,
6925 GFP_KERNEL);
6926 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6927
6928 return 0;
6929 }
6930 pure_initcall(pci_realloc_setup_params);
6931