1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * pci.h
4 *
5 * PCI defines and function prototypes
6 * Copyright 1994, Drew Eckhardt
7 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8 *
9 * PCI Express ASPM defines and function prototypes
10 * Copyright (c) 2007 Intel Corp.
11 * Zhang Yanmin (yanmin.zhang@intel.com)
12 * Shaohua Li (shaohua.li@intel.com)
13 *
14 * For more information, please consult the following manuals (look at
15 * http://www.pcisig.com/ for how to get them):
16 *
17 * PCI BIOS Specification
18 * PCI Local Bus Specification
19 * PCI to PCI Bridge Specification
20 * PCI Express Specification
21 * PCI System Design Guide
22 */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43
44 #include <linux/pci_ids.h>
45
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
47 PCI_STATUS_SIG_SYSTEM_ERROR | \
48 PCI_STATUS_REC_MASTER_ABORT | \
49 PCI_STATUS_REC_TARGET_ABORT | \
50 PCI_STATUS_SIG_TARGET_ABORT | \
51 PCI_STATUS_PARITY)
52
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 7
55
56 #define PCI_RESET_PROBE true
57 #define PCI_RESET_DO_RESET false
58
59 /*
60 * The PCI interface treats multi-function devices as independent
61 * devices. The slot/function address of each device is encoded
62 * in a single byte as follows:
63 *
64 * 7:3 = slot
65 * 2:0 = function
66 *
67 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68 * In the interest of not exposing interfaces to user-space unnecessarily,
69 * the following kernel-only defines are being added here.
70 */
71 #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
72 /* Return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
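/*
 * Illustrative sketch of how these encodings compose (bus 0x3a, slot 0x1c
 * and function 2 are arbitrary example values, not anything defined here):
 *
 *	u8  devfn = PCI_DEVFN(0x1c, 2);		// 0xe2: slot in bits 7:3, function in 2:0
 *	u16 devid = PCI_DEVID(0x3a, devfn);	// 0x3ae2: bus number in the high byte
 *
 *	PCI_BUS_NUM(devid) == 0x3a;
 *	PCI_SLOT(devfn)    == 0x1c;
 *	PCI_FUNC(devfn)    == 2;
 */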
74
75 /* pci_slot represents a physical slot */
76 struct pci_slot {
77 struct pci_bus *bus; /* Bus this slot is on */
78 struct list_head list; /* Node in list of slots */
79 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
80 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
81 struct kobject kobj;
82 };
83
84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 return kobject_name(&slot->kobj);
87 }
88
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
90 enum pci_mmap_state {
91 pci_mmap_io,
92 pci_mmap_mem
93 };
94
95 /* For PCI devices, the region numbers are assigned this way: */
96 enum {
97 /* #0-5: standard PCI resources */
98 PCI_STD_RESOURCES,
99 PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
100
101 /* #6: expansion ROM resource */
102 PCI_ROM_RESOURCE,
103
104 /* Device-specific resources */
105 #ifdef CONFIG_PCI_IOV
106 PCI_IOV_RESOURCES,
107 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
108 #endif
109
110 /* PCI-to-PCI (P2P) bridge windows */
111 #define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
112 #define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
113 #define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
114
115 /* CardBus bridge windows */
116 #define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
117 #define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
118 #define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
119 #define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
120
121 /* Total number of bridge resources for P2P and CardBus */
122 #define PCI_BRIDGE_RESOURCE_NUM 4
123
124 /* Resources assigned to buses behind the bridge */
125 PCI_BRIDGE_RESOURCES,
126 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
127 PCI_BRIDGE_RESOURCE_NUM - 1,
128
129 /* Total resources associated with a PCI device */
130 PCI_NUM_RESOURCES,
131
132 /* Preserve this for compatibility */
133 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
134 };
135
136 /**
137 * enum pci_interrupt_pin - PCI INTx interrupt values
138 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
139 * @PCI_INTERRUPT_INTA: PCI INTA pin
140 * @PCI_INTERRUPT_INTB: PCI INTB pin
141 * @PCI_INTERRUPT_INTC: PCI INTC pin
142 * @PCI_INTERRUPT_INTD: PCI INTD pin
143 *
144 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
145 * PCI_INTERRUPT_PIN register.
146 */
147 enum pci_interrupt_pin {
148 PCI_INTERRUPT_UNKNOWN,
149 PCI_INTERRUPT_INTA,
150 PCI_INTERRUPT_INTB,
151 PCI_INTERRUPT_INTC,
152 PCI_INTERRUPT_INTD,
153 };
154
155 /* The number of legacy PCI INTx interrupts */
156 #define PCI_NUM_INTX 4
157
158 /*
159 * Reading from a device that doesn't respond typically returns ~0. A
160 * successful read from a device may also return ~0, so you need additional
161 * information to reliably identify errors.
162 */
163 #define PCI_ERROR_RESPONSE (~0ULL)
164 #define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
165 #define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
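/*
 * Illustrative use (a sketch; "pdev" is a hypothetical struct pci_dev *):
 * read the Vendor ID of a device that may have been hot-removed and treat
 * an all-ones response as a probable error.  Note that ~0 can also be
 * valid register data, so this is only a heuristic.
 *
 *	u16 vendor;
 *
 *	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	if (PCI_POSSIBLE_ERROR(vendor))
 *		return -ENODEV;
 */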
166
167 /*
168 * pci_power_t values must match the bits in the Capabilities PME_Support
169 * and Control/Status PowerState fields in the Power Management capability.
170 */
171 typedef int __bitwise pci_power_t;
172
173 #define PCI_D0 ((pci_power_t __force) 0)
174 #define PCI_D1 ((pci_power_t __force) 1)
175 #define PCI_D2 ((pci_power_t __force) 2)
176 #define PCI_D3hot ((pci_power_t __force) 3)
177 #define PCI_D3cold ((pci_power_t __force) 4)
178 #define PCI_UNKNOWN ((pci_power_t __force) 5)
179 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
180
181 /* Remember to update this when the list above changes! */
182 extern const char *pci_power_names[];
183
184 static inline const char *pci_power_name(pci_power_t state)
185 {
186 return pci_power_names[1 + (__force int) state];
187 }
188
189 /**
190 * typedef pci_channel_state_t
191 *
192 * The pci_channel state describes connectivity between the CPU and
193 * the PCI device. If some PCI bus between here and the PCI device
194 * has crashed or locked up, this info is reflected here.
195 */
196 typedef unsigned int __bitwise pci_channel_state_t;
197
198 enum {
199 /* I/O channel is in normal state */
200 pci_channel_io_normal = (__force pci_channel_state_t) 1,
201
202 /* I/O to channel is blocked */
203 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
204
205 /* PCI card is dead */
206 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
207 };
208
209 typedef unsigned int __bitwise pcie_reset_state_t;
210
211 enum pcie_reset_state {
212 /* Reset is NOT asserted (Use to deassert reset) */
213 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
214
215 /* Use #PERST to reset PCIe device */
216 pcie_warm_reset = (__force pcie_reset_state_t) 2,
217
218 /* Use PCIe Hot Reset to reset device */
219 pcie_hot_reset = (__force pcie_reset_state_t) 3
220 };
221
222 typedef unsigned short __bitwise pci_dev_flags_t;
223 enum pci_dev_flags {
224 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
225 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
226 /* Device configuration is irrevocably lost if disabled into D3 */
227 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
228 /* Provide indication device is assigned by a Virtual Machine Manager */
229 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
230 /* Flag for quirk use to store if quirk-specific ACS is enabled */
231 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
232 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
233 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
234 /* Do not use bus resets for device */
235 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
236 /* Do not use PM reset even if device advertises NoSoftRst- */
237 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
238 /* Get VPD from function 0 VPD */
239 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
240 /* A non-root bridge where translation occurs, stop alias search here */
241 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
242 /* Do not use FLR even if device advertises PCI_AF_CAP */
243 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
244 /* Don't use Relaxed Ordering for TLPs directed at this device */
245 PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
246 /* Device does honor MSI masking despite saying otherwise */
247 PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
248 };
249
250 enum pci_irq_reroute_variant {
251 INTEL_IRQ_REROUTE_VARIANT = 1,
252 MAX_IRQ_REROUTE_VARIANTS = 3
253 };
254
255 typedef unsigned short __bitwise pci_bus_flags_t;
256 enum pci_bus_flags {
257 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
258 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
259 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
260 PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
261 };
262
263 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
264 enum pcie_link_width {
265 PCIE_LNK_WIDTH_RESRV = 0x00,
266 PCIE_LNK_X1 = 0x01,
267 PCIE_LNK_X2 = 0x02,
268 PCIE_LNK_X4 = 0x04,
269 PCIE_LNK_X8 = 0x08,
270 PCIE_LNK_X12 = 0x0c,
271 PCIE_LNK_X16 = 0x10,
272 PCIE_LNK_X32 = 0x20,
273 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
274 };
275
276 /* See matching string table in pci_speed_string() */
277 enum pci_bus_speed {
278 PCI_SPEED_33MHz = 0x00,
279 PCI_SPEED_66MHz = 0x01,
280 PCI_SPEED_66MHz_PCIX = 0x02,
281 PCI_SPEED_100MHz_PCIX = 0x03,
282 PCI_SPEED_133MHz_PCIX = 0x04,
283 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
284 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
285 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
286 PCI_SPEED_66MHz_PCIX_266 = 0x09,
287 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
288 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
289 AGP_UNKNOWN = 0x0c,
290 AGP_1X = 0x0d,
291 AGP_2X = 0x0e,
292 AGP_4X = 0x0f,
293 AGP_8X = 0x10,
294 PCI_SPEED_66MHz_PCIX_533 = 0x11,
295 PCI_SPEED_100MHz_PCIX_533 = 0x12,
296 PCI_SPEED_133MHz_PCIX_533 = 0x13,
297 PCIE_SPEED_2_5GT = 0x14,
298 PCIE_SPEED_5_0GT = 0x15,
299 PCIE_SPEED_8_0GT = 0x16,
300 PCIE_SPEED_16_0GT = 0x17,
301 PCIE_SPEED_32_0GT = 0x18,
302 PCIE_SPEED_64_0GT = 0x19,
303 PCI_SPEED_UNKNOWN = 0xff,
304 };
305
306 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
307 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
308
309 struct pci_vpd {
310 struct mutex lock;
311 unsigned int len;
312 u8 cap;
313 };
314
315 struct irq_affinity;
316 struct pcie_link_state;
317 struct pci_sriov;
318 struct pci_p2pdma;
319 struct rcec_ea;
320
321 /* The pci_dev structure describes PCI devices */
322 struct pci_dev {
323 struct list_head bus_list; /* Node in per-bus list */
324 struct pci_bus *bus; /* Bus this device is on */
325 struct pci_bus *subordinate; /* Bus this device bridges to */
326
327 void *sysdata; /* Hook for sys-specific extension */
328 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
329 struct pci_slot *slot; /* Physical slot this device is in */
330
331 unsigned int devfn; /* Encoded device & function index */
332 unsigned short vendor;
333 unsigned short device;
334 unsigned short subsystem_vendor;
335 unsigned short subsystem_device;
336 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
337 u8 revision; /* PCI revision, low byte of class word */
338 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
339 #ifdef CONFIG_PCIEAER
340 u16 aer_cap; /* AER capability offset */
341 struct aer_stats *aer_stats; /* AER stats for this device */
342 #endif
343 #ifdef CONFIG_PCIEPORTBUS
344 struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
345 struct pci_dev *rcec; /* Associated RCEC device */
346 #endif
347 u32 devcap; /* PCIe Device Capabilities */
348 u8 pcie_cap; /* PCIe capability offset */
349 u8 msi_cap; /* MSI capability offset */
350 u8 msix_cap; /* MSI-X capability offset */
351 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
352 u8 rom_base_reg; /* Config register controlling ROM */
353 u8 pin; /* Interrupt pin this device uses */
354 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
355 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
356
357 struct pci_driver *driver; /* Driver bound to this device */
358 u64 dma_mask; /* Mask of the bits of bus address this
359 device implements. Normally this is
360 0xffffffff. You only need to change
361 this if your device has broken DMA
362 or supports 64-bit transfers. */
363
364 struct device_dma_parameters dma_parms;
365
366 pci_power_t current_state; /* Current operating state. In ACPI,
367 this is D0-D3, D0 being fully
368 functional, and D3 being off. */
369 u8 pm_cap; /* PM capability offset */
370 unsigned int imm_ready:1; /* Supports Immediate Readiness */
371 unsigned int pme_support:5; /* Bitmask of states from which PME#
372 can be generated */
373 unsigned int pme_poll:1; /* Poll device's PME status bit */
374 unsigned int d1_support:1; /* Low power state D1 is supported */
375 unsigned int d2_support:1; /* Low power state D2 is supported */
376 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
377 unsigned int no_d3cold:1; /* D3cold is forbidden */
378 unsigned int bridge_d3:1; /* Allow D3 for bridge */
379 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
380 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
381 decoding during BAR sizing */
382 unsigned int wakeup_prepared:1;
383 unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
384 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
385 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
386 controlled exclusively by
387 user sysfs */
388 unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
389 bit manually */
390 unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
391 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
392
393 #ifdef CONFIG_PCIEASPM
394 struct pcie_link_state *link_state; /* ASPM link state */
395 u16 l1ss; /* L1SS Capability pointer */
396 unsigned int ltr_path:1; /* Latency Tolerance Reporting
397 supported from root to here */
398 #endif
399 unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
400 unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
401
402 pci_channel_state_t error_state; /* Current connectivity state */
403 struct device dev; /* Generic device interface */
404
405 int cfg_size; /* Size of config space */
406
407 /*
408 * Instead of touching interrupt line and base address registers
409 * directly, use the values stored here. They might be different!
410 */
411 unsigned int irq;
412 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
413 struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
414
415 bool match_driver; /* Skip attaching driver */
416
417 unsigned int transparent:1; /* Subtractive decode bridge */
418 unsigned int io_window:1; /* Bridge has I/O window */
419 unsigned int pref_window:1; /* Bridge has pref mem window */
420 unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
421 unsigned int multifunction:1; /* Multi-function device */
422
423 unsigned int is_busmaster:1; /* Is busmaster */
424 unsigned int no_msi:1; /* May not use MSI */
425 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
426 unsigned int block_cfg_access:1; /* Config space access blocked */
427 unsigned int broken_parity_status:1; /* Generates false positive parity */
428 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
429 unsigned int msi_enabled:1;
430 unsigned int msix_enabled:1;
431 unsigned int ari_enabled:1; /* ARI forwarding */
432 unsigned int ats_enabled:1; /* Address Translation Svc */
433 unsigned int pasid_enabled:1; /* Process Address Space ID */
434 unsigned int pri_enabled:1; /* Page Request Interface */
435 unsigned int is_managed:1; /* Managed via devres */
436 unsigned int is_msi_managed:1; /* MSI release via devres installed */
437 unsigned int needs_freset:1; /* Requires fundamental reset */
438 unsigned int state_saved:1;
439 unsigned int is_physfn:1;
440 unsigned int is_virtfn:1;
441 unsigned int is_hotplug_bridge:1;
442 unsigned int shpc_managed:1; /* SHPC owned by shpchp */
443 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
444 /*
445 * Devices marked as untrusted are the ones that can potentially
446 * execute DMA attacks and similar. They are typically connected
447 * through external ports such as Thunderbolt but not limited to
448 * that. When an IOMMU is enabled they should be getting full
449 * mappings to make sure they cannot access arbitrary memory.
450 */
451 unsigned int untrusted:1;
452 /*
453 * Info from the platform, e.g., ACPI or device tree, may mark a
454 * device as "external-facing". An external-facing device is
455 * itself internal but devices downstream from it are external.
456 */
457 unsigned int external_facing:1;
458 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
459 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
460 unsigned int irq_managed:1;
461 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
462 unsigned int is_probed:1; /* Device probing in progress */
463 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
464 unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
465 unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
466 unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
467 unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
468 pci_dev_flags_t dev_flags;
469 atomic_t enable_cnt; /* pci_enable_device has been called */
470
471 spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
472 u32 saved_config_space[16]; /* Config space saved at suspend time */
473 struct hlist_head saved_cap_space;
474 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
475 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
476
477 #ifdef CONFIG_HOTPLUG_PCI_PCIE
478 unsigned int broken_cmd_compl:1; /* No compl for some cmds */
479 #endif
480 #ifdef CONFIG_PCIE_PTM
481 u16 ptm_cap; /* PTM Capability */
482 unsigned int ptm_root:1;
483 unsigned int ptm_enabled:1;
484 u8 ptm_granularity;
485 #endif
486 #ifdef CONFIG_PCI_MSI
487 void __iomem *msix_base;
488 raw_spinlock_t msi_lock;
489 #endif
490 struct pci_vpd vpd;
491 #ifdef CONFIG_PCIE_DPC
492 u16 dpc_cap;
493 unsigned int dpc_rp_extensions:1;
494 u8 dpc_rp_log_size;
495 #endif
496 #ifdef CONFIG_PCI_ATS
497 union {
498 struct pci_sriov *sriov; /* PF: SR-IOV info */
499 struct pci_dev *physfn; /* VF: related PF */
500 };
501 u16 ats_cap; /* ATS Capability offset */
502 u8 ats_stu; /* ATS Smallest Translation Unit */
503 #endif
504 #ifdef CONFIG_PCI_PRI
505 u16 pri_cap; /* PRI Capability offset */
506 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
507 unsigned int pasid_required:1; /* PRG Response PASID Required */
508 #endif
509 #ifdef CONFIG_PCI_PASID
510 u16 pasid_cap; /* PASID Capability offset */
511 u16 pasid_features;
512 #endif
513 #ifdef CONFIG_PCI_P2PDMA
514 struct pci_p2pdma __rcu *p2pdma;
515 #endif
516 #ifdef CONFIG_PCI_DOE
517 struct xarray doe_mbs; /* Data Object Exchange mailboxes */
518 #endif
519 u16 acs_cap; /* ACS Capability offset */
520 phys_addr_t rom; /* Physical address if not from BAR */
521 size_t romlen; /* Length if not from BAR */
522 /*
523 * Driver name to force a match. Do not set directly, because core
524 * frees it. Use driver_set_override() to set or clear it.
525 */
526 const char *driver_override;
527
528 unsigned long priv_flags; /* Private flags for the PCI driver */
529
530 /* These methods index pci_reset_fn_methods[] */
531 u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
532 };
533
534 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
535 {
536 #ifdef CONFIG_PCI_IOV
537 if (dev->is_virtfn)
538 dev = dev->physfn;
539 #endif
540 return dev;
541 }
542
543 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
544
545 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
546 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
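/*
 * Illustrative use of for_each_pci_dev() (a sketch, not taken from in-tree
 * code): iterate over every PCI device in the system.  pci_get_device()
 * drops the reference on the previous device and takes one on the next,
 * so no explicit pci_dev_put() is needed unless the loop exits early.
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev)
 *		pci_info(pdev, "vendor %04x device %04x\n",
 *			 pdev->vendor, pdev->device);
 */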
547
548 static inline int pci_channel_offline(struct pci_dev *pdev)
549 {
550 return (pdev->error_state != pci_channel_io_normal);
551 }
552
553 /*
554 * In the ACPI spec, the PCI Segment Group number for each PCI host
555 * bridge is limited to a 16-bit value, so (int)-1 is not a valid
556 * PCI domain number and can be used as a sentinel value indicating
557 * that ->domain_nr is not set by the driver
558 * (CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
559 * pci_bus_find_domain_nr()).
560 */
561 #define PCI_DOMAIN_NR_NOT_SET (-1)
562
563 struct pci_host_bridge {
564 struct device dev;
565 struct pci_bus *bus; /* Root bus */
566 struct pci_ops *ops;
567 struct pci_ops *child_ops;
568 void *sysdata;
569 int busnr;
570 int domain_nr;
571 struct list_head windows; /* resource_entry */
572 struct list_head dma_ranges; /* dma ranges resource list */
573 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
574 int (*map_irq)(const struct pci_dev *, u8, u8);
575 void (*release_fn)(struct pci_host_bridge *);
576 void *release_data;
577 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
578 unsigned int no_ext_tags:1; /* No Extended Tags */
579 unsigned int no_inc_mrrs:1; /* No Increase MRRS */
580 unsigned int native_aer:1; /* OS may use PCIe AER */
581 unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
582 unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
583 unsigned int native_pme:1; /* OS may use PCIe PME */
584 unsigned int native_ltr:1; /* OS may use PCIe LTR */
585 unsigned int native_dpc:1; /* OS may use PCIe DPC */
586 unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
587 unsigned int preserve_config:1; /* Preserve FW resource setup */
588 unsigned int size_windows:1; /* Enable root bus sizing */
589 unsigned int msi_domain:1; /* Bridge wants MSI domain */
590
591 /* Resource alignment requirements */
592 resource_size_t (*align_resource)(struct pci_dev *dev,
593 const struct resource *res,
594 resource_size_t start,
595 resource_size_t size,
596 resource_size_t align);
597 unsigned long private[] ____cacheline_aligned;
598 };
599
600 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
601
602 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
603 {
604 return (void *)bridge->private;
605 }
606
607 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
608 {
609 return container_of(priv, struct pci_host_bridge, private);
610 }
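/*
 * Illustrative sketch of the private-data idiom used by host bridge
 * drivers ("struct my_host" and "dev" are hypothetical): allocate the
 * bridge with room for driver state, then convert between the two
 * representations as needed.
 *
 *	struct my_host *host;
 *	struct pci_host_bridge *bridge;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
 *	if (!bridge)
 *		return -ENOMEM;
 *	host = pci_host_bridge_priv(bridge);
 *
 *	// ... later, given only "host":
 *	bridge = pci_host_bridge_from_priv(host);
 */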
611
612 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
613 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
614 size_t priv);
615 void pci_free_host_bridge(struct pci_host_bridge *bridge);
616 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
617
618 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
619 void (*release_fn)(struct pci_host_bridge *),
620 void *release_data);
621
622 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
623
624 /*
625 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
626 * to P2P or CardBus bridge windows) go in a table. Additional ones (for
627 * buses below host bridges or subtractive decode bridges) go in the list.
628 * Use pci_bus_for_each_resource() to iterate through all the resources.
629 */
630
631 /*
632 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
633 * and there's no way to program the bridge with the details of the window.
634 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
635 * decode bit set, because they are explicit and can be programmed with _SRS.
636 */
637 #define PCI_SUBTRACTIVE_DECODE 0x1
638
639 struct pci_bus_resource {
640 struct list_head list;
641 struct resource *res;
642 unsigned int flags;
643 };
644
645 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
646
647 struct pci_bus {
648 struct list_head node; /* Node in list of buses */
649 struct pci_bus *parent; /* Parent bus this bridge is on */
650 struct list_head children; /* List of child buses */
651 struct list_head devices; /* List of devices on this bus */
652 struct pci_dev *self; /* Bridge device as seen by parent */
653 struct list_head slots; /* List of slots on this bus;
654 protected by pci_slot_mutex */
655 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
656 struct list_head resources; /* Address space routed to this bus */
657 struct resource busn_res; /* Bus numbers routed to this bus */
658
659 struct pci_ops *ops; /* Configuration access functions */
660 void *sysdata; /* Hook for sys-specific extension */
661 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
662
663 unsigned char number; /* Bus number */
664 unsigned char primary; /* Number of primary bridge */
665 unsigned char max_bus_speed; /* enum pci_bus_speed */
666 unsigned char cur_bus_speed; /* enum pci_bus_speed */
667 #ifdef CONFIG_PCI_DOMAINS_GENERIC
668 int domain_nr;
669 #endif
670
671 char name[48];
672
673 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
674 pci_bus_flags_t bus_flags; /* Inherited by child buses */
675 struct device *bridge;
676 struct device dev;
677 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
678 struct bin_attribute *legacy_mem; /* Legacy mem */
679 unsigned int is_added:1;
680 unsigned int unsafe_warn:1; /* warned about RW1C config write */
681 };
682
683 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
684
685 static inline u16 pci_dev_id(struct pci_dev *dev)
686 {
687 return PCI_DEVID(dev->bus->number, dev->devfn);
688 }
689
690 /*
691 * Returns true if the PCI bus is root (behind host-PCI bridge),
692 * false otherwise
693 *
694 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
695 * This is incorrect because "virtual" buses added for SR-IOV (via
696 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
697 */
698 static inline bool pci_is_root_bus(struct pci_bus *pbus)
699 {
700 return !(pbus->parent);
701 }
702
703 /**
704 * pci_is_bridge - check if the PCI device is a bridge
705 * @dev: PCI device
706 *
707 * Return true if the PCI device is a bridge, whether or not it has a
708 * subordinate bus.
709 */
710 static inline bool pci_is_bridge(struct pci_dev *dev)
711 {
712 return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
713 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
714 }
715
716 #define for_each_pci_bridge(dev, bus) \
717 list_for_each_entry(dev, &bus->devices, bus_list) \
718 if (!pci_is_bridge(dev)) {} else
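/*
 * Illustrative use (a sketch): visit only the bridges on a bus, e.g. from
 * a hotplug or resource assignment path, skipping non-bridge devices.
 *
 *	struct pci_dev *dev;
 *
 *	for_each_pci_bridge(dev, bus)
 *		pci_info(dev, "bridge to bus %02x\n",
 *			 dev->subordinate ? dev->subordinate->number : 0);
 */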
719
720 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
721 {
722 dev = pci_physfn(dev);
723 if (pci_is_root_bus(dev->bus))
724 return NULL;
725
726 return dev->bus->self;
727 }
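/*
 * Illustrative sketch: walk from a device towards the host bridge, e.g.
 * to find the Root Port above an Endpoint.  pci_upstream_bridge() returns
 * NULL once a root bus is reached, so the loop below terminates.
 *
 *	struct pci_dev *bridge;
 *
 *	for (bridge = pci_upstream_bridge(pdev); bridge;
 *	     bridge = pci_upstream_bridge(bridge))
 *		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
 *			break;
 */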
728
729 #ifdef CONFIG_PCI_MSI
730 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
731 {
732 return pci_dev->msi_enabled || pci_dev->msix_enabled;
733 }
734 #else
735 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
736 #endif
737
738 /* Error values that may be returned by PCI functions */
739 #define PCIBIOS_SUCCESSFUL 0x00
740 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
741 #define PCIBIOS_BAD_VENDOR_ID 0x83
742 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
743 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
744 #define PCIBIOS_SET_FAILED 0x88
745 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
746
747 /* Translate above to generic errno for passing back through non-PCI code */
748 static inline int pcibios_err_to_errno(int err)
749 {
750 if (err <= PCIBIOS_SUCCESSFUL)
751 return err; /* Assume already errno */
752
753 switch (err) {
754 case PCIBIOS_FUNC_NOT_SUPPORTED:
755 return -ENOENT;
756 case PCIBIOS_BAD_VENDOR_ID:
757 return -ENOTTY;
758 case PCIBIOS_DEVICE_NOT_FOUND:
759 return -ENODEV;
760 case PCIBIOS_BAD_REGISTER_NUMBER:
761 return -EFAULT;
762 case PCIBIOS_SET_FAILED:
763 return -EIO;
764 case PCIBIOS_BUFFER_TOO_SMALL:
765 return -ENOSPC;
766 }
767
768 return -ERANGE;
769 }
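/*
 * Illustrative use (a sketch): the config accessors declared below return
 * PCIBIOS_* codes, so a caller that wants to propagate an ordinary errno
 * converts the result.
 *
 *	int ret;
 *	u32 val;
 *
 *	ret = pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */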
770
771 /* Low-level architecture-dependent routines */
772
773 struct pci_ops {
774 int (*add_bus)(struct pci_bus *bus);
775 void (*remove_bus)(struct pci_bus *bus);
776 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
777 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
778 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
779 };
780
781 /*
782 * ACPI needs to be able to access PCI config space before we've done a
783 * PCI bus scan and created pci_bus structures.
784 */
785 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
786 int reg, int len, u32 *val);
787 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
788 int reg, int len, u32 val);
789
790 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
791 typedef u64 pci_bus_addr_t;
792 #else
793 typedef u32 pci_bus_addr_t;
794 #endif
795
796 struct pci_bus_region {
797 pci_bus_addr_t start;
798 pci_bus_addr_t end;
799 };
800
801 struct pci_dynids {
802 spinlock_t lock; /* Protects list, index */
803 struct list_head list; /* For IDs added at runtime */
804 };
805
806
807 /*
808 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
809 * a set of callbacks in struct pci_error_handlers, that device driver
810 * will be notified of PCI bus errors, and will be driven to recovery
811 * when an error occurs.
812 */
813
814 typedef unsigned int __bitwise pci_ers_result_t;
815
816 enum pci_ers_result {
817 /* No result/none/not supported in device driver */
818 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
819
820 /* Device driver can recover without slot reset */
821 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
822
823 /* Device driver wants slot to be reset */
824 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
825
826 /* Device has completely failed, is unrecoverable */
827 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
828
829 /* Device driver is fully recovered and operational */
830 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
831
832 /* No AER capabilities registered for the driver */
833 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
834 };
835
836 /* PCI bus error event callbacks */
837 struct pci_error_handlers {
838 /* PCI bus error detected on this device */
839 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
840 pci_channel_state_t error);
841
842 /* MMIO has been re-enabled, but not DMA */
843 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
844
845 /* PCI slot has been reset */
846 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
847
848 /* PCI function reset prepare or completed */
849 void (*reset_prepare)(struct pci_dev *dev);
850 void (*reset_done)(struct pci_dev *dev);
851
852 /* Device driver may resume normal operations */
853 void (*resume)(struct pci_dev *dev);
854
855 /* Allow device driver to record more details of a correctable error */
856 void (*cor_error_detected)(struct pci_dev *dev);
857 };
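/*
 * Illustrative sketch of a driver wiring up error recovery (all "mydrv_*"
 * names are hypothetical; see Documentation/PCI/pci-error-recovery.rst for
 * the required semantics of each callback):
 *
 *	static const struct pci_error_handlers mydrv_err_handler = {
 *		.error_detected	= mydrv_error_detected,	// stop I/O, return a pci_ers_result_t
 *		.slot_reset	= mydrv_slot_reset,	// re-initialize after reset
 *		.resume		= mydrv_resume,		// resume normal operation
 *	};
 *
 * The table is then referenced from the err_handler field of the driver's
 * struct pci_driver.
 */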
858
859
860 struct module;
861
862 /**
863 * struct pci_driver - PCI driver structure
864 * @node: List of driver structures.
865 * @name: Driver name.
866 * @id_table: Pointer to table of device IDs the driver is
867 * interested in. Most drivers should export this
868 * table using MODULE_DEVICE_TABLE(pci,...).
869 * @probe: This probing function gets called (during execution
870 * of pci_register_driver() for already existing
871 * devices or later if a new device gets inserted) for
872 * all PCI devices which match the ID table and are not
873 * "owned" by the other drivers yet. This function gets
874 * passed a "struct pci_dev \*" for each device whose
875 * entry in the ID table matches the device. The probe
876 * function returns zero when the driver chooses to
877 * take "ownership" of the device or an error code
878 * (negative number) otherwise.
879 * The probe function always gets called from process
880 * context, so it can sleep.
881 * @remove: The remove() function gets called whenever a device
882 * being handled by this driver is removed (either during
883 * deregistration of the driver or when it's manually
884 * pulled out of a hot-pluggable slot).
885 * The remove function always gets called from process
886 * context, so it can sleep.
887 * @suspend: Put device into low power state.
888 * @resume: Wake device from low power state.
889 * (Please see Documentation/power/pci.rst for descriptions
890 * of PCI Power Management and the related functions.)
891 * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
892 * Intended to stop any idling DMA operations.
893 * Useful for enabling wake-on-lan (NIC) or changing
894 * the power state of a device before reboot.
895 * e.g. drivers/net/e100.c.
896 * @sriov_configure: Optional driver callback to allow configuration of
897 * number of VFs to enable via sysfs "sriov_numvfs" file.
898 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
899 * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
900 * This will change MSI-X Table Size in the VF Message Control
901 * registers.
902 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
903 * MSI-X vectors available for distribution to the VFs.
904 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
905 * @groups: Sysfs attribute groups.
906 * @dev_groups: Attributes attached to the device that will be
907 * created once it is bound to the driver.
908 * @driver: Driver model structure.
909 * @dynids: List of dynamically added device IDs.
910 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
911 * For most device drivers, no need to care about this flag
912 * as long as all DMAs are handled through the kernel DMA API.
913 * For some special ones, for example VFIO drivers, they know
914 * how to manage the DMA themselves and set this flag so that
915 * the IOMMU layer will allow them to setup and manage their
916 * own I/O address space.
917 */
918 struct pci_driver {
919 struct list_head node;
920 const char *name;
921 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
922 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
923 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
924 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
925 int (*resume)(struct pci_dev *dev); /* Device woken up */
926 void (*shutdown)(struct pci_dev *dev);
927 int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
928 int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
929 u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
930 const struct pci_error_handlers *err_handler;
931 const struct attribute_group **groups;
932 const struct attribute_group **dev_groups;
933 struct device_driver driver;
934 struct pci_dynids dynids;
935 bool driver_managed_dma;
936 };
937
938 static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
939 {
940 return drv ? container_of(drv, struct pci_driver, driver) : NULL;
941 }
942
943 /**
944 * PCI_DEVICE - macro used to describe a specific PCI device
945 * @vend: the 16 bit PCI Vendor ID
946 * @dev: the 16 bit PCI Device ID
947 *
948 * This macro is used to create a struct pci_device_id that matches a
949 * specific device. The subvendor and subdevice fields will be set to
950 * PCI_ANY_ID.
951 */
952 #define PCI_DEVICE(vend,dev) \
953 .vendor = (vend), .device = (dev), \
954 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
955
956 /**
957 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
958 * override_only flags.
959 * @vend: the 16 bit PCI Vendor ID
960 * @dev: the 16 bit PCI Device ID
961 * @driver_override: the 32 bit PCI Device override_only
962 *
963 * This macro is used to create a struct pci_device_id that matches only a
964 * driver_override device. The subvendor and subdevice fields will be set to
965 * PCI_ANY_ID.
966 */
967 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
968 .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
969 .subdevice = PCI_ANY_ID, .override_only = (driver_override)
970
971 /**
972 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
973 * "driver_override" PCI device.
974 * @vend: the 16 bit PCI Vendor ID
975 * @dev: the 16 bit PCI Device ID
976 *
977 * This macro is used to create a struct pci_device_id that matches a
978 * specific device. The subvendor and subdevice fields will be set to
979 * PCI_ANY_ID and the driver_override will be set to
980 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
981 */
982 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
983 PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
984
985 /**
986 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
987 * @vend: the 16 bit PCI Vendor ID
988 * @dev: the 16 bit PCI Device ID
989 * @subvend: the 16 bit PCI Subvendor ID
990 * @subdev: the 16 bit PCI Subdevice ID
991 *
992 * This macro is used to create a struct pci_device_id that matches a
993 * specific device with subsystem information.
994 */
995 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
996 .vendor = (vend), .device = (dev), \
997 .subvendor = (subvend), .subdevice = (subdev)
998
999 /**
1000 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1001 * @dev_class: the class, subclass, prog-if triple for this device
1002 * @dev_class_mask: the class mask for this device
1003 *
1004 * This macro is used to create a struct pci_device_id that matches a
1005 * specific PCI class. The vendor, device, subvendor, and subdevice
1006 * fields will be set to PCI_ANY_ID.
1007 */
1008 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1009 .class = (dev_class), .class_mask = (dev_class_mask), \
1010 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1011 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1012
1013 /**
1014 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1015 * @vend: the vendor name
1016 * @dev: the 16 bit PCI Device ID
1017 *
1018 * This macro is used to create a struct pci_device_id that matches a
1019 * specific PCI device. The subvendor, and subdevice fields will be set
1020 * to PCI_ANY_ID. The macro allows the next field to follow as the device
1021 * private data.
1022 */
1023 #define PCI_VDEVICE(vend, dev) \
1024 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1025 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1026
1027 /**
1028 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1029 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1030 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1031 * @data: the driver data to be filled
1032 *
1033 * This macro is used to create a struct pci_device_id that matches a
1034 * specific PCI device. The subvendor, and subdevice fields will be set
1035 * to PCI_ANY_ID.
1036 */
1037 #define PCI_DEVICE_DATA(vend, dev, data) \
1038 .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1039 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1040 .driver_data = (kernel_ulong_t)(data)
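/*
 * Illustrative sketch of how the match macros and struct pci_driver fit
 * together (the "mydrv" driver, its IDs and its callbacks are
 * hypothetical):
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 *		{ }	// terminating entry
 *	};
 *	MODULE_DEVICE_TABLE(pci, mydrv_ids);
 *
 *	static struct pci_driver mydrv_driver = {
 *		.name		= "mydrv",
 *		.id_table	= mydrv_ids,
 *		.probe		= mydrv_probe,
 *		.remove		= mydrv_remove,
 *	};
 *	module_pci_driver(mydrv_driver);
 */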
1041
1042 enum {
1043 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
1044 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
1045 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
1046 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
1047 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
1048 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
1049 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
1050 };
1051
1052 #define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
1053 #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
1054 #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
1055 #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
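/*
 * Illustrative use (a sketch) with the pci_alloc_irq_vectors() family of
 * allocators; the vector counts are arbitrary example values.  The flags
 * describe which interrupt types the caller is willing to accept.
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 32,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return nvec;
 */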
1056
1057 /* These external functions are only available when PCI support is enabled */
1058 #ifdef CONFIG_PCI
1059
1060 extern unsigned int pci_flags;
1061
1062 static inline void pci_set_flags(int flags) { pci_flags = flags; }
1063 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
1064 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
1065 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1066
1067 void pcie_bus_configure_settings(struct pci_bus *bus);
1068
1069 enum pcie_bus_config_types {
1070 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
1071 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
1072 PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */
1073 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
1074 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
1075 };
1076
1077 extern enum pcie_bus_config_types pcie_bus_config;
1078
1079 extern struct bus_type pci_bus_type;
1080
1081 /* Do NOT directly access these two variables, unless you are arch-specific PCI
1082 * code, or PCI core code. */
1083 extern struct list_head pci_root_buses; /* List of all known PCI buses */
1084 /* Some device drivers need to know if PCI is initialized */
1085 int no_pci_devices(void);
1086
1087 void pcibios_resource_survey_bus(struct pci_bus *bus);
1088 void pcibios_bus_add_device(struct pci_dev *pdev);
1089 void pcibios_add_bus(struct pci_bus *bus);
1090 void pcibios_remove_bus(struct pci_bus *bus);
1091 void pcibios_fixup_bus(struct pci_bus *);
1092 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1093 /* Architecture-specific versions may override this (weak) */
1094 char *pcibios_setup(char *str);
1095
1096 /* Used only when drivers/pci/setup.c is used */
1097 resource_size_t pcibios_align_resource(void *, const struct resource *,
1098 resource_size_t,
1099 resource_size_t);
1100
1101 /* Weak but can be overridden by arch */
1102 void pci_fixup_cardbus(struct pci_bus *);
1103
1104 /* Generic PCI functions used internally */
1105
1106 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1107 struct resource *res);
1108 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1109 struct pci_bus_region *region);
1110 void pcibios_scan_specific_bus(int busn);
1111 struct pci_bus *pci_find_bus(int domain, int busnr);
1112 void pci_bus_add_devices(const struct pci_bus *bus);
1113 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1114 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1115 struct pci_ops *ops, void *sysdata,
1116 struct list_head *resources);
1117 int pci_host_probe(struct pci_host_bridge *bridge);
1118 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1119 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1120 void pci_bus_release_busn_res(struct pci_bus *b);
1121 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1122 struct pci_ops *ops, void *sysdata,
1123 struct list_head *resources);
1124 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1125 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1126 int busnr);
1127 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1128 const char *name,
1129 struct hotplug_slot *hotplug);
1130 void pci_destroy_slot(struct pci_slot *slot);
1131 #ifdef CONFIG_SYSFS
1132 void pci_dev_assign_slot(struct pci_dev *dev);
1133 #else
1134 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1135 #endif
1136 int pci_scan_slot(struct pci_bus *bus, int devfn);
1137 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1138 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1139 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1140 void pci_bus_add_device(struct pci_dev *dev);
1141 void pci_read_bridge_bases(struct pci_bus *child);
1142 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1143 struct resource *res);
1144 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1145 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1146 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1147 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1148 void pci_dev_put(struct pci_dev *dev);
1149 void pci_remove_bus(struct pci_bus *b);
1150 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1151 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1152 void pci_stop_root_bus(struct pci_bus *bus);
1153 void pci_remove_root_bus(struct pci_bus *bus);
1154 void pci_setup_cardbus(struct pci_bus *bus);
1155 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1156 void pci_sort_breadthfirst(void);
1157 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1158 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1159
1160 /* Generic PCI functions exported to card drivers */
1161
1162 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1163 u8 pci_find_capability(struct pci_dev *dev, int cap);
1164 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1165 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1166 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1167 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1168 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1169 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1170 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1171 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1172
1173 u64 pci_get_dsn(struct pci_dev *dev);
1174
1175 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1176 struct pci_dev *from);
1177 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1178 unsigned int ss_vendor, unsigned int ss_device,
1179 struct pci_dev *from);
1180 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1181 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1182 unsigned int devfn);
1183 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1184 int pci_dev_present(const struct pci_device_id *ids);
1185
1186 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1187 int where, u8 *val);
1188 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1189 int where, u16 *val);
1190 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1191 int where, u32 *val);
1192 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1193 int where, u8 val);
1194 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1195 int where, u16 val);
1196 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1197 int where, u32 val);
1198
1199 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1200 int where, int size, u32 *val);
1201 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1202 int where, int size, u32 val);
1203 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1204 int where, int size, u32 *val);
1205 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1206 int where, int size, u32 val);
1207
1208 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1209
1210 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1211 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1212 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1213 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1214 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1215 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1216
1217 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1218 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1219 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1220 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1221 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1222 u16 clear, u16 set);
1223 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1224 u16 clear, u16 set);
1225 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1226 u32 clear, u32 set);
1227
1228 /**
1229 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1230 * @dev: PCI device structure of the PCI Express device
1231 * @pos: PCI Express Capability Register
1232 * @clear: Clear bitmask
1233 * @set: Set bitmask
1234 *
1235 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1236 * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1237 * Capability Registers are accessed concurrently in RMW fashion, hence
1238 * require locking which is handled transparently to the caller.
1239 */
1240 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1241 int pos,
1242 u16 clear, u16 set)
1243 {
1244 switch (pos) {
1245 case PCI_EXP_LNKCTL:
1246 case PCI_EXP_RTCTL:
1247 return pcie_capability_clear_and_set_word_locked(dev, pos,
1248 clear, set);
1249 default:
1250 return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1251 clear, set);
1252 }
1253 }
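/*
 * Illustrative use (a sketch): enable ASPM L1 in the Link Control register
 * while leaving the other bits untouched.  PCI_EXP_LNKCTL is one of the
 * registers for which the read-modify-write is performed under
 * pcie_cap_lock.
 *
 *	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_ASPMC,
 *					   PCI_EXP_LNKCTL_ASPM_L1);
 */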
1254
1255 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1256 u16 set)
1257 {
1258 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1259 }
1260
1261 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1262 u32 set)
1263 {
1264 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1265 }
1266
1267 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1268 u16 clear)
1269 {
1270 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1271 }
1272
1273 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1274 u32 clear)
1275 {
1276 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1277 }
1278
1279 /* User-space driven config access */
1280 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1281 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1282 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1283 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1284 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1285 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1286
1287 int __must_check pci_enable_device(struct pci_dev *dev);
1288 int __must_check pci_enable_device_io(struct pci_dev *dev);
1289 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1290 int __must_check pci_reenable_device(struct pci_dev *);
1291 int __must_check pcim_enable_device(struct pci_dev *pdev);
1292 void pcim_pin_device(struct pci_dev *pdev);
1293
1294 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1295 {
1296 /*
1297 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1298 * writable and no quirk has marked the feature broken.
1299 */
1300 return !pdev->broken_intx_masking;
1301 }
1302
1303 static inline int pci_is_enabled(struct pci_dev *pdev)
1304 {
1305 return (atomic_read(&pdev->enable_cnt) > 0);
1306 }
1307
1308 static inline int pci_is_managed(struct pci_dev *pdev)
1309 {
1310 return pdev->is_managed;
1311 }
1312
1313 void pci_disable_device(struct pci_dev *dev);
1314
1315 extern unsigned int pcibios_max_latency;
1316 void pci_set_master(struct pci_dev *dev);
1317 void pci_clear_master(struct pci_dev *dev);
1318
1319 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1320 int pci_set_cacheline_size(struct pci_dev *dev);
1321 int __must_check pci_set_mwi(struct pci_dev *dev);
1322 int __must_check pcim_set_mwi(struct pci_dev *dev);
1323 int pci_try_set_mwi(struct pci_dev *dev);
1324 void pci_clear_mwi(struct pci_dev *dev);
1325 void pci_disable_parity(struct pci_dev *dev);
1326 void pci_intx(struct pci_dev *dev, int enable);
1327 bool pci_check_and_mask_intx(struct pci_dev *dev);
1328 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1329 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1330 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1331 int pcix_get_max_mmrbc(struct pci_dev *dev);
1332 int pcix_get_mmrbc(struct pci_dev *dev);
1333 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1334 int pcie_get_readrq(struct pci_dev *dev);
1335 int pcie_set_readrq(struct pci_dev *dev, int rq);
1336 int pcie_get_mps(struct pci_dev *dev);
1337 int pcie_set_mps(struct pci_dev *dev, int mps);
1338 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1339 enum pci_bus_speed *speed,
1340 enum pcie_link_width *width);
1341 void pcie_print_link_status(struct pci_dev *dev);
1342 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1343 int pcie_flr(struct pci_dev *dev);
1344 int __pci_reset_function_locked(struct pci_dev *dev);
1345 int pci_reset_function(struct pci_dev *dev);
1346 int pci_reset_function_locked(struct pci_dev *dev);
1347 int pci_try_reset_function(struct pci_dev *dev);
1348 int pci_probe_reset_slot(struct pci_slot *slot);
1349 int pci_probe_reset_bus(struct pci_bus *bus);
1350 int pci_reset_bus(struct pci_dev *dev);
1351 void pci_reset_secondary_bus(struct pci_dev *dev);
1352 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1353 void pci_update_resource(struct pci_dev *dev, int resno);
1354 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1355 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1356 void pci_release_resource(struct pci_dev *dev, int resno);
1357 static inline int pci_rebar_bytes_to_size(u64 bytes)
1358 {
1359 bytes = roundup_pow_of_two(bytes);
1360
1361 /* Return BAR size as defined in the resizable BAR specification */
1362 return max(ilog2(bytes), 20) - 20;
1363 }
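/*
 * Resizable BAR sizes are encoded as log2(size) - 20, i.e. 0 means 1 MB.
 * For example (the input is rounded up to a power of two first):
 *
 *	pci_rebar_bytes_to_size(SZ_1M)   == 0
 *	pci_rebar_bytes_to_size(SZ_256M) == 8
 *	pci_rebar_bytes_to_size(SZ_8G)   == 13
 */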
1364
1365 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1366 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1367 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1368 bool pci_device_is_present(struct pci_dev *pdev);
1369 void pci_ignore_hotplug(struct pci_dev *dev);
1370 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1371 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1372
1373 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1374 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1375 const char *fmt, ...);
1376 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1377
1378 /* ROM control related routines */
1379 int pci_enable_rom(struct pci_dev *pdev);
1380 void pci_disable_rom(struct pci_dev *pdev);
1381 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1382 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1383
1384 /* Power management related routines */
1385 int pci_save_state(struct pci_dev *dev);
1386 void pci_restore_state(struct pci_dev *dev);
1387 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1388 int pci_load_saved_state(struct pci_dev *dev,
1389 struct pci_saved_state *state);
1390 int pci_load_and_free_saved_state(struct pci_dev *dev,
1391 struct pci_saved_state **state);
1392 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1393 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1394 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1395 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1396 void pci_pme_active(struct pci_dev *dev, bool enable);
1397 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1398 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1399 int pci_prepare_to_sleep(struct pci_dev *dev);
1400 int pci_back_from_sleep(struct pci_dev *dev);
1401 bool pci_dev_run_wake(struct pci_dev *dev);
1402 void pci_d3cold_enable(struct pci_dev *dev);
1403 void pci_d3cold_disable(struct pci_dev *dev);
1404 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1405 void pci_resume_bus(struct pci_bus *bus);
1406 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
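/*
 * Rough sketch of how the routines above fit together in a legacy-style
 * suspend/resume pair (illustrative only; the callback names are made up
 * and new drivers normally rely on struct dev_pm_ops instead):
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot, true);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */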
1407
1408 /* For use by arch with custom probe code */
1409 void set_pcie_port_type(struct pci_dev *pdev);
1410 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1411
1412 /* Functions for PCI Hotplug drivers to use */
1413 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1414 unsigned int pci_rescan_bus(struct pci_bus *bus);
1415 void pci_lock_rescan_remove(void);
1416 void pci_unlock_rescan_remove(void);
1417
1418 /* Vital Product Data routines */
1419 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1420 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1421 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1422 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1423
1424 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1425 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1426 void pci_bus_assign_resources(const struct pci_bus *bus);
1427 void pci_bus_claim_resources(struct pci_bus *bus);
1428 void pci_bus_size_bridges(struct pci_bus *bus);
1429 int pci_claim_resource(struct pci_dev *, int);
1430 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1431 void pci_assign_unassigned_resources(void);
1432 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1433 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1434 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1435 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1436 int pci_enable_resources(struct pci_dev *, int mask);
1437 void pci_assign_irq(struct pci_dev *dev);
1438 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1439 #define HAVE_PCI_REQ_REGIONS 2
1440 int __must_check pci_request_regions(struct pci_dev *, const char *);
1441 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1442 void pci_release_regions(struct pci_dev *);
1443 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1444 void pci_release_region(struct pci_dev *, int);
1445 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1446 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1447 void pci_release_selected_regions(struct pci_dev *, int);
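/*
 * Typical probe-time use of the region helpers above (a sketch; the
 * driver name string and error handling are illustrative):
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_regions(pdev, "my_driver");
 *	if (err) {
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 *
 *	// On teardown, undo in reverse order:
 *	// pci_release_regions(pdev); pci_disable_device(pdev);
 */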
1448
1449 static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev * pdev,unsigned int offset,unsigned int len,const char * name)1450 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1451 unsigned int len, const char *name)
1452 {
1453 return __request_region(&pdev->driver_exclusive_resource, offset, len,
1454 name, IORESOURCE_EXCLUSIVE);
1455 }
1456
pci_release_config_region(struct pci_dev * pdev,unsigned int offset,unsigned int len)1457 static inline void pci_release_config_region(struct pci_dev *pdev,
1458 unsigned int offset,
1459 unsigned int len)
1460 {
1461 __release_region(&pdev->driver_exclusive_resource, offset, len);
1462 }
1463
1464 /* drivers/pci/bus.c */
1465 void pci_add_resource(struct list_head *resources, struct resource *res);
1466 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1467 resource_size_t offset);
1468 void pci_free_resource_list(struct list_head *resources);
1469 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1470 unsigned int flags);
1471 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1472 void pci_bus_remove_resources(struct pci_bus *bus);
1473 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1474 int devm_request_pci_bus_resources(struct device *dev,
1475 struct list_head *resources);
1476
1477 /* Temporary until a new and working PCI SBR API is in place */
1478 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1479
1480 #define __pci_bus_for_each_res0(bus, res, ...) \
1481 for (unsigned int __b = 0; \
1482 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1483 __b++)
1484
1485 #define __pci_bus_for_each_res1(bus, res, __b) \
1486 for (__b = 0; \
1487 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1488 __b++)
1489
1490 /**
1491 * pci_bus_for_each_resource - iterate over PCI bus resources
1492 * @bus: the PCI bus
1493 * @res: pointer to the current resource
1494 * @...: optional index of the current resource
1495 *
1496 * Iterate over PCI bus resources. The iteration first walks the PCI bus
1497 * resource array, which holds at most %PCI_BRIDGE_RESOURCE_NUM entries,
1498 * and then continues with the separate list of additional resources, if
1499 * that list is not empty. That is why the logical OR is used in the loop.
1500 *
1501 * Possible usage:
1502 *
1503 * struct pci_bus *bus = ...;
1504 * struct resource *res;
1505 * unsigned int i;
1506 *
1507 * // With optional index
1508 * pci_bus_for_each_resource(bus, res, i)
1509 * pr_info("PCI bus resource[%u]: %pR\n", i, res);
1510 *
1511 * // Without index
1512 * pci_bus_for_each_resource(bus, res)
1513 * _do_something_(res);
1514 */
1515 #define pci_bus_for_each_resource(bus, res, ...) \
1516 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
1517 (bus, res, __VA_ARGS__)
1518
1519 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1520 struct resource *res, resource_size_t size,
1521 resource_size_t align, resource_size_t min,
1522 unsigned long type_mask,
1523 resource_size_t (*alignf)(void *,
1524 const struct resource *,
1525 resource_size_t,
1526 resource_size_t),
1527 void *alignf_data);
1528
1529
1530 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1531 resource_size_t size);
1532 unsigned long pci_address_to_pio(phys_addr_t addr);
1533 phys_addr_t pci_pio_to_address(unsigned long pio);
1534 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1535 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1536 phys_addr_t phys_addr);
1537 void pci_unmap_iospace(struct resource *res);
1538 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1539 resource_size_t offset,
1540 resource_size_t size);
1541 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1542 struct resource *res);
1543
pci_bus_address(struct pci_dev * pdev,int bar)1544 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1545 {
1546 struct pci_bus_region region;
1547
1548 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1549 return region.start;
1550 }
1551
1552 /* Proper probing supporting hot-pluggable devices */
1553 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1554 const char *mod_name);
1555
1556 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1557 #define pci_register_driver(driver) \
1558 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1559
1560 void pci_unregister_driver(struct pci_driver *dev);
1561
1562 /**
1563 * module_pci_driver() - Helper macro for registering a PCI driver
1564 * @__pci_driver: pci_driver struct
1565 *
1566 * Helper macro for PCI drivers which do not do anything special in module
1567 * init/exit. This eliminates a lot of boilerplate. Each module may only
1568 * use this macro once, and calling it replaces module_init() and module_exit().
1569 */
1570 #define module_pci_driver(__pci_driver) \
1571 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
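/*
 * Minimal sketch of a driver built on module_pci_driver() (every name
 * below, including the ID table and the probe/remove callbacks, is
 * hypothetical):
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_ids);
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *	module_pci_driver(my_driver);
 */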
1572
1573 /**
1574 * builtin_pci_driver() - Helper macro for registering a PCI driver
1575 * @__pci_driver: pci_driver struct
1576 *
1577 * Helper macro for PCI drivers which do not do anything special in their
1578 * init code. This eliminates a lot of boilerplate. Each driver may only
1579 * use this macro once, and calling it replaces device_initcall(...).
1580 */
1581 #define builtin_pci_driver(__pci_driver) \
1582 builtin_driver(__pci_driver, pci_register_driver)
1583
1584 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1585 int pci_add_dynid(struct pci_driver *drv,
1586 unsigned int vendor, unsigned int device,
1587 unsigned int subvendor, unsigned int subdevice,
1588 unsigned int class, unsigned int class_mask,
1589 unsigned long driver_data);
1590 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1591 struct pci_dev *dev);
1592 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1593 int pass);
1594
1595 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1596 void *userdata);
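/*
 * Example callback for pci_walk_bus() (a sketch; the counting logic is
 * purely illustrative). Returning non-zero from the callback stops the
 * walk early:
 *
 *	static int count_devices(struct pci_dev *pdev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	pci_walk_bus(bus, count_devices, &count);
 */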
1597 int pci_cfg_space_size(struct pci_dev *dev);
1598 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1599 void pci_setup_bridge(struct pci_bus *bus);
1600 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1601 unsigned long type);
1602
1603 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1604 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1605
1606 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1607 unsigned int command_bits, u32 flags);
1608
1609 /*
1610 * Virtual interrupts allow more interrupts to be allocated than the
1611 * device has MSI-X table entries for. These extra vectors are not
1612 * programmed into the device's MSI-X table and must be handled by the
1613 * driver through some other means.
1614 */
1615 #define PCI_IRQ_VIRTUAL (1 << 4)
1616
1617 #define PCI_IRQ_ALL_TYPES \
1618 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
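/*
 * Typical use of the vector allocation API declared below (a sketch; the
 * vector counts, handler and context pointer are illustrative):
 *
 *	nvecs = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvecs < 0)
 *		return nvecs;
 *
 *	err = request_irq(pci_irq_vector(pdev, 0), my_handler, 0,
 *			  "my_driver", ctx);
 *
 *	// On teardown: free_irq() each vector, then pci_free_irq_vectors().
 */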
1619
1620 #include <linux/dmapool.h>
1621
1622 struct msix_entry {
1623 u32 vector; /* Written by the kernel with the allocated vector */
1624 u16 entry; /* Written by the driver to select the MSI-X table entry */
1625 };
1626
1627 struct msi_domain_template;
1628
1629 #ifdef CONFIG_PCI_MSI
1630 int pci_msi_vec_count(struct pci_dev *dev);
1631 void pci_disable_msi(struct pci_dev *dev);
1632 int pci_msix_vec_count(struct pci_dev *dev);
1633 void pci_disable_msix(struct pci_dev *dev);
1634 void pci_restore_msi_state(struct pci_dev *dev);
1635 int pci_msi_enabled(void);
1636 int pci_enable_msi(struct pci_dev *dev);
1637 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1638 int minvec, int maxvec);
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1639 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1640 struct msix_entry *entries, int nvec)
1641 {
1642 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1643 if (rc < 0)
1644 return rc;
1645 return 0;
1646 }
1647 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1648 unsigned int max_vecs, unsigned int flags);
1649 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1650 unsigned int max_vecs, unsigned int flags,
1651 struct irq_affinity *affd);
1652
1653 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1654 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1655 const struct irq_affinity_desc *affdesc);
1656 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1657
1658 void pci_free_irq_vectors(struct pci_dev *dev);
1659 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1660 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1661 bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
1662 unsigned int hwsize, void *data);
1663 struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
1664 const struct irq_affinity_desc *affdesc);
1665 void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
1666
1667 #else
pci_msi_vec_count(struct pci_dev * dev)1668 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msi(struct pci_dev * dev)1669 static inline void pci_disable_msi(struct pci_dev *dev) { }
pci_msix_vec_count(struct pci_dev * dev)1670 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msix(struct pci_dev * dev)1671 static inline void pci_disable_msix(struct pci_dev *dev) { }
pci_restore_msi_state(struct pci_dev * dev)1672 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
pci_msi_enabled(void)1673 static inline int pci_msi_enabled(void) { return 0; }
pci_enable_msi(struct pci_dev * dev)1674 static inline int pci_enable_msi(struct pci_dev *dev)
1675 { return -ENOSYS; }
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)1676 static inline int pci_enable_msix_range(struct pci_dev *dev,
1677 struct msix_entry *entries, int minvec, int maxvec)
1678 { return -ENOSYS; }
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1679 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1680 struct msix_entry *entries, int nvec)
1681 { return -ENOSYS; }
1682
1683 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1684 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1685 unsigned int max_vecs, unsigned int flags,
1686 struct irq_affinity *aff_desc)
1687 {
1688 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1689 return 1;
1690 return -ENOSPC;
1691 }
1692 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)1693 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1694 unsigned int max_vecs, unsigned int flags)
1695 {
1696 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1697 flags, NULL);
1698 }
1699
pci_msix_can_alloc_dyn(struct pci_dev * dev)1700 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1701 { return false; }
pci_msix_alloc_irq_at(struct pci_dev * dev,unsigned int index,const struct irq_affinity_desc * affdesc)1702 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1703 const struct irq_affinity_desc *affdesc)
1704 {
1705 struct msi_map map = { .index = -ENOSYS, };
1706
1707 return map;
1708 }
1709
pci_msix_free_irq(struct pci_dev * pdev,struct msi_map map)1710 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1711 {
1712 }
1713
pci_free_irq_vectors(struct pci_dev * dev)1714 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1715 {
1716 }
1717
pci_irq_vector(struct pci_dev * dev,unsigned int nr)1718 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1719 {
1720 if (WARN_ON_ONCE(nr > 0))
1721 return -EINVAL;
1722 return dev->irq;
1723 }
pci_irq_get_affinity(struct pci_dev * pdev,int vec)1724 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1725 int vec)
1726 {
1727 return cpu_possible_mask;
1728 }
1729
pci_create_ims_domain(struct pci_dev * pdev,const struct msi_domain_template * template,unsigned int hwsize,void * data)1730 static inline bool pci_create_ims_domain(struct pci_dev *pdev,
1731 const struct msi_domain_template *template,
1732 unsigned int hwsize, void *data)
1733 { return false; }
1734
pci_ims_alloc_irq(struct pci_dev * pdev,union msi_instance_cookie * icookie,const struct irq_affinity_desc * affdesc)1735 static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
1736 union msi_instance_cookie *icookie,
1737 const struct irq_affinity_desc *affdesc)
1738 {
1739 struct msi_map map = { .index = -ENOSYS, };
1740
1741 return map;
1742 }
1743
pci_ims_free_irq(struct pci_dev * pdev,struct msi_map map)1744 static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
1745 {
1746 }
1747
1748 #endif
1749
1750 /**
1751 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1752 * @d: the INTx IRQ domain
1753 * @node: the DT node for the device whose interrupt we're translating
1754 * @intspec: the interrupt specifier data from the DT
1755 * @intsize: the number of entries in @intspec
1756 * @out_hwirq: pointer at which to write the hwirq number
1757 * @out_type: pointer at which to write the interrupt type
1758 *
1759 * Translate a PCI INTx interrupt number from the device tree, in the range 1-4
1760 * as stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1761 * 0-3 suitable for use in a 4-entry IRQ domain. That is, subtract one from the
1762 * INTx value to obtain the hwirq number.
1763 *
1764 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1765 */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1766 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1767 struct device_node *node,
1768 const u32 *intspec,
1769 unsigned int intsize,
1770 unsigned long *out_hwirq,
1771 unsigned int *out_type)
1772 {
1773 const u32 intx = intspec[0];
1774
1775 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1776 return -EINVAL;
1777
1778 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1779 return 0;
1780 }
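/*
 * pci_irqd_intx_xlate() is intended to be used as the .xlate callback of
 * an INTx IRQ domain, roughly like this (a sketch; the .map callback is
 * the host bridge driver's own and <linux/irqdomain.h> is required):
 *
 *	static const struct irq_domain_ops my_intx_domain_ops = {
 *		.map	= my_intx_map,
 *		.xlate	= pci_irqd_intx_xlate,
 *	};
 */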
1781
1782 #ifdef CONFIG_PCIEPORTBUS
1783 extern bool pcie_ports_disabled;
1784 extern bool pcie_ports_native;
1785 #else
1786 #define pcie_ports_disabled true
1787 #define pcie_ports_native false
1788 #endif
1789
1790 #define PCIE_LINK_STATE_L0S BIT(0)
1791 #define PCIE_LINK_STATE_L1 BIT(1)
1792 #define PCIE_LINK_STATE_CLKPM BIT(2)
1793 #define PCIE_LINK_STATE_L1_1 BIT(3)
1794 #define PCIE_LINK_STATE_L1_2 BIT(4)
1795 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
1796 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
1797 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\
1798 PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\
1799 PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\
1800 PCIE_LINK_STATE_L1_2_PCIPM)
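/*
 * Example of a driver opting out of specific ASPM states using the flags
 * above (a sketch; whether doing so is appropriate is device-specific):
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
 */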
1801
1802 #ifdef CONFIG_PCIEASPM
1803 int pci_disable_link_state(struct pci_dev *pdev, int state);
1804 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1805 int pci_enable_link_state(struct pci_dev *pdev, int state);
1806 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1807 void pcie_no_aspm(void);
1808 bool pcie_aspm_support_enabled(void);
1809 bool pcie_aspm_enabled(struct pci_dev *pdev);
1810 #else
pci_disable_link_state(struct pci_dev * pdev,int state)1811 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1812 { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1813 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1814 { return 0; }
pci_enable_link_state(struct pci_dev * pdev,int state)1815 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1816 { return 0; }
pci_enable_link_state_locked(struct pci_dev * pdev,int state)1817 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1818 { return 0; }
pcie_no_aspm(void)1819 static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1820 static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1821 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1822 #endif
1823
1824 #ifdef CONFIG_PCIEAER
1825 bool pci_aer_available(void);
1826 #else
pci_aer_available(void)1827 static inline bool pci_aer_available(void) { return false; }
1828 #endif
1829
1830 bool pci_ats_disabled(void);
1831
1832 #ifdef CONFIG_PCIE_PTM
1833 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1834 void pci_disable_ptm(struct pci_dev *dev);
1835 bool pcie_ptm_enabled(struct pci_dev *dev);
1836 #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1837 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1838 { return -EINVAL; }
pci_disable_ptm(struct pci_dev * dev)1839 static inline void pci_disable_ptm(struct pci_dev *dev) { }
pcie_ptm_enabled(struct pci_dev * dev)1840 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1841 { return false; }
1842 #endif
1843
1844 void pci_cfg_access_lock(struct pci_dev *dev);
1845 bool pci_cfg_access_trylock(struct pci_dev *dev);
1846 void pci_cfg_access_unlock(struct pci_dev *dev);
1847
1848 void pci_dev_lock(struct pci_dev *dev);
1849 int pci_dev_trylock(struct pci_dev *dev);
1850 void pci_dev_unlock(struct pci_dev *dev);
1851
1852 /*
1853 * PCI domain support. Sometimes called a PCI segment (e.g., by ACPI),
1854 * a PCI domain is defined to be a set of PCI buses which share
1855 * configuration space.
1856 */
1857 #ifdef CONFIG_PCI_DOMAINS
1858 extern int pci_domains_supported;
1859 #else
1860 enum { pci_domains_supported = 0 };
pci_domain_nr(struct pci_bus * bus)1861 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_proc_domain(struct pci_bus * bus)1862 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1863 #endif /* CONFIG_PCI_DOMAINS */
1864
1865 /*
1866 * Generic implementation for PCI domain support. If your
1867 * architecture does not need custom management of PCI
1868 * domains, this implementation will be used.
1869 */
1870 #ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_domain_nr(struct pci_bus * bus)1871 static inline int pci_domain_nr(struct pci_bus *bus)
1872 {
1873 return bus->domain_nr;
1874 }
1875 #ifdef CONFIG_ACPI
1876 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1877 #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)1878 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1879 { return 0; }
1880 #endif
1881 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1882 void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
1883 #endif
1884
1885 /* Some architectures require additional setup to direct VGA traffic */
1886 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1887 unsigned int command_bits, u32 flags);
1888 void pci_register_set_vga_state(arch_set_vga_state_t func);
1889
1890 static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)1891 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1892 {
1893 return pci_request_selected_regions(pdev,
1894 pci_select_bars(pdev, IORESOURCE_IO), name);
1895 }
1896
1897 static inline void
pci_release_io_regions(struct pci_dev * pdev)1898 pci_release_io_regions(struct pci_dev *pdev)
1899 {
1900 return pci_release_selected_regions(pdev,
1901 pci_select_bars(pdev, IORESOURCE_IO));
1902 }
1903
1904 static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)1905 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1906 {
1907 return pci_request_selected_regions(pdev,
1908 pci_select_bars(pdev, IORESOURCE_MEM), name);
1909 }
1910
1911 static inline void
pci_release_mem_regions(struct pci_dev * pdev)1912 pci_release_mem_regions(struct pci_dev *pdev)
1913 {
1914 return pci_release_selected_regions(pdev,
1915 pci_select_bars(pdev, IORESOURCE_MEM));
1916 }
1917
1918 #else /* CONFIG_PCI is not enabled */
1919
pci_set_flags(int flags)1920 static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)1921 static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)1922 static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)1923 static inline int pci_has_flag(int flag) { return 0; }
1924
1925 /*
1926 * If the system does not have PCI, clearly these return errors. Define
1927 * these as simple inline functions to avoid hair in drivers.
1928 */
1929 #define _PCI_NOP(o, s, t) \
1930 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1931 int where, t val) \
1932 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1933
1934 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1935 _PCI_NOP(o, word, u16 x) \
1936 _PCI_NOP(o, dword, u32 x)
1937 _PCI_NOP_ALL(read, *)
1938 _PCI_NOP_ALL(write,)
1939
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)1940 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1941 unsigned int device,
1942 struct pci_dev *from)
1943 { return NULL; }
1944
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)1945 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1946 unsigned int device,
1947 unsigned int ss_vendor,
1948 unsigned int ss_device,
1949 struct pci_dev *from)
1950 { return NULL; }
1951
pci_get_class(unsigned int class,struct pci_dev * from)1952 static inline struct pci_dev *pci_get_class(unsigned int class,
1953 struct pci_dev *from)
1954 { return NULL; }
1955
1956
pci_dev_present(const struct pci_device_id * ids)1957 static inline int pci_dev_present(const struct pci_device_id *ids)
1958 { return 0; }
1959
1960 #define no_pci_devices() (1)
1961 #define pci_dev_put(dev) do { } while (0)
1962
pci_set_master(struct pci_dev * dev)1963 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)1964 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)1965 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)1966 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)1967 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)1968 static inline int pci_assign_resource(struct pci_dev *dev, int i)
1969 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)1970 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
1971 struct module *owner,
1972 const char *mod_name)
1973 { return 0; }
pci_register_driver(struct pci_driver * drv)1974 static inline int pci_register_driver(struct pci_driver *drv)
1975 { return 0; }
pci_unregister_driver(struct pci_driver * drv)1976 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)1977 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
1978 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)1979 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
1980 int cap)
1981 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)1982 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
1983 { return 0; }
1984
pci_get_dsn(struct pci_dev * dev)1985 static inline u64 pci_get_dsn(struct pci_dev *dev)
1986 { return 0; }
1987
1988 /* Power management related routines */
pci_save_state(struct pci_dev * dev)1989 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
pci_restore_state(struct pci_dev * dev)1990 static inline void pci_restore_state(struct pci_dev *dev) { }
pci_set_power_state(struct pci_dev * dev,pci_power_t state)1991 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1992 { return 0; }
pci_wake_from_d3(struct pci_dev * dev,bool enable)1993 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1994 { return 0; }
pci_choose_state(struct pci_dev * dev,pm_message_t state)1995 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
1996 pm_message_t state)
1997 { return PCI_D0; }
pci_enable_wake(struct pci_dev * dev,pci_power_t state,int enable)1998 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1999 int enable)
2000 { return 0; }
2001
pci_find_resource(struct pci_dev * dev,struct resource * res)2002 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2003 struct resource *res)
2004 { return NULL; }
pci_request_regions(struct pci_dev * dev,const char * res_name)2005 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2006 { return -EIO; }
pci_release_regions(struct pci_dev * dev)2007 static inline void pci_release_regions(struct pci_dev *dev) { }
2008
pci_register_io_range(struct fwnode_handle * fwnode,phys_addr_t addr,resource_size_t size)2009 static inline int pci_register_io_range(struct fwnode_handle *fwnode,
2010 phys_addr_t addr, resource_size_t size)
2011 { return -EINVAL; }
2012
pci_address_to_pio(phys_addr_t addr)2013 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2014
pci_find_next_bus(const struct pci_bus * from)2015 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2016 { return NULL; }
pci_get_slot(struct pci_bus * bus,unsigned int devfn)2017 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2018 unsigned int devfn)
2019 { return NULL; }
pci_get_domain_bus_and_slot(int domain,unsigned int bus,unsigned int devfn)2020 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2021 unsigned int bus, unsigned int devfn)
2022 { return NULL; }
2023
pci_domain_nr(struct pci_bus * bus)2024 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_dev_get(struct pci_dev * dev)2025 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2026
2027 #define dev_is_pci(d) (false)
2028 #define dev_is_pf(d) (false)
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)2029 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2030 { return false; }
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)2031 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2032 struct device_node *node,
2033 const u32 *intspec,
2034 unsigned int intsize,
2035 unsigned long *out_hwirq,
2036 unsigned int *out_type)
2037 { return -EINVAL; }
2038
pci_match_id(const struct pci_device_id * ids,struct pci_dev * dev)2039 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2040 struct pci_dev *dev)
2041 { return NULL; }
pci_ats_disabled(void)2042 static inline bool pci_ats_disabled(void) { return true; }
2043
pci_irq_vector(struct pci_dev * dev,unsigned int nr)2044 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2045 {
2046 return -EINVAL;
2047 }
2048
2049 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)2050 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2051 unsigned int max_vecs, unsigned int flags,
2052 struct irq_affinity *aff_desc)
2053 {
2054 return -ENOSPC;
2055 }
2056 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)2057 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2058 unsigned int max_vecs, unsigned int flags)
2059 {
2060 return -ENOSPC;
2061 }
2062 #endif /* CONFIG_PCI */
2063
2064 /* Include architecture-dependent settings and functions */
2065
2066 #include <asm/pci.h>
2067
2068 /*
2069 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2070 * is expected to be an offset within that region.
2072 */
2073 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2074 struct vm_area_struct *vma,
2075 enum pci_mmap_state mmap_state, int write_combine);
2076
2077 #ifndef arch_can_pci_mmap_wc
2078 #define arch_can_pci_mmap_wc() 0
2079 #endif
2080
2081 #ifndef arch_can_pci_mmap_io
2082 #define arch_can_pci_mmap_io() 0
2083 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2084 #else
2085 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2086 #endif
2087
2088 #ifndef pci_root_bus_fwnode
2089 #define pci_root_bus_fwnode(bus) NULL
2090 #endif
2091
2092 /*
2093 * These helpers provide future and backwards compatibility
2094 * for accessing popular PCI BAR info
2095 */
2096 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
2097 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
2098 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
2099 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
2100 #define pci_resource_len(dev,bar) \
2101 (pci_resource_end((dev), (bar)) ? \
2102 resource_size(pci_resource_n((dev), (bar))) : 0)
2103
2104 #define __pci_dev_for_each_res0(dev, res, ...) \
2105 for (unsigned int __b = 0; \
2106 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2107 __b++)
2108
2109 #define __pci_dev_for_each_res1(dev, res, __b) \
2110 for (__b = 0; \
2111 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2112 __b++)
2113
2114 #define pci_dev_for_each_resource(dev, res, ...) \
2115 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
2116 (dev, res, __VA_ARGS__)
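/*
 * Possible usage of pci_dev_for_each_resource(), mirroring the bus
 * iterator documented earlier (a sketch):
 *
 *	struct resource *res;
 *	unsigned int i;
 *
 *	pci_dev_for_each_resource(pdev, res, i)
 *		pci_info(pdev, "resource[%u]: %pR\n", i, res);
 */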
2117
2118 /*
2119 * Similar to the helpers above, these manipulate per-pci_dev
2120 * driver-specific data. They are really just a wrapper around
2121 * the generic device structure functions of these calls.
2122 */
pci_get_drvdata(struct pci_dev * pdev)2123 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2124 {
2125 return dev_get_drvdata(&pdev->dev);
2126 }
2127
pci_set_drvdata(struct pci_dev * pdev,void * data)2128 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2129 {
2130 dev_set_drvdata(&pdev->dev, data);
2131 }
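/*
 * Typical pattern for the drvdata wrappers above (a sketch; "struct my_priv"
 * is a hypothetical driver-private structure allocated at probe time):
 *
 *	// In probe:
 *	struct my_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *	pci_set_drvdata(pdev, priv);
 *
 *	// Elsewhere (e.g. remove or PM callbacks):
 *	struct my_priv *priv = pci_get_drvdata(pdev);
 */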
2132
pci_name(const struct pci_dev * pdev)2133 static inline const char *pci_name(const struct pci_dev *pdev)
2134 {
2135 return dev_name(&pdev->dev);
2136 }
2137
2138 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2139 const struct resource *rsrc,
2140 resource_size_t *start, resource_size_t *end);
2141
2142 /*
2143 * The world is not perfect and supplies us with broken PCI devices.
2144 * For at least a part of these bugs we need a work-around, so both
2145 * generic (drivers/pci/quirks.c) and per-architecture code can define
2146 * fixup hooks to be called for particular buggy devices.
2147 */
2148
2149 struct pci_fixup {
2150 u16 vendor; /* Or PCI_ANY_ID */
2151 u16 device; /* Or PCI_ANY_ID */
2152 u32 class; /* Or PCI_ANY_ID */
2153 unsigned int class_shift; /* should be 0, 8, 16 */
2154 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2155 int hook_offset;
2156 #else
2157 void (*hook)(struct pci_dev *dev);
2158 #endif
2159 };
2160
2161 enum pci_fixup_pass {
2162 pci_fixup_early, /* Before probing BARs */
2163 pci_fixup_header, /* After reading configuration header */
2164 pci_fixup_final, /* Final phase of device fixups */
2165 pci_fixup_enable, /* pci_enable_device() time */
2166 pci_fixup_resume, /* pci_device_resume() */
2167 pci_fixup_suspend, /* pci_device_suspend() */
2168 pci_fixup_resume_early, /* pci_device_resume_early() */
2169 pci_fixup_suspend_late, /* pci_device_suspend_late() */
2170 };
2171
2172 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2173 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2174 class_shift, hook) \
2175 __ADDRESSABLE(hook) \
2176 asm(".section " #sec ", \"a\" \n" \
2177 ".balign 16 \n" \
2178 ".short " #vendor ", " #device " \n" \
2179 ".long " #class ", " #class_shift " \n" \
2180 ".long " #hook " - . \n" \
2181 ".previous \n");
2182
2183 /*
2184 * Clang's LTO may rename static functions in C, but has no way to
2185 * handle such renamings when referenced from inline asm. To work
2186 * around this, create global C stubs for these cases.
2187 */
2188 #ifdef CONFIG_LTO_CLANG
2189 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2190 class_shift, hook, stub) \
2191 void stub(struct pci_dev *dev); \
2192 void stub(struct pci_dev *dev) \
2193 { \
2194 hook(dev); \
2195 } \
2196 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2197 class_shift, stub)
2198 #else
2199 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2200 class_shift, hook, stub) \
2201 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2202 class_shift, hook)
2203 #endif
2204
2205 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2206 class_shift, hook) \
2207 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2208 class_shift, hook, __UNIQUE_ID(hook))
2209 #else
2210 /* Anonymous variables would be nice... */
2211 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
2212 class_shift, hook) \
2213 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
2214 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
2215 = { vendor, device, class, class_shift, hook };
2216 #endif
2217
2218 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
2219 class_shift, hook) \
2220 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2221 hook, vendor, device, class, class_shift, hook)
2222 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
2223 class_shift, hook) \
2224 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2225 hook, vendor, device, class, class_shift, hook)
2226 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
2227 class_shift, hook) \
2228 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2229 hook, vendor, device, class, class_shift, hook)
2230 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
2231 class_shift, hook) \
2232 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2233 hook, vendor, device, class, class_shift, hook)
2234 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
2235 class_shift, hook) \
2236 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2237 resume##hook, vendor, device, class, class_shift, hook)
2238 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
2239 class_shift, hook) \
2240 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2241 resume_early##hook, vendor, device, class, class_shift, hook)
2242 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
2243 class_shift, hook) \
2244 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2245 suspend##hook, vendor, device, class, class_shift, hook)
2246 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
2247 class_shift, hook) \
2248 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2249 suspend_late##hook, vendor, device, class, class_shift, hook)
2250
2251 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
2252 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2253 hook, vendor, device, PCI_ANY_ID, 0, hook)
2254 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
2255 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2256 hook, vendor, device, PCI_ANY_ID, 0, hook)
2257 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
2258 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2259 hook, vendor, device, PCI_ANY_ID, 0, hook)
2260 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
2261 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2262 hook, vendor, device, PCI_ANY_ID, 0, hook)
2263 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
2264 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2265 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2266 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
2267 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2268 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2269 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
2270 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2271 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2272 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
2273 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2274 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
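/*
 * Example of declaring a quirk with the helpers above (a sketch; the
 * vendor/device IDs and the fixup body are made up):
 *
 *	static void quirk_my_device(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_my_device);
 */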
2275
2276 #ifdef CONFIG_PCI_QUIRKS
2277 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2278 #else
pci_fixup_device(enum pci_fixup_pass pass,struct pci_dev * dev)2279 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2280 struct pci_dev *dev) { }
2281 #endif
2282
2283 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2284 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2285 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2286 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2287 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
2288 const char *name);
2289 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
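/*
 * Typical managed setup using the pcim_*() helpers above (a sketch; BAR 0
 * and the name string are illustrative):
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pcim_iomap_regions(pdev, BIT(0), "my_driver");
 *	if (err)
 *		return err;
 *
 *	regs = pcim_iomap_table(pdev)[0];
 *	// No explicit unmap/release is needed; devres undoes this on detach.
 */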
2290
2291 extern int pci_pci_problems;
2292 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
2293 #define PCIPCI_TRITON 2
2294 #define PCIPCI_NATOMA 4
2295 #define PCIPCI_VIAETBF 8
2296 #define PCIPCI_VSFX 16
2297 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2298 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2299
2300 extern unsigned long pci_cardbus_io_size;
2301 extern unsigned long pci_cardbus_mem_size;
2302 extern u8 pci_dfl_cache_line_size;
2303 extern u8 pci_cache_line_size;
2304
2305 /* Architecture-specific versions may override these (weak) */
2306 void pcibios_disable_device(struct pci_dev *dev);
2307 void pcibios_set_master(struct pci_dev *dev);
2308 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2309 enum pcie_reset_state state);
2310 int pcibios_device_add(struct pci_dev *dev);
2311 void pcibios_release_device(struct pci_dev *dev);
2312 #ifdef CONFIG_PCI
2313 void pcibios_penalize_isa_irq(int irq, int active);
2314 #else
pcibios_penalize_isa_irq(int irq,int active)2315 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2316 #endif
2317 int pcibios_alloc_irq(struct pci_dev *dev);
2318 void pcibios_free_irq(struct pci_dev *dev);
2319 resource_size_t pcibios_default_alignment(void);
2320
2321 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2322 extern int pci_create_resource_files(struct pci_dev *dev);
2323 extern void pci_remove_resource_files(struct pci_dev *dev);
2324 #endif
2325
2326 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2327 void __init pci_mmcfg_early_init(void);
2328 void __init pci_mmcfg_late_init(void);
2329 #else
pci_mmcfg_early_init(void)2330 static inline void pci_mmcfg_early_init(void) { }
pci_mmcfg_late_init(void)2331 static inline void pci_mmcfg_late_init(void) { }
2332 #endif
2333
2334 int pci_ext_cfg_avail(void);
2335
2336 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2337 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2338
2339 #ifdef CONFIG_PCI_IOV
2340 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2341 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2342 int pci_iov_vf_id(struct pci_dev *dev);
2343 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2344 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2345 void pci_disable_sriov(struct pci_dev *dev);
2346
2347 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2348 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2349 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2350 int pci_num_vf(struct pci_dev *dev);
2351 int pci_vfs_assigned(struct pci_dev *dev);
2352 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2353 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2354 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2355 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2356 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2357
2358 /* Arch may override these (weak) */
2359 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2360 int pcibios_sriov_disable(struct pci_dev *pdev);
2361 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2362 #else
pci_iov_virtfn_bus(struct pci_dev * dev,int id)2363 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2364 {
2365 return -ENOSYS;
2366 }
pci_iov_virtfn_devfn(struct pci_dev * dev,int id)2367 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2368 {
2369 return -ENOSYS;
2370 }
2371
pci_iov_vf_id(struct pci_dev * dev)2372 static inline int pci_iov_vf_id(struct pci_dev *dev)
2373 {
2374 return -ENOSYS;
2375 }
2376
pci_iov_get_pf_drvdata(struct pci_dev * dev,struct pci_driver * pf_driver)2377 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2378 struct pci_driver *pf_driver)
2379 {
2380 return ERR_PTR(-EINVAL);
2381 }
2382
pci_enable_sriov(struct pci_dev * dev,int nr_virtfn)2383 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2384 { return -ENODEV; }
2385
pci_iov_sysfs_link(struct pci_dev * dev,struct pci_dev * virtfn,int id)2386 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2387 struct pci_dev *virtfn, int id)
2388 {
2389 return -ENODEV;
2390 }
pci_iov_add_virtfn(struct pci_dev * dev,int id)2391 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2392 {
2393 return -ENOSYS;
2394 }
pci_iov_remove_virtfn(struct pci_dev * dev,int id)2395 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2396 int id) { }
pci_disable_sriov(struct pci_dev * dev)2397 static inline void pci_disable_sriov(struct pci_dev *dev) { }
pci_num_vf(struct pci_dev * dev)2398 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
pci_vfs_assigned(struct pci_dev * dev)2399 static inline int pci_vfs_assigned(struct pci_dev *dev)
2400 { return 0; }
pci_sriov_set_totalvfs(struct pci_dev * dev,u16 numvfs)2401 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2402 { return 0; }
pci_sriov_get_totalvfs(struct pci_dev * dev)2403 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2404 { return 0; }
2405 #define pci_sriov_configure_simple NULL
pci_iov_resource_size(struct pci_dev * dev,int resno)2406 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2407 { return 0; }
pci_vf_drivers_autoprobe(struct pci_dev * dev,bool probe)2408 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2409 #endif
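/*
 * Sketch of a PF driver's sriov_configure() callback built on the helpers
 * above (illustrative; drivers that need no extra setup can simply point
 * .sriov_configure at pci_sriov_configure_simple instead):
 *
 *	static int my_sriov_configure(struct pci_dev *dev, int nr_virtfn)
 *	{
 *		if (!nr_virtfn) {
 *			pci_disable_sriov(dev);
 *			return 0;
 *		}
 *		return pci_enable_sriov(dev, nr_virtfn) ?: nr_virtfn;
 *	}
 */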
2410
2411 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2412 void pci_hp_create_module_link(struct pci_slot *pci_slot);
2413 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2414 #endif
2415
2416 /**
2417 * pci_pcie_cap - get the saved PCIe capability offset
2418 * @dev: PCI device
2419 *
2420 * The PCIe capability offset is calculated at PCI device initialization
2421 * time and saved in the device structure. This function returns that
2422 * saved offset. Using it instead of pci_find_capability() avoids an
2423 * unnecessary search of the PCI configuration space. If you need to
2424 * recalculate the PCIe capability offset from the raw device for some
2425 * reason, use pci_find_capability() instead.
2426 */
pci_pcie_cap(struct pci_dev * dev)2427 static inline int pci_pcie_cap(struct pci_dev *dev)
2428 {
2429 return dev->pcie_cap;
2430 }
2431
2432 /**
2433 * pci_is_pcie - check if the PCI device is PCI Express capable
2434 * @dev: PCI device
2435 *
2436 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2437 */
pci_is_pcie(struct pci_dev * dev)2438 static inline bool pci_is_pcie(struct pci_dev *dev)
2439 {
2440 return pci_pcie_cap(dev);
2441 }
2442
2443 /**
2444 * pcie_caps_reg - get the PCIe Capabilities Register
2445 * @dev: PCI device
2446 */
pcie_caps_reg(const struct pci_dev * dev)2447 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2448 {
2449 return dev->pcie_flags_reg;
2450 }
2451
2452 /**
2453 * pci_pcie_type - get the PCIe device/port type
2454 * @dev: PCI device
2455 */
pci_pcie_type(const struct pci_dev * dev)2456 static inline int pci_pcie_type(const struct pci_dev *dev)
2457 {
2458 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2459 }
2460
2461 /**
2462 * pcie_find_root_port - Get the PCIe root port device
2463 * @dev: PCI device
2464 *
2465 * Traverse up the parent chain and return the PCIe Root Port PCI Device
2466 * for a given PCI/PCIe Device.
2467 */
pcie_find_root_port(struct pci_dev * dev)2468 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2469 {
2470 while (dev) {
2471 if (pci_is_pcie(dev) &&
2472 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2473 return dev;
2474 dev = pci_upstream_bridge(dev);
2475 }
2476
2477 return NULL;
2478 }
2479
2480 void pci_request_acs(void);
2481 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2482 bool pci_acs_path_enabled(struct pci_dev *start,
2483 struct pci_dev *end, u16 acs_flags);
2484 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2485
2486 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2487 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
2488
2489 /* Large Resource Data Type Tag Item Names */
2490 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
2491 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
2492 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
2493
2494 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2495 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2496 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2497
2498 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
2499 #define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
2500 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
2501 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
2502 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
2503
2504 /**
2505 * pci_vpd_alloc - Allocate buffer and read VPD into it
2506 * @dev: PCI device
2507 * @size: pointer to field where VPD length is returned
2508 *
2509 * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2510 */
2511 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2512
2513 /**
2514 * pci_vpd_find_id_string - Locate id string in VPD
2515 * @buf: Pointer to buffered VPD data
2516 * @len: The length of the buffer area in which to search
2517 * @size: Pointer to field where length of id string is returned
2518 *
2519 * Returns the index of the id string or -ENOENT if not found.
2520 */
2521 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2522
2523 /**
2524 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2525 * @buf: Pointer to buffered VPD data
2526 * @len: The length of the buffer area in which to search
2527 * @kw: The keyword to search for
2528 * @size: Pointer to field where length of found keyword data is returned
2529 *
2530 * Returns the index of the information field keyword data or -ENOENT if
2531 * not found.
2532 */
2533 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2534 const char *kw, unsigned int *size);
2535
2536 /**
2537 * pci_vpd_check_csum - Check VPD checksum
2538 * @buf: Pointer to buffered VPD data
2539 * @len: VPD size
2540 *
2541 * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2542 */
2543 int pci_vpd_check_csum(const void *buf, unsigned int len);
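/*
 * Example use of the VPD helpers above to look up the part number
 * (a sketch):
 *
 *	unsigned int vpd_len, kw_len;
 *	void *vpd = pci_vpd_alloc(dev, &vpd_len);
 *	int off;
 *
 *	if (IS_ERR(vpd))
 *		return PTR_ERR(vpd);
 *
 *	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
 *	// On success, kw_len bytes at vpd + off hold the part number.
 *	kfree(vpd);
 */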
2544
2545 /* PCI <-> OF binding helpers */
2546 #ifdef CONFIG_OF
2547 struct device_node;
2548 struct irq_domain;
2549 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2550 bool pci_host_of_has_msi_map(struct device *dev);
2551
2552 /* Arch may override this (weak) */
2553 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2554
2555 #else /* CONFIG_OF */
2556 static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus * bus)2557 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
pci_host_of_has_msi_map(struct device * dev)2558 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2559 #endif /* CONFIG_OF */
2560
2561 static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev * pdev)2562 pci_device_to_OF_node(const struct pci_dev *pdev)
2563 {
2564 return pdev ? pdev->dev.of_node : NULL;
2565 }
2566
pci_bus_to_OF_node(struct pci_bus * bus)2567 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2568 {
2569 return bus ? bus->dev.of_node : NULL;
2570 }
2571
2572 #ifdef CONFIG_ACPI
2573 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2574
2575 void
2576 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2577 bool pci_pr3_present(struct pci_dev *pdev);
2578 #else
2579 static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus * bus)2580 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
pci_pr3_present(struct pci_dev * pdev)2581 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2582 #endif
2583
2584 #ifdef CONFIG_EEH
pci_dev_to_eeh_dev(struct pci_dev * pdev)2585 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2586 {
2587 return pdev->dev.archdata.edev;
2588 }
2589 #endif
2590
2591 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2592 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2593 int pci_for_each_dma_alias(struct pci_dev *pdev,
2594 int (*fn)(struct pci_dev *pdev,
2595 u16 alias, void *data), void *data);
2596
2597 /* Helper functions for operation of device flag */
pci_set_dev_assigned(struct pci_dev * pdev)2598 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2599 {
2600 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2601 }
pci_clear_dev_assigned(struct pci_dev * pdev)2602 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2603 {
2604 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2605 }
pci_is_dev_assigned(struct pci_dev * pdev)2606 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2607 {
2608 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2609 }
2610
2611 /**
2612 * pci_ari_enabled - query ARI forwarding status
2613 * @bus: the PCI bus
2614 *
2615 * Returns true if ARI forwarding is enabled.
2616 */
pci_ari_enabled(struct pci_bus * bus)2617 static inline bool pci_ari_enabled(struct pci_bus *bus)
2618 {
2619 return bus->self && bus->self->ari_enabled;
2620 }
2621
2622 /**
2623 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2624 * @pdev: PCI device to check
2625 *
2626 * Walk upwards from @pdev and check each encountered bridge to see whether it
2627 * is part of a Thunderbolt controller. Reaching the host bridge means @pdev is
2628 * not Thunderbolt-attached (but rather, usually, soldered to the mainboard).
2629 */
pci_is_thunderbolt_attached(struct pci_dev * pdev)2630 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2631 {
2632 struct pci_dev *parent = pdev;
2633
2634 if (pdev->is_thunderbolt)
2635 return true;
2636
2637 while ((parent = pci_upstream_bridge(parent)))
2638 if (parent->is_thunderbolt)
2639 return true;
2640
2641 return false;
2642 }
2643
2644 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
2645 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2646 #endif
2647
2648 #include <linux/dma-mapping.h>
2649
2650 #define pci_printk(level, pdev, fmt, arg...) \
2651 dev_printk(level, &(pdev)->dev, fmt, ##arg)
2652
2653 #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2654 #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2655 #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2656 #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2657 #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2658 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
2659 #define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2660 #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2661 #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2662
2663 #define pci_notice_ratelimited(pdev, fmt, arg...) \
2664 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2665
2666 #define pci_info_ratelimited(pdev, fmt, arg...) \
2667 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2668
2669 #define pci_WARN(pdev, condition, fmt, arg...) \
2670 WARN(condition, "%s %s: " fmt, \
2671 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2672
2673 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
2674 WARN_ONCE(condition, "%s %s: " fmt, \
2675 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2676
2677 #endif /* LINUX_PCI_H */
2678