1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include "pci.h"
14
15 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
16 #define CARDBUS_RESERVE_BUSNR 3
17
18 static LIST_HEAD(pci_host_bridges);
19
20 /* Ugh. Need to stop exporting this to modules. */
21 LIST_HEAD(pci_root_buses);
22 EXPORT_SYMBOL(pci_root_buses);
23
24
static int find_anything(struct device *dev, void *data)
26 {
27 return 1;
28 }
29
/*
 * Some device drivers need to know whether PCI has been initialized.
 * We consider PCI uninitialized as long as there is no device to be
 * found on pci_bus_type.
 */
int no_pci_devices(void)
36 {
37 struct device *dev;
38 int no_devices;
39
40 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
41 no_devices = (dev == NULL);
42 put_device(dev);
43 return no_devices;
44 }
45 EXPORT_SYMBOL(no_pci_devices);
46
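/*
 * Walk up to the root bus that @dev lives under and return the
 * struct pci_host_bridge registered for it, or NULL if none is found.
 */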
static struct pci_host_bridge *pci_host_bridge(struct pci_dev *dev)
48 {
49 struct pci_bus *bus;
50 struct pci_host_bridge *bridge;
51
52 bus = dev->bus;
53 while (bus->parent)
54 bus = bus->parent;
55
56 list_for_each_entry(bridge, &pci_host_bridges, list) {
57 if (bridge->bus == bus)
58 return bridge;
59 }
60
61 return NULL;
62 }
63
static bool resource_contains(struct resource *res1, struct resource *res2)
65 {
66 return res1->start <= res2->start && res1->end >= res2->end;
67 }
68
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
71 {
72 struct pci_host_bridge *bridge = pci_host_bridge(dev);
73 struct pci_host_bridge_window *window;
74 resource_size_t offset = 0;
75
76 list_for_each_entry(window, &bridge->windows, list) {
77 if (resource_type(res) != resource_type(window->res))
78 continue;
79
80 if (resource_contains(window->res, res)) {
81 offset = window->offset;
82 break;
83 }
84 }
85
86 region->start = res->start - offset;
87 region->end = res->end - offset;
88 }
89 EXPORT_SYMBOL(pcibios_resource_to_bus);
90
static bool region_contains(struct pci_bus_region *region1,
			    struct pci_bus_region *region2)
93 {
94 return region1->start <= region2->start && region1->end >= region2->end;
95 }
96
void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
99 {
100 struct pci_host_bridge *bridge = pci_host_bridge(dev);
101 struct pci_host_bridge_window *window;
102 struct pci_bus_region bus_region;
103 resource_size_t offset = 0;
104
105 list_for_each_entry(window, &bridge->windows, list) {
106 if (resource_type(res) != resource_type(window->res))
107 continue;
108
109 bus_region.start = window->res->start - window->offset;
110 bus_region.end = window->res->end - window->offset;
111
112 if (region_contains(&bus_region, region)) {
113 offset = window->offset;
114 break;
115 }
116 }
117
118 res->start = region->start + offset;
119 res->end = region->end + offset;
120 }
121 EXPORT_SYMBOL(pcibios_bus_to_resource);
122
123 /*
124 * PCI Bus Class
125 */
static void release_pcibus_dev(struct device *dev)
127 {
128 struct pci_bus *pci_bus = to_pci_bus(dev);
129
130 if (pci_bus->bridge)
131 put_device(pci_bus->bridge);
132 pci_bus_remove_resources(pci_bus);
133 pci_release_bus_of_node(pci_bus);
134 kfree(pci_bus);
135 }
136
137 static struct class pcibus_class = {
138 .name = "pci_bus",
139 .dev_release = &release_pcibus_dev,
140 .dev_attrs = pcibus_dev_attrs,
141 };
142
static int __init pcibus_class_init(void)
144 {
145 return class_register(&pcibus_class);
146 }
147 postcore_initcall(pcibus_class_init);
148
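/*
 * Return the extent (size - 1) a BAR decodes, derived from the value
 * read back after writing all ones (@maxbase) and masked by @mask.
 * Returns 0 for an unimplemented or broken BAR.
 */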
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
150 {
151 u64 size = mask & maxbase; /* Find the significant bits */
152 if (!size)
153 return 0;
154
155 /* Get the lowest of them to find the decode size, and
156 from that the extent. */
157 size = (size & ~(size-1)) - 1;
158
159 /* base == maxbase can be valid only if the BAR has
160 already been programmed with all 1s. */
161 if (base == maxbase && ((base | size) & mask) != mask)
162 return 0;
163
164 return size;
165 }
166
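/*
 * Translate the low bits of a raw BAR value into IORESOURCE_* flags
 * (I/O vs. memory, prefetchable, 64-bit).
 */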
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
168 {
169 u32 mem_type;
170 unsigned long flags;
171
172 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
173 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
174 flags |= IORESOURCE_IO;
175 return flags;
176 }
177
178 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
179 flags |= IORESOURCE_MEM;
180 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
181 flags |= IORESOURCE_PREFETCH;
182
183 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
184 switch (mem_type) {
185 case PCI_BASE_ADDRESS_MEM_TYPE_32:
186 break;
187 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
188 dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
189 break;
190 case PCI_BASE_ADDRESS_MEM_TYPE_64:
191 flags |= IORESOURCE_MEM_64;
192 break;
193 default:
194 dev_warn(&dev->dev,
195 "mem unknown type %x treated as 32-bit BAR\n",
196 mem_type);
197 break;
198 }
199 return flags;
200 }
201
202 /**
203 * pci_read_base - read a PCI BAR
204 * @dev: the PCI device
205 * @type: type of the BAR
206 * @res: resource buffer to be filled in
207 * @pos: BAR position in the config space
208 *
209 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
210 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
213 {
214 u32 l, sz, mask;
215 u16 orig_cmd;
216 struct pci_bus_region region;
217
218 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
219
220 if (!dev->mmio_always_on) {
221 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
222 pci_write_config_word(dev, PCI_COMMAND,
223 orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
224 }
225
226 res->name = pci_name(dev);
227
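	/*
	 * Size the BAR: save the original value, write all ones (through
	 * the mask), read back which bits stick, then restore it.
	 */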
228 pci_read_config_dword(dev, pos, &l);
229 pci_write_config_dword(dev, pos, l | mask);
230 pci_read_config_dword(dev, pos, &sz);
231 pci_write_config_dword(dev, pos, l);
232
233 if (!dev->mmio_always_on)
234 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
235
236 /*
237 * All bits set in sz means the device isn't working properly.
238 * If the BAR isn't implemented, all bits must be 0. If it's a
239 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
240 * 1 must be clear.
241 */
242 if (!sz || sz == 0xffffffff)
243 goto fail;
244
245 /*
246 * I don't know how l can have all bits set. Copied from old code.
247 * Maybe it fixes a bug on some ancient platform.
248 */
249 if (l == 0xffffffff)
250 l = 0;
251
252 if (type == pci_bar_unknown) {
253 res->flags = decode_bar(dev, l);
254 res->flags |= IORESOURCE_SIZEALIGN;
255 if (res->flags & IORESOURCE_IO) {
256 l &= PCI_BASE_ADDRESS_IO_MASK;
257 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
258 } else {
259 l &= PCI_BASE_ADDRESS_MEM_MASK;
260 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
261 }
262 } else {
263 res->flags |= (l & IORESOURCE_ROM_ENABLE);
264 l &= PCI_ROM_ADDRESS_MASK;
265 mask = (u32)PCI_ROM_ADDRESS_MASK;
266 }
267
268 if (res->flags & IORESOURCE_MEM_64) {
269 u64 l64 = l;
270 u64 sz64 = sz;
271 u64 mask64 = mask | (u64)~0 << 32;
272
273 pci_read_config_dword(dev, pos + 4, &l);
274 pci_write_config_dword(dev, pos + 4, ~0);
275 pci_read_config_dword(dev, pos + 4, &sz);
276 pci_write_config_dword(dev, pos + 4, l);
277
278 l64 |= ((u64)l << 32);
279 sz64 |= ((u64)sz << 32);
280
281 sz64 = pci_size(l64, sz64, mask64);
282
283 if (!sz64)
284 goto fail;
285
286 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
287 dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
288 pos);
289 goto fail;
290 }
291
292 if ((sizeof(resource_size_t) < 8) && l) {
293 /* Address above 32-bit boundary; disable the BAR */
294 pci_write_config_dword(dev, pos, 0);
295 pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			pcibios_bus_to_resource(dev, res, &region);
		} else {
			region.start = l64;
			region.end = l64 + sz64;
			pcibios_bus_to_resource(dev, res, &region);
			dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
				   pos, res);
305 }
306 } else {
307 sz = pci_size(l, sz, mask);
308
309 if (!sz)
310 goto fail;
311
		region.start = l;
		region.end = l + sz;
		pcibios_bus_to_resource(dev, res, &region);
315
316 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
317 }
318
319 out:
320 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
321 fail:
322 res->flags = 0;
323 goto out;
324 }
325
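/*
 * Probe the first @howmany standard BARs and, if @rom is non-zero, the
 * expansion ROM BAR at config offset @rom.  A 64-bit BAR occupies two
 * slots, which is why the loop also advances by __pci_read_base()'s
 * return value.
 */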
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
327 {
328 unsigned int pos, reg;
329
330 for (pos = 0; pos < howmany; pos++) {
331 struct resource *res = &dev->resource[pos];
332 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
333 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
334 }
335
336 if (rom) {
337 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
338 dev->rom_base_reg = rom;
339 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
340 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
341 IORESOURCE_SIZEALIGN;
342 __pci_read_base(dev, pci_bar_mem32, res, rom);
343 }
344 }
345
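/*
 * Read the bridge's I/O window (including the optional upper 16 bits)
 * and decode it into the secondary bus's resource[0].
 */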
static void __devinit pci_read_bridge_io(struct pci_bus *child)
347 {
348 struct pci_dev *dev = child->self;
349 u8 io_base_lo, io_limit_lo;
350 unsigned long base, limit;
351 struct pci_bus_region region;
352 struct resource *res, res2;
353
354 res = child->resource[0];
355 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
356 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
357 base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
358 limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
359
360 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
361 u16 io_base_hi, io_limit_hi;
362 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
363 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
364 base |= (io_base_hi << 16);
365 limit |= (io_limit_hi << 16);
366 }
367
368 if (base && base <= limit) {
369 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
370 res2.flags = res->flags;
371 region.start = base;
372 region.end = limit + 0xfff;
		pcibios_bus_to_resource(dev, &res2, &region);
374 if (!res->start)
375 res->start = res2.start;
376 if (!res->end)
377 res->end = res2.end;
378 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
379 }
380 }
381
static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
383 {
384 struct pci_dev *dev = child->self;
385 u16 mem_base_lo, mem_limit_lo;
386 unsigned long base, limit;
387 struct pci_bus_region region;
388 struct resource *res;
389
390 res = child->resource[1];
391 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
392 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
393 base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
394 limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
395 if (base && base <= limit) {
396 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
397 region.start = base;
398 region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
400 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
401 }
402 }
403
static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
405 {
406 struct pci_dev *dev = child->self;
407 u16 mem_base_lo, mem_limit_lo;
408 unsigned long base, limit;
409 struct pci_bus_region region;
410 struct resource *res;
411
412 res = child->resource[2];
413 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
414 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
415 base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
416 limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
417
418 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
419 u32 mem_base_hi, mem_limit_hi;
420 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
421 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
422
423 /*
424 * Some bridges set the base > limit by default, and some
425 * (broken) BIOSes do not initialize them. If we find
426 * this, just assume they are not being used.
427 */
428 if (mem_base_hi <= mem_limit_hi) {
429 #if BITS_PER_LONG == 64
430 base |= ((long) mem_base_hi) << 32;
431 limit |= ((long) mem_limit_hi) << 32;
432 #else
433 if (mem_base_hi || mem_limit_hi) {
434 dev_err(&dev->dev, "can't handle 64-bit "
435 "address space for bridge\n");
436 return;
437 }
438 #endif
439 }
440 }
441 if (base && base <= limit) {
442 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
443 IORESOURCE_MEM | IORESOURCE_PREFETCH;
444 if (res->flags & PCI_PREF_RANGE_TYPE_64)
445 res->flags |= IORESOURCE_MEM_64;
446 region.start = base;
447 region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
449 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
450 }
451 }
452
void __devinit pci_read_bridge_bases(struct pci_bus *child)
454 {
455 struct pci_dev *dev = child->self;
456 struct resource *res;
457 int i;
458
459 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
460 return;
461
462 dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
463 child->secondary, child->subordinate,
464 dev->transparent ? " (subtractive decode)" : "");
465
466 pci_bus_remove_resources(child);
467 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
468 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
469
470 pci_read_bridge_io(child);
471 pci_read_bridge_mmio(child);
472 pci_read_bridge_mmio_pref(child);
473
474 if (dev->transparent) {
475 pci_bus_for_each_resource(child->parent, res, i) {
476 if (res) {
477 pci_bus_add_resource(child, res,
478 PCI_SUBTRACTIVE_DECODE);
479 dev_printk(KERN_DEBUG, &dev->dev,
480 " bridge window %pR (subtractive decode)\n",
481 res);
482 }
483 }
484 }
485 }
486
static struct pci_bus *pci_alloc_bus(void)
488 {
489 struct pci_bus *b;
490
491 b = kzalloc(sizeof(*b), GFP_KERNEL);
492 if (b) {
493 INIT_LIST_HEAD(&b->node);
494 INIT_LIST_HEAD(&b->children);
495 INIT_LIST_HEAD(&b->devices);
496 INIT_LIST_HEAD(&b->slots);
497 INIT_LIST_HEAD(&b->resources);
498 b->max_bus_speed = PCI_SPEED_UNKNOWN;
499 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
500 }
501 return b;
502 }
503
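/*
 * Map the 4-bit secondary bus mode and frequency encoding from the
 * PCI-X bridge secondary status register to a bus speed.
 */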
504 static unsigned char pcix_bus_speed[] = {
505 PCI_SPEED_UNKNOWN, /* 0 */
506 PCI_SPEED_66MHz_PCIX, /* 1 */
507 PCI_SPEED_100MHz_PCIX, /* 2 */
508 PCI_SPEED_133MHz_PCIX, /* 3 */
509 PCI_SPEED_UNKNOWN, /* 4 */
510 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
511 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
512 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
513 PCI_SPEED_UNKNOWN, /* 8 */
514 PCI_SPEED_66MHz_PCIX_266, /* 9 */
515 PCI_SPEED_100MHz_PCIX_266, /* A */
516 PCI_SPEED_133MHz_PCIX_266, /* B */
517 PCI_SPEED_UNKNOWN, /* C */
518 PCI_SPEED_66MHz_PCIX_533, /* D */
519 PCI_SPEED_100MHz_PCIX_533, /* E */
520 PCI_SPEED_133MHz_PCIX_533 /* F */
521 };
522
523 static unsigned char pcie_link_speed[] = {
524 PCI_SPEED_UNKNOWN, /* 0 */
525 PCIE_SPEED_2_5GT, /* 1 */
526 PCIE_SPEED_5_0GT, /* 2 */
527 PCIE_SPEED_8_0GT, /* 3 */
528 PCI_SPEED_UNKNOWN, /* 4 */
529 PCI_SPEED_UNKNOWN, /* 5 */
530 PCI_SPEED_UNKNOWN, /* 6 */
531 PCI_SPEED_UNKNOWN, /* 7 */
532 PCI_SPEED_UNKNOWN, /* 8 */
533 PCI_SPEED_UNKNOWN, /* 9 */
534 PCI_SPEED_UNKNOWN, /* A */
535 PCI_SPEED_UNKNOWN, /* B */
536 PCI_SPEED_UNKNOWN, /* C */
537 PCI_SPEED_UNKNOWN, /* D */
538 PCI_SPEED_UNKNOWN, /* E */
539 PCI_SPEED_UNKNOWN /* F */
540 };
541
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
543 {
544 bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
545 }
546 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
547
548 static unsigned char agp_speeds[] = {
549 AGP_UNKNOWN,
550 AGP_1X,
551 AGP_2X,
552 AGP_4X,
553 AGP_8X
554 };
555
static enum pci_bus_speed agp_speed(int agp3, int agpstat)
557 {
558 int index = 0;
559
560 if (agpstat & 4)
561 index = 3;
562 else if (agpstat & 2)
563 index = 2;
564 else if (agpstat & 1)
565 index = 1;
566 else
567 goto out;
568
569 if (agp3) {
570 index += 2;
571 if (index == 5)
572 index = 0;
573 }
574
575 out:
576 return agp_speeds[index];
577 }
578
579
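/*
 * Derive max_bus_speed and cur_bus_speed for @bus from whichever of the
 * bridge's AGP, PCI-X or PCI Express capabilities is present.
 */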
static void pci_set_bus_speed(struct pci_bus *bus)
581 {
582 struct pci_dev *bridge = bus->self;
583 int pos;
584
585 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
586 if (!pos)
587 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
588 if (pos) {
589 u32 agpstat, agpcmd;
590
591 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
592 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
593
594 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
595 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
596 }
597
598 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
599 if (pos) {
600 u16 status;
601 enum pci_bus_speed max;
602 pci_read_config_word(bridge, pos + 2, &status);
603
604 if (status & 0x8000) {
605 max = PCI_SPEED_133MHz_PCIX_533;
606 } else if (status & 0x4000) {
607 max = PCI_SPEED_133MHz_PCIX_266;
608 } else if (status & 0x0002) {
609 if (((status >> 12) & 0x3) == 2) {
610 max = PCI_SPEED_133MHz_PCIX_ECC;
611 } else {
612 max = PCI_SPEED_133MHz_PCIX;
613 }
614 } else {
615 max = PCI_SPEED_66MHz_PCIX;
616 }
617
618 bus->max_bus_speed = max;
619 bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
620
621 return;
622 }
623
624 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
625 if (pos) {
626 u32 linkcap;
627 u16 linksta;
628
629 pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
630 bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
631
632 pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
633 pcie_update_link_speed(bus, linksta);
634 }
635 }
636
637
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
640 {
641 struct pci_bus *child;
642 int i;
643
644 /*
645 * Allocate a new bus, and inherit stuff from the parent..
646 */
647 child = pci_alloc_bus();
648 if (!child)
649 return NULL;
650
651 child->parent = parent;
652 child->ops = parent->ops;
653 child->sysdata = parent->sysdata;
654 child->bus_flags = parent->bus_flags;
655
656 /* initialize some portions of the bus device, but don't register it
657 * now as the parent is not properly set up yet. This device will get
658 * registered later in pci_bus_add_devices()
659 */
660 child->dev.class = &pcibus_class;
661 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
662
663 /*
664 * Set up the primary, secondary and subordinate
665 * bus numbers.
666 */
667 child->number = child->secondary = busnr;
668 child->primary = parent->secondary;
669 child->subordinate = 0xff;
670
671 if (!bridge)
672 return child;
673
674 child->self = bridge;
675 child->bridge = get_device(&bridge->dev);
676 pci_set_bus_of_node(child);
677 pci_set_bus_speed(child);
678
679 /* Set up default resource pointers and names.. */
680 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
681 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
682 child->resource[i]->name = child->name;
683 }
684 bridge->subordinate = child;
685
686 return child;
687 }
688
struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
690 {
691 struct pci_bus *child;
692
693 child = pci_alloc_child_bus(parent, dev, busnr);
694 if (child) {
695 down_write(&pci_bus_sem);
696 list_add_tail(&child->node, &parent->children);
697 up_write(&pci_bus_sem);
698 }
699 return child;
700 }
701
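/*
 * Grow the subordinate bus number of the parent bridges so that
 * configuration cycles for buses up to @max are forwarded.  Only done
 * when the platform lets us reassign all bus numbers.
 */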
static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
703 {
704 struct pci_bus *parent = child->parent;
705
706 /* Attempts to fix that up are really dangerous unless
707 we're going to re-assign all bus numbers. */
708 if (!pcibios_assign_all_busses())
709 return;
710
711 while (parent->parent && parent->subordinate < max) {
712 parent->subordinate = max;
713 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
714 parent = parent->parent;
715 }
716 }
717
718 /*
719 * If it's a bridge, configure it and scan the bus behind it.
720 * For CardBus bridges, we don't scan behind as the devices will
721 * be handled by the bridge driver itself.
722 *
723 * We need to process bridges in two passes -- first we scan those
724 * already configured by the BIOS and after we are done with all of
725 * them, we proceed to assigning numbers to the remaining buses in
726 * order to avoid overlaps between old and new bus numbers.
727 */
int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
729 {
730 struct pci_bus *child;
731 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
732 u32 buses, i, j = 0;
733 u16 bctl;
734 u8 primary, secondary, subordinate;
735 int broken = 0;
736
737 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
738 primary = buses & 0xFF;
739 secondary = (buses >> 8) & 0xFF;
740 subordinate = (buses >> 16) & 0xFF;
741
742 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
743 secondary, subordinate, pass);
744
745 if (!primary && (primary != bus->number) && secondary && subordinate) {
746 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
747 primary = bus->number;
748 }
749
750 /* Check if setup is sensible at all */
751 if (!pass &&
752 (primary != bus->number || secondary <= bus->number ||
753 secondary > subordinate)) {
754 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
755 secondary, subordinate);
756 broken = 1;
757 }
758
759 /* Disable MasterAbortMode during probing to avoid reporting
760 of bus errors (in some architectures) */
761 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
762 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
763 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
764
765 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
766 !is_cardbus && !broken) {
767 unsigned int cmax;
768 /*
769 * Bus already configured by firmware, process it in the first
770 * pass and just note the configuration.
771 */
772 if (pass)
773 goto out;
774
775 /*
776 * If we already got to this bus through a different bridge,
777 * don't re-add it. This can happen with the i450NX chipset.
778 *
779 * However, we continue to descend down the hierarchy and
780 * scan remaining child buses.
781 */
782 child = pci_find_bus(pci_domain_nr(bus), secondary);
783 if (!child) {
784 child = pci_add_new_bus(bus, dev, secondary);
785 if (!child)
786 goto out;
787 child->primary = primary;
788 child->subordinate = subordinate;
789 child->bridge_ctl = bctl;
790 }
791
792 cmax = pci_scan_child_bus(child);
793 if (cmax > max)
794 max = cmax;
795 if (child->subordinate > max)
796 max = child->subordinate;
797 } else {
798 /*
799 * We need to assign a number to this bus which we always
800 * do in the second pass.
801 */
802 if (!pass) {
803 if (pcibios_assign_all_busses() || broken)
804 /* Temporarily disable forwarding of the
805 configuration cycles on all bridges in
806 this bus segment to avoid possible
807 conflicts in the second pass between two
808 bridges programmed with overlapping
809 bus ranges. */
810 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
811 buses & ~0xffffff);
812 goto out;
813 }
814
815 /* Clear errors */
816 pci_write_config_word(dev, PCI_STATUS, 0xffff);
817
818 /* Prevent assigning a bus number that already exists.
819 * This can happen when a bridge is hot-plugged, so in
820 * this case we only re-scan this bus. */
821 child = pci_find_bus(pci_domain_nr(bus), max+1);
822 if (!child) {
823 child = pci_add_new_bus(bus, dev, ++max);
824 if (!child)
825 goto out;
826 }
827 buses = (buses & 0xff000000)
828 | ((unsigned int)(child->primary) << 0)
829 | ((unsigned int)(child->secondary) << 8)
830 | ((unsigned int)(child->subordinate) << 16);
831
832 /*
833 * yenta.c forces a secondary latency timer of 176.
834 * Copy that behaviour here.
835 */
836 if (is_cardbus) {
837 buses &= ~0xff000000;
838 buses |= CARDBUS_LATENCY_TIMER << 24;
839 }
840
841 /*
842 * We need to blast all three values with a single write.
843 */
844 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
845
846 if (!is_cardbus) {
847 child->bridge_ctl = bctl;
848 /*
849 * Adjust subordinate busnr in parent buses.
850 * We do this before scanning for children because
851 * some devices may not be detected if the bios
852 * was lazy.
853 */
854 pci_fixup_parent_subordinate_busnr(child, max);
855 /* Now we can scan all subordinate buses... */
856 max = pci_scan_child_bus(child);
857 /*
858 * now fix it up again since we have found
859 * the real value of max.
860 */
861 pci_fixup_parent_subordinate_busnr(child, max);
862 } else {
863 /*
864 * For CardBus bridges, we leave 4 bus numbers
865 * as cards with a PCI-to-PCI bridge can be
866 * inserted later.
867 */
868 for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
869 struct pci_bus *parent = bus;
870 if (pci_find_bus(pci_domain_nr(bus),
871 max+i+1))
872 break;
873 while (parent->parent) {
874 if ((!pcibios_assign_all_busses()) &&
875 (parent->subordinate > max) &&
876 (parent->subordinate <= max+i)) {
877 j = 1;
878 }
879 parent = parent->parent;
880 }
881 if (j) {
882 /*
883 * Often, there are two cardbus bridges
884 * -- try to leave one valid bus number
885 * for each one.
886 */
887 i /= 2;
888 break;
889 }
890 }
891 max += i;
892 pci_fixup_parent_subordinate_busnr(child, max);
893 }
894 /*
895 * Set the subordinate bus number to its real value.
896 */
897 child->subordinate = max;
898 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
899 }
900
901 sprintf(child->name,
902 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
903 pci_domain_nr(bus), child->number);
904
905 /* Has only triggered on CardBus, fixup is in yenta_socket */
906 while (bus->parent) {
907 if ((child->subordinate > bus->subordinate) ||
908 (child->number > bus->subordinate) ||
909 (child->number < bus->number) ||
910 (child->subordinate < bus->number)) {
911 dev_info(&child->dev, "[bus %02x-%02x] %s "
912 "hidden behind%s bridge %s [bus %02x-%02x]\n",
913 child->number, child->subordinate,
914 (bus->number > child->subordinate &&
915 bus->subordinate < child->number) ?
916 "wholly" : "partially",
917 bus->self->transparent ? " transparent" : "",
918 dev_name(&bus->dev),
919 bus->number, bus->subordinate);
920 }
921 bus = bus->parent;
922 }
923
924 out:
925 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
926
927 return max;
928 }
929
930 /*
931 * Read interrupt line and base address registers.
932 * The architecture-dependent code can tweak these, of course.
933 */
static void pci_read_irq(struct pci_dev *dev)
935 {
936 unsigned char irq;
937
938 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
939 dev->pin = irq;
940 if (irq)
941 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
942 dev->irq = irq;
943 }
944
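/*
 * Cache the PCI Express capability offset, the device/port type and the
 * maximum payload size supported (MPSS) in the pci_dev.
 */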
void set_pcie_port_type(struct pci_dev *pdev)
946 {
947 int pos;
948 u16 reg16;
949
950 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
951 if (!pos)
952 return;
953 pdev->is_pcie = 1;
954 pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
959 }
960
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
962 {
963 int pos;
964 u16 reg16;
965 u32 reg32;
966
967 pos = pci_pcie_cap(pdev);
968 if (!pos)
969 return;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	if (!(reg16 & PCI_EXP_FLAGS_SLOT))
		return;
	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
976 }
977
978 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
979
980 /**
981 * pci_setup_device - fill in class and map information of a device
982 * @dev: the device structure to fill
983 *
984 * Initialize the device structure with information about the device's
 * vendor, class, memory and I/O space addresses, IRQ lines, etc.
986 * Called at initialisation of the PCI subsystem and by CardBus services.
987 * Returns 0 on success and negative if unknown type of device (not normal,
988 * bridge or CardBus).
989 */
int pci_setup_device(struct pci_dev *dev)
991 {
992 u32 class;
993 u8 hdr_type;
994 struct pci_slot *slot;
995 int pos = 0;
996 struct pci_bus_region region;
997 struct resource *res;
998
999 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1000 return -EIO;
1001
1002 dev->sysdata = dev->bus->sysdata;
1003 dev->dev.parent = dev->bus->bridge;
1004 dev->dev.bus = &pci_bus_type;
1005 dev->hdr_type = hdr_type & 0x7f;
1006 dev->multifunction = !!(hdr_type & 0x80);
1007 dev->error_state = pci_channel_io_normal;
1008 set_pcie_port_type(dev);
1009
1010 list_for_each_entry(slot, &dev->bus->slots, list)
1011 if (PCI_SLOT(dev->devfn) == slot->number)
1012 dev->slot = slot;
1013
1014 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1015 set this higher, assuming the system even supports it. */
1016 dev->dma_mask = 0xffffffff;
1017
1018 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1019 dev->bus->number, PCI_SLOT(dev->devfn),
1020 PCI_FUNC(dev->devfn));
1021
1022 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1023 dev->revision = class & 0xff;
1024 dev->class = class >> 8; /* upper 3 bytes */
1025
1026 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1027 dev->vendor, dev->device, dev->hdr_type, dev->class);
1028
1029 /* need to have dev->class ready */
1030 dev->cfg_size = pci_cfg_space_size(dev);
1031
1032 /* "Unknown power state" */
1033 dev->current_state = PCI_UNKNOWN;
1034
1035 /* Early fixups, before probing the BARs */
1036 pci_fixup_device(pci_fixup_early, dev);
1037 /* device class may be changed after fixup */
1038 class = dev->class >> 8;
1039
1040 switch (dev->hdr_type) { /* header type */
1041 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1042 if (class == PCI_CLASS_BRIDGE_PCI)
1043 goto bad;
1044 pci_read_irq(dev);
1045 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1046 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1047 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1048
1049 /*
1050 * Do the ugly legacy mode stuff here rather than broken chip
1051 * quirk code. Legacy mode ATA controllers have fixed
1052 * addresses. These are not always echoed in BAR0-3, and
1053 * BAR0-3 in a few cases contain junk!
1054 */
1055 if (class == PCI_CLASS_STORAGE_IDE) {
1056 u8 progif;
1057 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1058 if ((progif & 1) == 0) {
1059 region.start = 0x1F0;
1060 region.end = 0x1F7;
1061 res = &dev->resource[0];
1062 res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
1064 region.start = 0x3F6;
1065 region.end = 0x3F6;
1066 res = &dev->resource[1];
1067 res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
1069 }
1070 if ((progif & 4) == 0) {
1071 region.start = 0x170;
1072 region.end = 0x177;
1073 res = &dev->resource[2];
1074 res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
1076 region.start = 0x376;
1077 region.end = 0x376;
1078 res = &dev->resource[3];
1079 res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
1081 }
1082 }
1083 break;
1084
1085 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1086 if (class != PCI_CLASS_BRIDGE_PCI)
1087 goto bad;
1088 /* The PCI-to-PCI bridge spec requires that subtractive
1089 decoding (i.e. transparent) bridge must have programming
1090 interface code of 0x01. */
1091 pci_read_irq(dev);
1092 dev->transparent = ((dev->class & 0xff) == 1);
1093 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1094 set_pcie_hotplug_bridge(dev);
1095 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1096 if (pos) {
1097 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1098 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1099 }
1100 break;
1101
1102 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1103 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1104 goto bad;
1105 pci_read_irq(dev);
1106 pci_read_bases(dev, 1, 0);
1107 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1108 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1109 break;
1110
1111 default: /* unknown header */
1112 dev_err(&dev->dev, "unknown header type %02x, "
1113 "ignoring device\n", dev->hdr_type);
1114 return -EIO;
1115
1116 bad:
1117 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
1118 "type %02x)\n", dev->class, dev->hdr_type);
1119 dev->class = PCI_CLASS_NOT_DEFINED;
1120 }
1121
1122 /* We found a fine healthy device, go go go... */
1123 return 0;
1124 }
1125
static void pci_release_capabilities(struct pci_dev *dev)
1127 {
1128 pci_vpd_release(dev);
1129 pci_iov_release(dev);
1130 pci_free_cap_save_buffers(dev);
1131 }
1132
1133 /**
1134 * pci_release_dev - free a pci device structure when all users of it are finished.
1135 * @dev: device that's been disconnected
1136 *
1137 * Will be called only by the device core when all users of this pci device are
1138 * done.
1139 */
static void pci_release_dev(struct device *dev)
1141 {
1142 struct pci_dev *pci_dev;
1143
1144 pci_dev = to_pci_dev(dev);
1145 pci_release_capabilities(pci_dev);
1146 pci_release_of_node(pci_dev);
1147 kfree(pci_dev);
1148 }
1149
1150 /**
1151 * pci_cfg_space_size - get the configuration space size of the PCI device.
1152 * @dev: PCI device
1153 *
1154 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1155 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1156 * access it. Maybe we don't have a way to generate extended config space
1157 * accesses, or the device is behind a reverse Express bridge. So we try
1158 * reading the dword at 0x100 which must either be 0 or a valid extended
1159 * capability header.
1160 */
int pci_cfg_space_size_ext(struct pci_dev *dev)
1162 {
1163 u32 status;
1164 int pos = PCI_CFG_SPACE_SIZE;
1165
1166 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1167 goto fail;
1168 if (status == 0xffffffff)
1169 goto fail;
1170
1171 return PCI_CFG_SPACE_EXP_SIZE;
1172
1173 fail:
1174 return PCI_CFG_SPACE_SIZE;
1175 }
1176
int pci_cfg_space_size(struct pci_dev *dev)
1178 {
1179 int pos;
1180 u32 status;
1181 u16 class;
1182
1183 class = dev->class >> 8;
1184 if (class == PCI_CLASS_BRIDGE_HOST)
1185 return pci_cfg_space_size_ext(dev);
1186
1187 pos = pci_pcie_cap(dev);
1188 if (!pos) {
1189 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1190 if (!pos)
1191 goto fail;
1192
1193 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1194 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1195 goto fail;
1196 }
1197
1198 return pci_cfg_space_size_ext(dev);
1199
1200 fail:
1201 return PCI_CFG_SPACE_SIZE;
1202 }
1203
static void pci_release_bus_bridge_dev(struct device *dev)
1205 {
1206 kfree(dev);
1207 }
1208
struct pci_dev *alloc_pci_dev(void)
1210 {
1211 struct pci_dev *dev;
1212
1213 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1214 if (!dev)
1215 return NULL;
1216
1217 INIT_LIST_HEAD(&dev->bus_list);
1218
1219 return dev;
1220 }
1221 EXPORT_SYMBOL(alloc_pci_dev);
1222
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
1225 {
1226 int delay = 1;
1227
1228 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1229 return false;
1230
1231 /* some broken boards return 0 or ~0 if a slot is empty: */
1232 if (*l == 0xffffffff || *l == 0x00000000 ||
1233 *l == 0x0000ffff || *l == 0xffff0000)
1234 return false;
1235
1236 /* Configuration request Retry Status */
1237 while (*l == 0xffff0001) {
1238 if (!crs_timeout)
1239 return false;
1240
1241 msleep(delay);
1242 delay *= 2;
1243 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1244 return false;
1245 /* Card hasn't responded in 60 seconds? Must be stuck. */
1246 if (delay > crs_timeout) {
1247 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1248 "responding\n", pci_domain_nr(bus),
1249 bus->number, PCI_SLOT(devfn),
1250 PCI_FUNC(devfn));
1251 return false;
1252 }
1253 }
1254
1255 return true;
1256 }
1257 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1258
1259 /*
1260 * Read the config data for a PCI device, sanity-check it
1261 * and fill in the dev structure...
1262 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1264 {
1265 struct pci_dev *dev;
1266 u32 l;
1267
1268 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1269 return NULL;
1270
1271 dev = alloc_pci_dev();
1272 if (!dev)
1273 return NULL;
1274
1275 dev->bus = bus;
1276 dev->devfn = devfn;
1277 dev->vendor = l & 0xffff;
1278 dev->device = (l >> 16) & 0xffff;
1279
1280 pci_set_of_node(dev);
1281
1282 if (pci_setup_device(dev)) {
1283 kfree(dev);
1284 return NULL;
1285 }
1286
1287 return dev;
1288 }
1289
static void pci_init_capabilities(struct pci_dev *dev)
1291 {
1292 /* MSI/MSI-X list */
1293 pci_msi_init_pci_dev(dev);
1294
1295 /* Buffers for saving PCIe and PCI-X capabilities */
1296 pci_allocate_cap_save_buffers(dev);
1297
1298 /* Power Management */
1299 pci_pm_init(dev);
1300 platform_pci_wakeup_init(dev);
1301
1302 /* Vital Product Data */
1303 pci_vpd_pci22_init(dev);
1304
1305 /* Alternative Routing-ID Forwarding */
1306 pci_enable_ari(dev);
1307
1308 /* Single Root I/O Virtualization */
1309 pci_iov_init(dev);
1310
1311 /* Enable ACS P2P upstream forwarding */
1312 pci_enable_acs(dev);
1313 }
1314
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1316 {
1317 device_initialize(&dev->dev);
1318 dev->dev.release = pci_release_dev;
1319 pci_dev_get(dev);
1320
1321 dev->dev.dma_mask = &dev->dma_mask;
1322 dev->dev.dma_parms = &dev->dma_parms;
1323 dev->dev.coherent_dma_mask = 0xffffffffull;
1324
1325 pci_set_dma_max_seg_size(dev, 65536);
1326 pci_set_dma_seg_boundary(dev, 0xffffffff);
1327
1328 /* Fix up broken headers */
1329 pci_fixup_device(pci_fixup_header, dev);
1330
1331 /* moved out from quirk header fixup code */
1332 pci_reassigndev_resource_alignment(dev);
1333
1334 /* Clear the state_saved flag. */
1335 dev->state_saved = false;
1336
1337 /* Initialize various capabilities */
1338 pci_init_capabilities(dev);
1339
1340 /*
1341 * Add the device to our list of discovered devices
1342 * and the bus list for fixup functions, etc.
1343 */
1344 down_write(&pci_bus_sem);
1345 list_add_tail(&dev->bus_list, &bus->devices);
1346 up_write(&pci_bus_sem);
1347 }
1348
struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1350 {
1351 struct pci_dev *dev;
1352
1353 dev = pci_get_slot(bus, devfn);
1354 if (dev) {
1355 pci_dev_put(dev);
1356 return dev;
1357 }
1358
1359 dev = pci_scan_device(bus, devfn);
1360 if (!dev)
1361 return NULL;
1362
1363 pci_device_add(dev, bus);
1364
1365 return dev;
1366 }
1367 EXPORT_SYMBOL(pci_scan_single_device);
1368
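/*
 * Helpers for pci_scan_slot(): return the next function number to probe.
 * With ARI the next function comes from the device's ARI capability;
 * traditional multi-function devices are stepped through functions 0-7;
 * otherwise only function 0 is scanned.
 */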
static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
1370 {
1371 u16 cap;
1372 unsigned pos, next_fn;
1373
1374 if (!dev)
1375 return 0;
1376
1377 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1378 if (!pos)
1379 return 0;
1380 pci_read_config_word(dev, pos + 4, &cap);
1381 next_fn = cap >> 8;
1382 if (next_fn <= fn)
1383 return 0;
1384 return next_fn;
1385 }
1386
static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
1388 {
1389 return (fn + 1) % 8;
1390 }
1391
static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
1393 {
1394 return 0;
1395 }
1396
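/*
 * A PCIe root port or downstream port leads to a link with at most one
 * device on it, so only devfn 0 needs to be scanned there.
 */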
static int only_one_child(struct pci_bus *bus)
1398 {
1399 struct pci_dev *parent = bus->self;
1400 if (!parent || !pci_is_pcie(parent))
1401 return 0;
1402 if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
1403 parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
1404 return 1;
1405 return 0;
1406 }
1407
1408 /**
1409 * pci_scan_slot - scan a PCI slot on a bus for devices.
1410 * @bus: PCI bus to scan
1411 * @devfn: slot number to scan (must have zero function.)
1412 *
1413 * Scan a PCI slot on the specified PCI bus for devices, adding
1414 * discovered devices to the @bus->devices list. New devices
1415 * will not have is_added set.
1416 *
1417 * Returns the number of new devices found.
1418 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
1420 {
1421 unsigned fn, nr = 0;
1422 struct pci_dev *dev;
1423 unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
1424
1425 if (only_one_child(bus) && (devfn > 0))
1426 return 0; /* Already scanned the entire slot */
1427
1428 dev = pci_scan_single_device(bus, devfn);
1429 if (!dev)
1430 return 0;
1431 if (!dev->is_added)
1432 nr++;
1433
1434 if (pci_ari_enabled(bus))
1435 next_fn = next_ari_fn;
1436 else if (dev->multifunction)
1437 next_fn = next_trad_fn;
1438
1439 for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
1440 dev = pci_scan_single_device(bus, devfn + fn);
1441 if (dev) {
1442 if (!dev->is_added)
1443 nr++;
1444 dev->multifunction = 1;
1445 }
1446 }
1447
1448 /* only one slot has pcie device */
1449 if (bus->self && nr)
1450 pcie_aspm_init_link_state(bus->self);
1451
1452 return nr;
1453 }
1454
static int pcie_find_smpss(struct pci_dev *dev, void *data)
1456 {
1457 u8 *smpss = data;
1458
1459 if (!pci_is_pcie(dev))
1460 return 0;
1461
1462 /* For PCIE hotplug enabled slots not connected directly to a
1463 * PCI-E root port, there can be problems when hotplugging
1464 * devices. This is due to the possibility of hotplugging a
	 * device into the fabric with a smaller MPS than the devices
1466 * currently running have configured. Modifying the MPS on the
1467 * running devices could cause a fatal bus error due to an
1468 * incoming frame being larger than the newly configured MPS.
1469 * To work around this, the MPS for the entire fabric must be
1470 * set to the minimum size. Any devices hotplugged into this
1471 * fabric will have the minimum MPS set. If the PCI hotplug
	 * slot is directly connected to the root port and there are no
1473 * other devices on the fabric (which seems to be the most
1474 * common case), then this is not an issue and MPS discovery
1475 * will occur as normal.
1476 */
1477 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1478 (dev->bus->self &&
1479 dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
1480 *smpss = 0;
1481
1482 if (*smpss > dev->pcie_mpss)
1483 *smpss = dev->pcie_mpss;
1484
1485 return 0;
1486 }
1487
static void pcie_write_mps(struct pci_dev *dev, int mps)
1489 {
1490 int rc;
1491
1492 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1493 mps = 128 << dev->pcie_mpss;
1494
1495 if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self)
1496 /* For "Performance", the assumption is made that
1497 * downstream communication will never be larger than
1498 * the MRRS. So, the MPS only needs to be configured
1499 * for the upstream communication. This being the case,
1500 * walk from the top down and set the MPS of the child
1501 * to that of the parent bus.
1502 *
1503 * Configure the device MPS with the smaller of the
1504 * device MPSS or the bridge MPS (which is assumed to be
1505 * properly configured at this point to the largest
1506 * allowable MPS based on its parent bus).
1507 */
1508 mps = min(mps, pcie_get_mps(dev->bus->self));
1509 }
1510
1511 rc = pcie_set_mps(dev, mps);
1512 if (rc)
1513 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1514 }
1515
static void pcie_write_mrrs(struct pci_dev *dev)
1517 {
1518 int rc, mrrs;
1519
1520 /* In the "safe" case, do not configure the MRRS. There appear to be
1521 * issues with setting MRRS to 0 on a number of devices.
1522 */
1523 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1524 return;
1525
1526 /* For Max performance, the MRRS must be set to the largest supported
1527 * value. However, it cannot be configured larger than the MPS the
1528 * device or the bus can support. This should already be properly
1529 * configured by a prior call to pcie_write_mps.
1530 */
1531 mrrs = pcie_get_mps(dev);
1532
1533 /* MRRS is a R/W register. Invalid values can be written, but a
1534 * subsequent read will verify if the value is acceptable or not.
1535 * If the MRRS value provided is not acceptable (e.g., too large),
1536 * shrink the value until it is acceptable to the HW.
1537 */
1538 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1539 rc = pcie_set_readrq(dev, mrrs);
1540 if (!rc)
1541 break;
1542
1543 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1544 mrrs /= 2;
1545 }
1546
1547 if (mrrs < 128)
1548 dev_err(&dev->dev, "MRRS was unable to be configured with a "
1549 "safe value. If problems are experienced, try running "
1550 "with pci=pcie_bus_safe.\n");
1551 }
1552
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1554 {
1555 int mps, orig_mps;
1556
1557 if (!pci_is_pcie(dev))
1558 return 0;
1559
1560 mps = 128 << *(u8 *)data;
1561 orig_mps = pcie_get_mps(dev);
1562
1563 pcie_write_mps(dev, mps);
1564 pcie_write_mrrs(dev);
1565
1566 dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
1567 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
1568 orig_mps, pcie_get_readrq(dev));
1569
1570 return 0;
1571 }
1572
1573 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1574 * parents then children fashion. If this changes, then this code will not
1575 * work as designed.
1576 */
void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1578 {
1579 u8 smpss;
1580
1581 if (!pci_is_pcie(bus->self))
1582 return;
1583
1584 if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1585 return;
1586
1587 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
1589 * simply force the MPS of the entire system to the smallest possible.
1590 */
1591 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1592 smpss = 0;
1593
1594 if (pcie_bus_config == PCIE_BUS_SAFE) {
1595 smpss = mpss;
1596
1597 pcie_find_smpss(bus->self, &smpss);
1598 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1599 }
1600
1601 pcie_bus_configure_set(bus->self, &smpss);
1602 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1603 }
1604 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1605
unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1607 {
1608 unsigned int devfn, pass, max = bus->secondary;
1609 struct pci_dev *dev;
1610
1611 dev_dbg(&bus->dev, "scanning bus\n");
1612
1613 /* Go find them, Rover! */
1614 for (devfn = 0; devfn < 0x100; devfn += 8)
1615 pci_scan_slot(bus, devfn);
1616
1617 /* Reserve buses for SR-IOV capability. */
1618 max += pci_iov_bus_range(bus);
1619
1620 /*
1621 * After performing arch-dependent fixup of the bus, look behind
1622 * all PCI-to-PCI bridges on this bus.
1623 */
1624 if (!bus->is_added) {
1625 dev_dbg(&bus->dev, "fixups for bus\n");
1626 pcibios_fixup_bus(bus);
1627 if (pci_is_root_bus(bus))
1628 bus->is_added = 1;
1629 }
1630
1631 for (pass=0; pass < 2; pass++)
1632 list_for_each_entry(dev, &bus->devices, bus_list) {
1633 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1634 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1635 max = pci_scan_bridge(bus, dev, max, pass);
1636 }
1637
1638 /*
1639 * We've scanned the bus and so we know all about what's on
1640 * the other side of any bridges that may be on this bus plus
1641 * any devices.
1642 *
1643 * Return how far we've got finding sub-buses.
1644 */
1645 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1646 return max;
1647 }
1648
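/*
 * Allocate and register a root bus and its host bridge.  The initial
 * host bridge windows in @resources are moved onto the bridge and added
 * to the bus; the bus itself is not scanned here.
 */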
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1651 {
1652 int error;
1653 struct pci_host_bridge *bridge;
1654 struct pci_bus *b, *b2;
1655 struct device *dev;
1656 struct pci_host_bridge_window *window, *n;
1657 struct resource *res;
1658 resource_size_t offset;
1659 char bus_addr[64];
1660 char *fmt;
1661
1662 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
1663 if (!bridge)
1664 return NULL;
1665
1666 b = pci_alloc_bus();
1667 if (!b)
1668 goto err_bus;
1669
1670 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1671 if (!dev)
1672 goto err_dev;
1673
1674 b->sysdata = sysdata;
1675 b->ops = ops;
1676
1677 b2 = pci_find_bus(pci_domain_nr(b), bus);
1678 if (b2) {
1679 /* If we already got to this bus through a different bridge, ignore it */
1680 dev_dbg(&b2->dev, "bus already known\n");
1681 goto err_out;
1682 }
1683
1684 dev->parent = parent;
1685 dev->release = pci_release_bus_bridge_dev;
1686 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1687 error = device_register(dev);
1688 if (error)
1689 goto dev_reg_err;
1690 b->bridge = get_device(dev);
1691 device_enable_async_suspend(b->bridge);
1692 pci_set_bus_of_node(b);
1693
1694 if (!parent)
1695 set_dev_node(b->bridge, pcibus_to_node(b));
1696
1697 b->dev.class = &pcibus_class;
1698 b->dev.parent = b->bridge;
1699 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1700 error = device_register(&b->dev);
1701 if (error)
1702 goto class_dev_reg_err;
1703
1704 /* Create legacy_io and legacy_mem files for this bus */
1705 pci_create_legacy_files(b);
1706
1707 b->number = b->secondary = bus;
1708
1709 bridge->bus = b;
1710 INIT_LIST_HEAD(&bridge->windows);
1711
1712 if (parent)
1713 dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1714 else
1715 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1716
1717 /* Add initial resources to the bus */
1718 list_for_each_entry_safe(window, n, resources, list) {
1719 list_move_tail(&window->list, &bridge->windows);
1720 res = window->res;
1721 offset = window->offset;
1722 pci_bus_add_resource(b, res, 0);
1723 if (offset) {
1724 if (resource_type(res) == IORESOURCE_IO)
1725 fmt = " (bus address [%#06llx-%#06llx])";
1726 else
1727 fmt = " (bus address [%#010llx-%#010llx])";
1728 snprintf(bus_addr, sizeof(bus_addr), fmt,
1729 (unsigned long long) (res->start - offset),
1730 (unsigned long long) (res->end - offset));
1731 } else
1732 bus_addr[0] = '\0';
1733 dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1734 }
1735
1736 down_write(&pci_bus_sem);
1737 list_add_tail(&bridge->list, &pci_host_bridges);
1738 list_add_tail(&b->node, &pci_root_buses);
1739 up_write(&pci_bus_sem);
1740
1741 return b;
1742
1743 class_dev_reg_err:
1744 device_unregister(dev);
1745 dev_reg_err:
1746 down_write(&pci_bus_sem);
1747 list_del(&bridge->list);
1748 list_del(&b->node);
1749 up_write(&pci_bus_sem);
1750 err_out:
1751 kfree(dev);
1752 err_dev:
1753 kfree(b);
1754 err_bus:
1755 kfree(bridge);
1756 return NULL;
1757 }
1758
struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1761 {
1762 struct pci_bus *b;
1763
1764 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1765 if (!b)
1766 return NULL;
1767
1768 b->subordinate = pci_scan_child_bus(b);
1769 pci_bus_add_devices(b);
1770 return b;
1771 }
1772 EXPORT_SYMBOL(pci_scan_root_bus);
1773
1774 /* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
1777 {
1778 LIST_HEAD(resources);
1779 struct pci_bus *b;
1780
1781 pci_add_resource(&resources, &ioport_resource);
1782 pci_add_resource(&resources, &iomem_resource);
1783 b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1784 if (b)
1785 b->subordinate = pci_scan_child_bus(b);
1786 else
1787 pci_free_resource_list(&resources);
1788 return b;
1789 }
1790 EXPORT_SYMBOL(pci_scan_bus_parented);
1791
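/*
 * Illustrative use only (an assumed arch-level caller; "my_pci_ops" is a
 * placeholder for an architecture's real config accessors):
 *
 *	struct pci_bus *b = pci_scan_bus(0, &my_pci_ops, NULL);
 *	if (b)
 *		pci_bus_assign_resources(b);
 */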
struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
1794 {
1795 LIST_HEAD(resources);
1796 struct pci_bus *b;
1797
1798 pci_add_resource(&resources, &ioport_resource);
1799 pci_add_resource(&resources, &iomem_resource);
1800 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1801 if (b) {
1802 b->subordinate = pci_scan_child_bus(b);
1803 pci_bus_add_devices(b);
1804 } else {
1805 pci_free_resource_list(&resources);
1806 }
1807 return b;
1808 }
1809 EXPORT_SYMBOL(pci_scan_bus);
1810
1811 #ifdef CONFIG_HOTPLUG
1812 /**
1813 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1814 * @bridge: PCI bridge for the bus to scan
1815 *
1816 * Scan a PCI bus and child buses for new devices, add them,
1817 * and enable them, resizing bridge mmio/io resource if necessary
1818 * and possible. The caller must ensure the child devices are already
1819 * removed for resizing to occur.
1820 *
1821 * Returns the max number of subordinate bus discovered.
1822 */
unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1824 {
1825 unsigned int max;
1826 struct pci_bus *bus = bridge->subordinate;
1827
1828 max = pci_scan_child_bus(bus);
1829
1830 pci_assign_unassigned_bridge_resources(bridge);
1831
1832 pci_bus_add_devices(bus);
1833
1834 return max;
1835 }
1836
1837 EXPORT_SYMBOL(pci_add_new_bus);
1838 EXPORT_SYMBOL(pci_scan_slot);
1839 EXPORT_SYMBOL(pci_scan_bridge);
1840 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1841 #endif
1842
static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1844 {
1845 const struct pci_dev *a = to_pci_dev(d_a);
1846 const struct pci_dev *b = to_pci_dev(d_b);
1847
1848 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1849 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
1850
1851 if (a->bus->number < b->bus->number) return -1;
1852 else if (a->bus->number > b->bus->number) return 1;
1853
1854 if (a->devfn < b->devfn) return -1;
1855 else if (a->devfn > b->devfn) return 1;
1856
1857 return 0;
1858 }
1859
void __init pci_sort_breadthfirst(void)
1861 {
1862 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1863 }
1864