1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
 * MPC83xx/85xx/86xx PCI/PCIE support routines.
4 *
5 * Copyright 2007-2012 Freescale Semiconductor, Inc.
6 * Copyright 2008-2009 MontaVista Software, Inc.
7 *
8 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
9 * Recode: ZHANG WEI <wei.zhang@freescale.com>
 * Rewrite the routines for Freescale PCI and PCI Express
11 * Roy Zang <tie-fei.zang@freescale.com>
12 * MPC83xx PCI-Express support:
13 * Tony Li <tony.li@freescale.com>
14 * Anton Vorontsov <avorontsov@ru.mvista.com>
15 */
16 #include <linux/kernel.h>
17 #include <linux/pci.h>
18 #include <linux/delay.h>
19 #include <linux/string.h>
20 #include <linux/fsl/edac.h>
21 #include <linux/init.h>
22 #include <linux/interrupt.h>
23 #include <linux/memblock.h>
24 #include <linux/log2.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/platform_device.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/uaccess.h>
32
33 #include <asm/io.h>
34 #include <asm/pci-bridge.h>
35 #include <asm/ppc-pci.h>
36 #include <asm/machdep.h>
37 #include <asm/mpc85xx.h>
38 #include <asm/disassemble.h>
39 #include <asm/ppc-opcode.h>
40 #include <asm/swiotlb.h>
41 #include <sysdev/fsl_soc.h>
42 #include <sysdev/fsl_pci.h>
43
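/*
 * fsl_pcie_bus_fixup is set once quirk_fsl_pcie_early() has fixed up a
 * Freescale PCIe root complex; is_mpc83xx_pci is set when an MPC83xx
 * controller is registered.  Both are consulted by the helpers below.
 */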
44 static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
45
static void quirk_fsl_pcie_early(struct pci_dev *dev)
47 {
48 u8 hdr_type;
49
50 /* if we aren't a PCIe don't bother */
51 if (!pci_is_pcie(dev))
52 return;
53
54 /* if we aren't in host mode don't bother */
55 pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
56 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
57 return;
58
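/*
 * The root complex does not report itself as a PCI-PCI bridge, so force
 * the class code here and let fsl_pcibios_fixup_bus() copy the PHB
 * resources into the bridge later on.
 */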
59 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
60 fsl_pcie_bus_fixup = 1;
61 return;
62 }
63
64 static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
65 int, int, u32 *);
66
static int fsl_pcie_check_link(struct pci_controller *hose)
68 {
69 u32 val = 0;
70
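/*
 * Pre-3.0 PCIe blocks (flagged with FSL_CFG_REG_LINK) expose the LTSSM
 * state through a config-space register; rev 3.0 and later report it in
 * the memory-mapped CSR0 instead.
 */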
71 if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
72 if (hose->ops->read == fsl_indirect_read_config)
73 __indirect_read_config(hose, hose->first_busno, 0,
74 PCIE_LTSSM, 4, &val);
75 else
76 early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
77 if (val < PCIE_LTSSM_L0)
78 return 1;
79 } else {
80 struct ccsr_pci __iomem *pci = hose->private_data;
81 /* for PCIe IP rev 3.0 or greater use CSR0 for link state */
82 val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
83 >> PEX_CSR0_LTSSM_SHIFT;
84 if (val != PEX_CSR0_LTSSM_L0)
85 return 1;
86 }
87
88 return 0;
89 }
90
static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
93 {
94 struct pci_controller *hose = pci_bus_to_host(bus);
95
96 if (fsl_pcie_check_link(hose))
97 hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
98 else
99 hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
100
101 return indirect_read_config(bus, devfn, offset, len, val);
102 }
103
104 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
105
106 static struct pci_ops fsl_indirect_pcie_ops =
107 {
108 .read = fsl_indirect_read_config,
109 .write = indirect_write_config,
110 };
111
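/*
 * PCI bus address at which the optional 64-bit inbound window is placed;
 * it doubles as the DMA offset applied to devices whose mask can reach it
 * (see fsl_pci_dma_set_mask()).
 */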
112 static u64 pci64_dma_offset;
113
114 #ifdef CONFIG_SWIOTLB
static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
116 {
117 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
118
119 pdev->dev.bus_dma_limit =
120 hose->dma_window_base_cur + hose->dma_window_size - 1;
121 }
122
static void setup_swiotlb_ops(struct pci_controller *hose)
124 {
125 if (ppc_swiotlb_enable)
126 hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
127 }
128 #else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
130 #endif
131
static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
133 {
/*
 * Fix up PCI devices that are able to DMA to the large inbound
 * mapping that allows addressing all of RAM from across PCI.
 */
138 if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
139 dev->bus_dma_limit = 0;
140 dev->archdata.dma_offset = pci64_dma_offset;
141 }
142 }
143
static int setup_one_atmu(struct ccsr_pci __iomem *pci,
			  unsigned int index, const struct resource *res,
			  resource_size_t offset)
147 {
148 resource_size_t pci_addr = res->start - offset;
149 resource_size_t phys_addr = res->start;
150 resource_size_t size = resource_size(res);
151 u32 flags = 0x80044000; /* enable & mem R/W */
152 unsigned int i;
153
154 pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
155 (u64)res->start, (u64)size);
156
157 if (res->flags & IORESOURCE_PREFETCH)
158 flags |= 0x10000000; /* enable relaxed ordering */
159
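/*
 * Each outbound window must be a naturally aligned power-of-two in size,
 * so the resource is carved into as many windows as needed (at most four
 * usable outbound ATMUs besides the default window 0).
 */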
160 for (i = 0; size > 0; i++) {
161 unsigned int bits = min_t(u32, ilog2(size),
162 __ffs(pci_addr | phys_addr));
163
164 if (index + i >= 5)
165 return -1;
166
167 out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
168 out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
169 out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
170 out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
171
172 pci_addr += (resource_size_t)1U << bits;
173 phys_addr += (resource_size_t)1U << bits;
174 size -= (resource_size_t)1U << bits;
175 }
176
177 return i;
178 }
179
static bool is_kdump(void)
181 {
182 struct device_node *node;
183
184 node = of_find_node_by_type(NULL, "memory");
185 if (!node) {
186 WARN_ON_ONCE(1);
187 return false;
188 }
189
190 return of_property_read_bool(node, "linux,usable-memory");
191 }
192
193 /* atmu setup for fsl pci/pcie controller */
static void setup_pci_atmu(struct pci_controller *hose)
195 {
196 struct ccsr_pci __iomem *pci = hose->private_data;
197 int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
198 u64 mem, sz, paddr_hi = 0;
199 u64 offset = 0, paddr_lo = ULLONG_MAX;
200 u32 pcicsrbar = 0, pcicsrbar_sz;
201 u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
202 PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
203 const u64 *reg;
204 int len;
205 bool setup_inbound;
206
207 /*
208 * If this is kdump, we don't want to trigger a bunch of PCI
209 * errors by closing the window on in-flight DMA.
210 *
211 * We still run most of the function's logic so that things like
212 * hose->dma_window_size still get set.
213 */
214 setup_inbound = !is_kdump();
215
216 if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
/*
 * BSC9132 Rev 1.0 has an issue where all the PEX inbound windows
 * use a default target value of 0xf for CCSR space.  In all
 * Freescale legacy devices the target 0xf is reserved for local
 * memory space, but 9132 Rev 1.0 maps local memory space to
 * target 0x0 instead of 0xf.  Work around this by removing the
 * target 0xf defined for memory space from the inbound window
 * attributes.
 */
226 piwar &= ~PIWAR_TGI_LOCAL;
227 }
228
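/*
 * PCIe blocks of rev 2.2 or later number their inbound windows
 * differently: the usable windows are 0..2 (DDR window at index 2)
 * rather than 1..3.
 */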
229 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
230 if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
231 win_idx = 2;
232 start_idx = 0;
233 end_idx = 3;
234 }
235 }
236
237 /* Disable all windows (except powar0 since it's ignored) */
for (i = 1; i < 5; i++)
239 out_be32(&pci->pow[i].powar, 0);
240
241 if (setup_inbound) {
242 for (i = start_idx; i < end_idx; i++)
243 out_be32(&pci->piw[i].piwar, 0);
244 }
245
246 /* Setup outbound MEM window */
for (i = 0, j = 1; i < 3; i++) {
248 if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
249 continue;
250
251 paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
252 paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
253
254 /* We assume all memory resources have the same offset */
255 offset = hose->mem_offset[i];
256 n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
257
258 if (n < 0 || j >= 5) {
259 pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
260 hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
261 } else
262 j += n;
263 }
264
265 /* Setup outbound IO window */
266 if (hose->io_resource.flags & IORESOURCE_IO) {
267 if (j >= 5) {
268 pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
269 } else {
270 pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
271 "phy base 0x%016llx.\n",
272 (u64)hose->io_resource.start,
273 (u64)resource_size(&hose->io_resource),
274 (u64)hose->io_base_phys);
275 out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
276 out_be32(&pci->pow[j].potear, 0);
277 out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
278 /* Enable, IO R/W */
279 out_be32(&pci->pow[j].powar, 0x80088000
280 | (ilog2(hose->io_resource.end
281 - hose->io_resource.start + 1) - 1));
282 }
283 }
284
285 /* convert to pci address space */
286 paddr_hi -= offset;
287 paddr_lo -= offset;
288
289 if (paddr_hi == paddr_lo) {
290 pr_err("%pOF: No outbound window space\n", hose->dn);
291 return;
292 }
293
294 if (paddr_lo == 0) {
295 pr_err("%pOF: No space for inbound window\n", hose->dn);
296 return;
297 }
298
299 /* setup PCSRBAR/PEXCSRBAR */
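/*
 * Probe the BAR size the standard way (write all ones, read back,
 * invert), then place the window just below 4 GiB or just below the
 * lowest outbound window, keeping it clear of outbound PCI memory.
 */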
300 early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
301 early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
302 pcicsrbar_sz = ~pcicsrbar_sz + 1;
303
304 if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
305 (paddr_lo > 0x100000000ull))
306 pcicsrbar = 0x100000000ull - pcicsrbar_sz;
307 else
308 pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
309 early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
310
311 paddr_lo = min(paddr_lo, (u64)pcicsrbar);
312
313 pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
314
315 /* Setup inbound mem window */
316 mem = memblock_end_of_DRAM();
317 pr_info("%s: end of DRAM %llx\n", __func__, mem);
318
319 /*
320 * The msi-address-64 property, if it exists, indicates the physical
321 * address of the MSIIR register. Normally, this register is located
322 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
323 * this property exists, then we normally need to create a new ATMU
324 * for it. For now, however, we cheat. The only entity that creates
325 * this property is the Freescale hypervisor, and the address is
326 * specified in the partition configuration. Typically, the address
327 * is located in the page immediately after the end of DDR. If so, we
328 * can avoid allocating a new ATMU by extending the DDR ATMU by one
329 * page.
330 */
331 reg = of_get_property(hose->dn, "msi-address-64", &len);
332 if (reg && (len == sizeof(u64))) {
333 u64 address = be64_to_cpup(reg);
334
335 if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
336 pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
337 mem += PAGE_SIZE;
338 } else {
339 /* TODO: Create a new ATMU for MSIIR */
340 pr_warn("%pOF: msi-address-64 address of %llx is "
341 "unsupported\n", hose->dn, address);
342 }
343 }
344
345 sz = min(mem, paddr_lo);
346 mem_log = ilog2(sz);
347
348 /* PCIe can overmap inbound & outbound since RX & TX are separated */
349 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
350 /* Size window to exact size if power-of-two or one size up */
351 if ((1ull << mem_log) != mem) {
352 mem_log++;
353 if ((1ull << mem_log) > mem)
354 pr_info("%pOF: Setting PCI inbound window "
355 "greater than memory size\n", hose->dn);
356 }
357
358 piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
359
360 if (setup_inbound) {
361 /* Setup inbound memory window */
362 out_be32(&pci->piw[win_idx].pitar, 0x00000000);
363 out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
364 out_be32(&pci->piw[win_idx].piwar, piwar);
365 }
366
367 win_idx--;
368 hose->dma_window_base_cur = 0x00000000;
369 hose->dma_window_size = (resource_size_t)sz;
370
/*
 * If we have more than 4 GiB of memory, set up a second PCI inbound
 * window so that 64-bit address capable devices can work without
 * SWIOTLB and access the full range of memory.
 */
376 if (sz != mem) {
377 mem_log = ilog2(mem);
378
/* Size window up if we don't fit in an exact power of two */
380 if ((1ull << mem_log) != mem)
381 mem_log++;
382
383 piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
384 pci64_dma_offset = 1ULL << mem_log;
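/*
 * The second window lives at PCI address pci64_dma_offset (a power of
 * two >= RAM size), so bus address = physical address + pci64_dma_offset
 * for the devices that use it.
 */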
385
386 if (setup_inbound) {
387 /* Setup inbound memory window */
388 out_be32(&pci->piw[win_idx].pitar, 0x00000000);
389 out_be32(&pci->piw[win_idx].piwbear,
390 pci64_dma_offset >> 44);
391 out_be32(&pci->piw[win_idx].piwbar,
392 pci64_dma_offset >> 12);
393 out_be32(&pci->piw[win_idx].piwar, piwar);
394 }
395
396 /*
397 * install our own dma_set_mask handler to fixup dma_ops
398 * and dma_offset
399 */
400 ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
401
402 pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
403 }
404 } else {
405 u64 paddr = 0;
406
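/*
 * Conventional PCI cannot overlap inbound and outbound ranges, so cover
 * as much RAM as fits below the outbound windows using up to two
 * naturally aligned power-of-two inbound windows.
 */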
407 if (setup_inbound) {
408 /* Setup inbound memory window */
409 out_be32(&pci->piw[win_idx].pitar, paddr >> 12);
410 out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
411 out_be32(&pci->piw[win_idx].piwar,
412 (piwar | (mem_log - 1)));
413 }
414
415 win_idx--;
416 paddr += 1ull << mem_log;
417 sz -= 1ull << mem_log;
418
419 if (sz) {
420 mem_log = ilog2(sz);
421 piwar |= (mem_log - 1);
422
423 if (setup_inbound) {
424 out_be32(&pci->piw[win_idx].pitar,
425 paddr >> 12);
426 out_be32(&pci->piw[win_idx].piwbar,
427 paddr >> 12);
428 out_be32(&pci->piw[win_idx].piwar, piwar);
429 }
430
431 win_idx--;
432 paddr += 1ull << mem_log;
433 }
434
435 hose->dma_window_base_cur = 0x00000000;
436 hose->dma_window_size = (resource_size_t)paddr;
437 }
438
439 if (hose->dma_window_size < mem) {
440 #ifdef CONFIG_SWIOTLB
441 ppc_swiotlb_enable = 1;
442 #else
443 pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
444 "map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
445 hose->dn);
446 #endif
447 /* adjusting outbound windows could reclaim space in mem map */
448 if (paddr_hi < 0xffffffffull)
449 pr_warn("%pOF: WARNING: Outbound window cfg leaves "
450 "gaps in memory map. Adjusting the memory map "
451 "could reduce unnecessary bounce buffering.\n",
452 hose->dn);
453
454 pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
455 (u64)hose->dma_window_size);
456 }
457 }
458
static void setup_pci_cmd(struct pci_controller *hose)
460 {
461 u16 cmd;
462 int cap_x;
463
464 early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
465 cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
466 | PCI_COMMAND_IO;
467 early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
468
469 cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
470 if (cap_x) {
471 int pci_x_cmd = cap_x + PCI_X_CMD;
472 cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
473 | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
474 early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
475 } else {
476 early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
477 }
478 }
479
void fsl_pcibios_fixup_bus(struct pci_bus *bus)
481 {
482 struct pci_controller *hose = pci_bus_to_host(bus);
483 int i, is_pcie = 0, no_link;
484
485 /* The root complex bridge comes up with bogus resources,
486 * we copy the PHB ones in.
487 *
488 * With the current generic PCI code, the PHB bus no longer
489 * has bus->resource[0..4] set, so things are a bit more
490 * tricky.
491 */
492
493 if (fsl_pcie_bus_fixup)
494 is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
495 no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
496
497 if (bus->parent == hose->bus && (is_pcie || no_link)) {
498 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
499 struct resource *res = bus->resource[i];
500 struct resource *par;
501
502 if (!res)
503 continue;
504 if (i == 0)
505 par = &hose->io_resource;
506 else if (i < 4)
507 par = &hose->mem_resources[i-1];
508 else par = NULL;
509
510 res->start = par ? par->start : 0;
511 res->end = par ? par->end : 0;
512 res->flags = par ? par->flags : 0;
513 }
514 }
515 }
516
int fsl_add_bridge(struct platform_device *pdev, int is_primary)
518 {
519 int len;
520 struct pci_controller *hose;
521 struct resource rsrc;
522 const int *bus_range;
523 u8 hdr_type, progif;
524 u32 class_code;
525 struct device_node *dev;
526 struct ccsr_pci __iomem *pci;
527 u16 temp;
528 u32 svr = mfspr(SPRN_SVR);
529
530 dev = pdev->dev.of_node;
531
532 if (!of_device_is_available(dev)) {
533 pr_warn("%pOF: disabled\n", dev);
534 return -ENODEV;
535 }
536
537 pr_debug("Adding PCI host bridge %pOF\n", dev);
538
539 /* Fetch host bridge registers address */
540 if (of_address_to_resource(dev, 0, &rsrc)) {
printk(KERN_WARNING "Can't get pci register base!\n");
542 return -ENOMEM;
543 }
544
545 /* Get bus range if any */
546 bus_range = of_get_property(dev, "bus-range", &len);
547 if (bus_range == NULL || len < 2 * sizeof(int))
548 printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
549 " bus 0\n", dev);
550
551 pci_add_flags(PCI_REASSIGN_ALL_BUS);
552 hose = pcibios_alloc_controller(dev);
553 if (!hose)
554 return -ENOMEM;
555
556 /* set platform device as the parent */
557 hose->parent = &pdev->dev;
558 hose->first_busno = bus_range ? bus_range[0] : 0x0;
559 hose->last_busno = bus_range ? bus_range[1] : 0xff;
560
561 pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
562 (u64)rsrc.start, (u64)resource_size(&rsrc));
563
564 pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
565 if (!hose->private_data)
566 goto no_bridge;
567
568 setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
569 PPC_INDIRECT_TYPE_BIG_ENDIAN);
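/*
 * CFG_ADDR lives at offset 0 of the register block and CFG_DATA at
 * offset 4; both are accessed big-endian.
 */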
570
571 if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
572 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
573
574 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
575 /* use fsl_indirect_read_config for PCIe */
576 hose->ops = &fsl_indirect_pcie_ops;
577 /* For PCIE read HEADER_TYPE to identify controller mode */
578 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
579 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
580 goto no_bridge;
581
582 } else {
583 /* For PCI read PROG to identify controller mode */
584 early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
585 if ((progif & 1) &&
586 !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
587 goto no_bridge;
588 }
589
590 setup_pci_cmd(hose);
591
592 /* check PCI express link status */
593 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
594 hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
595 PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
596 if (fsl_pcie_check_link(hose))
597 hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
598 /* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
599 if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
600 early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
601 class_code &= 0xff;
602 class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
603 early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
604 }
605 } else {
/*
 * Set PBFR (PCI Bus Function Register)[10] = 1 to disable the
 * combining of requests that cross a cacheline boundary into one
 * burst transaction.  PCI-X operation is not affected.
 * Fixes erratum PCI 5 on MPC8548.
 */
613 #define PCI_BUS_FUNCTION 0x44
614 #define PCI_BUS_FUNCTION_MDS 0x400 /* Master disable streaming */
615 if (((SVR_SOC_VER(svr) == SVR_8543) ||
616 (SVR_SOC_VER(svr) == SVR_8545) ||
617 (SVR_SOC_VER(svr) == SVR_8547) ||
618 (SVR_SOC_VER(svr) == SVR_8548)) &&
619 !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
620 early_read_config_word(hose, 0, 0,
621 PCI_BUS_FUNCTION, &temp);
622 temp |= PCI_BUS_FUNCTION_MDS;
623 early_write_config_word(hose, 0, 0,
624 PCI_BUS_FUNCTION, temp);
625 }
626 }
627
628 printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
629 "Firmware bus number: %d->%d\n",
630 (unsigned long long)rsrc.start, hose->first_busno,
631 hose->last_busno);
632
633 pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
634 hose, hose->cfg_addr, hose->cfg_data);
635
636 /* Interpret the "ranges" property */
637 /* This also maps the I/O region and sets isa_io/mem_base */
638 pci_process_bridge_OF_ranges(hose, dev, is_primary);
639
640 /* Setup PEX window registers */
641 setup_pci_atmu(hose);
642
643 /* Set up controller operations */
644 setup_swiotlb_ops(hose);
645
646 return 0;
647
648 no_bridge:
649 iounmap(hose->private_data);
650 /* unmap cfg_data & cfg_addr separately if not on same page */
651 if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
652 ((unsigned long)hose->cfg_addr & PAGE_MASK))
653 iounmap(hose->cfg_data);
654 iounmap(hose->cfg_addr);
655 pcibios_free_controller(hose);
656 return -ENODEV;
657 }
658 #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
659
660 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
661 quirk_fsl_pcie_early);
662
663 #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
664 struct mpc83xx_pcie_priv {
665 void __iomem *cfg_type0;
666 void __iomem *cfg_type1;
667 u32 dev_base;
668 };
669
670 struct pex_inbound_window {
671 u32 ar;
672 u32 tar;
673 u32 barl;
674 u32 barh;
675 };
676
/*
 * Following the U-Boot convention, PCIE outbound window 0 is used for
 * outbound configuration transactions.
 */
681 #define PEX_OUTWIN0_BAR 0xCA4
682 #define PEX_OUTWIN0_TAL 0xCA8
683 #define PEX_OUTWIN0_TAH 0xCAC
684 #define PEX_RC_INWIN_BASE 0xE60
685 #define PEX_RCIWARn_EN 0x1
686
static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
688 {
689 struct pci_controller *hose = pci_bus_to_host(bus);
690
691 if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
692 return PCIBIOS_DEVICE_NOT_FOUND;
/*
 * Workaround for a HW bug: for Type 0 configuration transactions the
 * PCI-E controller does not check the device number bits and just
 * assumes that the device number bits are 0.
 */
698 if (bus->number == hose->first_busno ||
699 bus->primary == hose->first_busno) {
700 if (devfn & 0xf8)
701 return PCIBIOS_DEVICE_NOT_FOUND;
702 }
703
704 if (ppc_md.pci_exclude_device) {
705 if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
706 return PCIBIOS_DEVICE_NOT_FOUND;
707 }
708
709 return PCIBIOS_SUCCESSFUL;
710 }
711
static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
					    unsigned int devfn, int offset)
714 {
715 struct pci_controller *hose = pci_bus_to_host(bus);
716 struct mpc83xx_pcie_priv *pcie = hose->dn->data;
717 u32 dev_base = bus->number << 24 | devfn << 16;
718 int ret;
719
720 ret = mpc83xx_pcie_exclude_device(bus, devfn);
721 if (ret)
722 return NULL;
723
724 offset &= 0xfff;
725
726 /* Type 0 */
727 if (bus->number == hose->first_busno)
728 return pcie->cfg_type0 + offset;
729
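/*
 * Downstream buses go through the type 1 window: retarget outbound
 * window 0 so its low translation address encodes the bus and devfn,
 * caching the last value to avoid redundant register writes.
 */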
730 if (pcie->dev_base == dev_base)
731 goto mapped;
732
733 out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
734
735 pcie->dev_base = dev_base;
736 mapped:
737 return pcie->cfg_type1 + offset;
738 }
739
static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
742 {
743 struct pci_controller *hose = pci_bus_to_host(bus);
744
745 /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
746 if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
747 val &= 0xffffff00;
748
749 return pci_generic_config_write(bus, devfn, offset, len, val);
750 }
751
752 static struct pci_ops mpc83xx_pcie_ops = {
753 .map_bus = mpc83xx_pcie_remap_cfg,
754 .read = pci_generic_config_read,
755 .write = mpc83xx_pcie_write_config,
756 };
757
static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
				     struct resource *reg)
760 {
761 struct mpc83xx_pcie_priv *pcie;
762 u32 cfg_bar;
763 int ret = -ENOMEM;
764
765 pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
766 if (!pcie)
767 return ret;
768
769 pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
770 if (!pcie->cfg_type0)
771 goto err0;
772
773 cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
774 if (!cfg_bar) {
775 /* PCI-E isn't configured. */
776 ret = -ENODEV;
777 goto err1;
778 }
779
780 pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
781 if (!pcie->cfg_type1)
782 goto err1;
783
784 WARN_ON(hose->dn->data);
785 hose->dn->data = pcie;
786 hose->ops = &mpc83xx_pcie_ops;
787 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
788
789 out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
790 out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
791
792 if (fsl_pcie_check_link(hose))
793 hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
794
795 return 0;
796 err1:
797 iounmap(pcie->cfg_type0);
798 err0:
799 kfree(pcie);
800 return ret;
801
802 }
803
int __init mpc83xx_add_bridge(struct device_node *dev)
805 {
806 int ret;
807 int len;
808 struct pci_controller *hose;
809 struct resource rsrc_reg;
810 struct resource rsrc_cfg;
811 const int *bus_range;
812 int primary;
813
814 is_mpc83xx_pci = 1;
815
816 if (!of_device_is_available(dev)) {
817 pr_warn("%pOF: disabled by the firmware.\n",
818 dev);
819 return -ENODEV;
820 }
821 pr_debug("Adding PCI host bridge %pOF\n", dev);
822
823 /* Fetch host bridge registers address */
824 if (of_address_to_resource(dev, 0, &rsrc_reg)) {
825 printk(KERN_WARNING "Can't get pci register base!\n");
826 return -ENOMEM;
827 }
828
829 memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
830
831 if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
832 printk(KERN_WARNING
833 "No pci config register base in dev tree, "
834 "using default\n");
835 /*
836 * MPC83xx supports up to two host controllers
837 * one at 0x8500 has config space registers at 0x8300
838 * one at 0x8600 has config space registers at 0x8380
839 */
840 if ((rsrc_reg.start & 0xfffff) == 0x8500)
841 rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
842 else if ((rsrc_reg.start & 0xfffff) == 0x8600)
843 rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
844 }
845 /*
846 * Controller at offset 0x8500 is primary
847 */
848 if ((rsrc_reg.start & 0xfffff) == 0x8500)
849 primary = 1;
850 else
851 primary = 0;
852
853 /* Get bus range if any */
854 bus_range = of_get_property(dev, "bus-range", &len);
855 if (bus_range == NULL || len < 2 * sizeof(int)) {
856 printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
857 " bus 0\n", dev);
858 }
859
860 pci_add_flags(PCI_REASSIGN_ALL_BUS);
861 hose = pcibios_alloc_controller(dev);
862 if (!hose)
863 return -ENOMEM;
864
865 hose->first_busno = bus_range ? bus_range[0] : 0;
866 hose->last_busno = bus_range ? bus_range[1] : 0xff;
867
868 if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
869 ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
870 if (ret)
871 goto err0;
872 } else {
873 setup_indirect_pci(hose, rsrc_cfg.start,
874 rsrc_cfg.start + 4, 0);
875 }
876
877 printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
878 "Firmware bus number: %d->%d\n",
879 (unsigned long long)rsrc_reg.start, hose->first_busno,
880 hose->last_busno);
881
882 pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
883 hose, hose->cfg_addr, hose->cfg_data);
884
885 /* Interpret the "ranges" property */
886 /* This also maps the I/O region and sets isa_io/mem_base */
887 pci_process_bridge_OF_ranges(hose, dev, primary);
888
889 return 0;
890 err0:
891 pcibios_free_controller(hose);
892 return ret;
893 }
894 #endif /* CONFIG_PPC_83xx */
895
u64 fsl_pci_immrbar_base(struct pci_controller *hose)
897 {
898 #ifdef CONFIG_PPC_83xx
899 if (is_mpc83xx_pci) {
900 struct mpc83xx_pcie_priv *pcie = hose->dn->data;
901 struct pex_inbound_window *in;
902 int i;
903
904 /* Walk the Root Complex Inbound windows to match IMMR base */
905 in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
906 for (i = 0; i < 4; i++) {
907 /* not enabled, skip */
908 if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
909 continue;
910
911 if (get_immrbase() == in_le32(&in[i].tar))
912 return (u64)in_le32(&in[i].barh) << 32 |
913 in_le32(&in[i].barl);
914 }
915
916 printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
917 }
918 #endif
919
920 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
921 if (!is_mpc83xx_pci) {
922 u32 base;
923
924 pci_bus_read_config_dword(hose->bus,
925 PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
926
/*
 * For PEXCSRBAR, bits 3-0 indicate the prefetchable and address
 * type, so they must be masked off when reading the base address.
 */
932 base &= PCI_BASE_ADDRESS_MEM_MASK;
933
934 return base;
935 }
936 #endif
937
938 return 0;
939 }
940
941 #ifdef CONFIG_E500
static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
943 {
944 unsigned int rd, ra, rb, d;
945
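/*
 * Emulate the faulting load: reads from PCI that terminate in error
 * return all ones, so fill the destination register accordingly
 * (sign-extended for the algebraic forms) and report success so the
 * caller can skip the instruction.
 */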
946 rd = get_rt(inst);
947 ra = get_ra(inst);
948 rb = get_rb(inst);
949 d = get_d(inst);
950
951 switch (get_op(inst)) {
952 case 31:
953 switch (get_xop(inst)) {
954 case OP_31_XOP_LWZX:
955 case OP_31_XOP_LWBRX:
956 regs->gpr[rd] = 0xffffffff;
957 break;
958
959 case OP_31_XOP_LWZUX:
960 regs->gpr[rd] = 0xffffffff;
961 regs->gpr[ra] += regs->gpr[rb];
962 break;
963
964 case OP_31_XOP_LBZX:
965 regs->gpr[rd] = 0xff;
966 break;
967
968 case OP_31_XOP_LBZUX:
969 regs->gpr[rd] = 0xff;
970 regs->gpr[ra] += regs->gpr[rb];
971 break;
972
973 case OP_31_XOP_LHZX:
974 case OP_31_XOP_LHBRX:
975 regs->gpr[rd] = 0xffff;
976 break;
977
978 case OP_31_XOP_LHZUX:
979 regs->gpr[rd] = 0xffff;
980 regs->gpr[ra] += regs->gpr[rb];
981 break;
982
983 case OP_31_XOP_LHAX:
984 regs->gpr[rd] = ~0UL;
985 break;
986
987 case OP_31_XOP_LHAUX:
988 regs->gpr[rd] = ~0UL;
989 regs->gpr[ra] += regs->gpr[rb];
990 break;
991
992 default:
993 return 0;
994 }
995 break;
996
997 case OP_LWZ:
998 regs->gpr[rd] = 0xffffffff;
999 break;
1000
1001 case OP_LWZU:
1002 regs->gpr[rd] = 0xffffffff;
1003 regs->gpr[ra] += (s16)d;
1004 break;
1005
1006 case OP_LBZ:
1007 regs->gpr[rd] = 0xff;
1008 break;
1009
1010 case OP_LBZU:
1011 regs->gpr[rd] = 0xff;
1012 regs->gpr[ra] += (s16)d;
1013 break;
1014
1015 case OP_LHZ:
1016 regs->gpr[rd] = 0xffff;
1017 break;
1018
1019 case OP_LHZU:
1020 regs->gpr[rd] = 0xffff;
1021 regs->gpr[ra] += (s16)d;
1022 break;
1023
1024 case OP_LHA:
1025 regs->gpr[rd] = ~0UL;
1026 break;
1027
1028 case OP_LHAU:
1029 regs->gpr[rd] = ~0UL;
1030 regs->gpr[ra] += (s16)d;
1031 break;
1032
1033 default:
1034 return 0;
1035 }
1036
1037 return 1;
1038 }
1039
static int is_in_pci_mem_space(phys_addr_t addr)
1041 {
1042 struct pci_controller *hose;
1043 struct resource *res;
1044 int i;
1045
1046 list_for_each_entry(hose, &hose_list, list_node) {
1047 if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1048 continue;
1049
1050 for (i = 0; i < 3; i++) {
1051 res = &hose->mem_resources[i];
1052 if ((res->flags & IORESOURCE_MEM) &&
1053 addr >= res->start && addr <= res->end)
1054 return 1;
1055 }
1056 }
1057 return 0;
1058 }
1059
int fsl_pci_mcheck_exception(struct pt_regs *regs)
1061 {
1062 u32 inst;
1063 int ret;
1064 phys_addr_t addr = 0;
1065
1066 /* Let KVM/QEMU deal with the exception */
1067 if (regs->msr & MSR_GS)
1068 return 0;
1069
1070 #ifdef CONFIG_PHYS_64BIT
1071 addr = mfspr(SPRN_MCARU);
1072 addr <<= 32;
1073 #endif
1074 addr += mfspr(SPRN_MCAR);
1075
1076 if (is_in_pci_mem_space(addr)) {
1077 if (user_mode(regs))
1078 ret = copy_from_user_nofault(&inst,
1079 (void __user *)regs->nip, sizeof(inst));
1080 else
1081 ret = get_kernel_nofault(inst, (void *)regs->nip);
1082
1083 if (!ret && mcheck_handle_load(regs, inst)) {
1084 regs_add_return_ip(regs, 4);
1085 return 1;
1086 }
1087 }
1088
1089 return 0;
1090 }
1091 #endif
1092
1093 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1094 static const struct of_device_id pci_ids[] = {
1095 { .compatible = "fsl,mpc8540-pci", },
1096 { .compatible = "fsl,mpc8548-pcie", },
1097 { .compatible = "fsl,mpc8610-pci", },
1098 { .compatible = "fsl,mpc8641-pcie", },
1099 { .compatible = "fsl,qoriq-pcie", },
1100 { .compatible = "fsl,qoriq-pcie-v2.1", },
1101 { .compatible = "fsl,qoriq-pcie-v2.2", },
1102 { .compatible = "fsl,qoriq-pcie-v2.3", },
1103 { .compatible = "fsl,qoriq-pcie-v2.4", },
1104 { .compatible = "fsl,qoriq-pcie-v3.0", },
1105
1106 /*
1107 * The following entries are for compatibility with older device
1108 * trees.
1109 */
1110 { .compatible = "fsl,p1022-pcie", },
1111 { .compatible = "fsl,p4080-pcie", },
1112
1113 {},
1114 };
1115
1116 struct device_node *fsl_pci_primary;
1117
void __init fsl_pci_assign_primary(void)
1119 {
1120 struct device_node *np;
1121
1122 /* Callers can specify the primary bus using other means. */
1123 if (fsl_pci_primary)
1124 return;
1125
1126 /* If a PCI host bridge contains an ISA node, it's primary. */
1127 np = of_find_node_by_type(NULL, "isa");
1128 while ((fsl_pci_primary = of_get_parent(np))) {
1129 of_node_put(np);
1130 np = fsl_pci_primary;
1131
1132 if (of_match_node(pci_ids, np) && of_device_is_available(np))
1133 return;
1134 }
1135
1136 /*
1137 * If there's no PCI host bridge with ISA, arbitrarily
1138 * designate one as primary. This can go away once
1139 * various bugs with primary-less systems are fixed.
1140 */
1141 for_each_matching_node(np, pci_ids) {
1142 if (of_device_is_available(np)) {
1143 fsl_pci_primary = np;
1144 of_node_put(np);
1145 return;
1146 }
1147 }
1148 }
1149
1150 #ifdef CONFIG_PM_SLEEP
static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1152 {
1153 struct pci_controller *hose = dev_id;
1154 struct ccsr_pci __iomem *pci = hose->private_data;
1155 u32 dr;
1156
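/*
 * Acknowledge any pending PME/message detect bits by writing the set
 * bits back to the detect register; IRQ_NONE is returned if nothing
 * was pending (the line is shared).
 */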
1157 dr = in_be32(&pci->pex_pme_mes_dr);
1158 if (!dr)
1159 return IRQ_NONE;
1160
1161 out_be32(&pci->pex_pme_mes_dr, dr);
1162
1163 return IRQ_HANDLED;
1164 }
1165
static int fsl_pci_pme_probe(struct pci_controller *hose)
1167 {
1168 struct ccsr_pci __iomem *pci;
1169 struct pci_dev *dev;
1170 int pme_irq;
1171 int res;
1172 u16 pms;
1173
1174 /* Get hose's pci_dev */
1175 dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1176
1177 /* PME Disable */
1178 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1179 pms &= ~PCI_PM_CTRL_PME_ENABLE;
1180 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1181
1182 pme_irq = irq_of_parse_and_map(hose->dn, 0);
1183 if (!pme_irq) {
1184 dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1185
1186 return -ENXIO;
1187 }
1188
1189 res = devm_request_irq(hose->parent, pme_irq,
1190 fsl_pci_pme_handle,
1191 IRQF_SHARED,
1192 "[PCI] PME", hose);
1193 if (res < 0) {
1194 dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1195 irq_dispose_mapping(pme_irq);
1196
1197 return -ENODEV;
1198 }
1199
1200 pci = hose->private_data;
1201
/* Enable the PME turn-off detect (PTOD), enter-L23 detect (ENL23D) and exit-L23 detect (EXL23D) events */
1203 clrbits32(&pci->pex_pme_mes_disr,
1204 PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1205
1206 out_be32(&pci->pex_pme_mes_ier, 0);
1207 setbits32(&pci->pex_pme_mes_ier,
1208 PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1209
1210 /* PME Enable */
1211 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1212 pms |= PCI_PM_CTRL_PME_ENABLE;
1213 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1214
1215 return 0;
1216 }
1217
static void send_pme_turnoff_message(struct pci_controller *hose)
1219 {
1220 struct ccsr_pci __iomem *pci = hose->private_data;
1221 u32 dr;
1222 int i;
1223
1224 /* Send PME_Turn_Off Message Request */
1225 setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1226
/* Wait up to ~150 ms for the turn off to complete */
1228 for (i = 0; i < 150; i++) {
1229 dr = in_be32(&pci->pex_pme_mes_dr);
1230 if (dr) {
1231 out_be32(&pci->pex_pme_mes_dr, dr);
1232 break;
1233 }
1234
1235 udelay(1000);
1236 }
1237 }
1238
static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1240 {
1241 send_pme_turnoff_message(hose);
1242 }
1243
static int fsl_pci_syscore_suspend(void)
1245 {
1246 struct pci_controller *hose, *tmp;
1247
1248 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1249 fsl_pci_syscore_do_suspend(hose);
1250
1251 return 0;
1252 }
1253
static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1255 {
1256 struct ccsr_pci __iomem *pci = hose->private_data;
1257 u32 dr;
1258 int i;
1259
1260 /* Send Exit L2 State Message */
1261 setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1262
/* Wait up to ~150 ms for the exit to complete */
1264 for (i = 0; i < 150; i++) {
1265 dr = in_be32(&pci->pex_pme_mes_dr);
1266 if (dr) {
1267 out_be32(&pci->pex_pme_mes_dr, dr);
1268 break;
1269 }
1270
1271 udelay(1000);
1272 }
1273
1274 setup_pci_atmu(hose);
1275 }
1276
static void fsl_pci_syscore_resume(void)
1278 {
1279 struct pci_controller *hose, *tmp;
1280
1281 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1282 fsl_pci_syscore_do_resume(hose);
1283 }
1284
1285 static struct syscore_ops pci_syscore_pm_ops = {
1286 .suspend = fsl_pci_syscore_suspend,
1287 .resume = fsl_pci_syscore_resume,
1288 };
1289 #endif
1290
void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1292 {
1293 #ifdef CONFIG_PM_SLEEP
1294 fsl_pci_pme_probe(phb);
1295 #endif
1296 }
1297
static int add_err_dev(struct platform_device *pdev)
1299 {
1300 struct platform_device *errdev;
1301 struct mpc85xx_edac_pci_plat_data pd = {
1302 .of_node = pdev->dev.of_node
1303 };
1304
1305 errdev = platform_device_register_resndata(&pdev->dev,
1306 "mpc85xx-pci-edac",
1307 PLATFORM_DEVID_AUTO,
1308 pdev->resource,
1309 pdev->num_resources,
1310 &pd, sizeof(pd));
1311
1312 return PTR_ERR_OR_ZERO(errdev);
1313 }
1314
static int fsl_pci_probe(struct platform_device *pdev)
1316 {
1317 struct device_node *node;
1318 int ret;
1319
1320 node = pdev->dev.of_node;
1321 ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1322 if (ret)
1323 return ret;
1324
1325 ret = add_err_dev(pdev);
1326 if (ret)
1327 dev_err(&pdev->dev, "couldn't register error device: %d\n",
1328 ret);
1329
1330 return 0;
1331 }
1332
1333 static struct platform_driver fsl_pci_driver = {
1334 .driver = {
1335 .name = "fsl-pci",
1336 .of_match_table = pci_ids,
1337 },
1338 .probe = fsl_pci_probe,
1339 };
1340
static int __init fsl_pci_init(void)
1342 {
1343 #ifdef CONFIG_PM_SLEEP
1344 register_syscore_ops(&pci_syscore_pm_ops);
1345 #endif
1346 return platform_driver_register(&fsl_pci_driver);
1347 }
1348 arch_initcall(fsl_pci_init);
1349 #endif
1350