/*
 *	linux/arch/alpha/kernel/pci.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 */

/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup
 */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/machvec.h>

#include "proto.h"
#include "pci_impl.h"


/*
 * Some string constants used by the various core logics.
 */

const char *const pci_io_names[] = {
  "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
  "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
};

const char *const pci_mem_names[] = {
  "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
  "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
};

const char pci_hae0_name[] = "HAE0";

/*
 * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
 * assignments.
 */
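
/*
 * (Illustration only, nothing in this file sets the flag: platform setup
 *  code could request probe-only behaviour via the generic helper, e.g.
 *	pci_add_flags(PCI_PROBE_ONLY);
 *  before the buses are scanned.)
 */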

/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;

/*
 * Quirks.
 */

static void __init
quirk_isa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_ISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);

static void __init
quirk_cypress(struct pci_dev *dev)
{
	/* The Notorious Cy82C693 chip.  */

	/* The generic legacy mode IDE fixup in drivers/pci/probe.c
	   doesn't work correctly with the Cypress IDE controller as
	   it has non-standard register layout.  Fix that.  */
	if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
		dev->resource[2].start = dev->resource[3].start = 0;
		dev->resource[2].end = dev->resource[3].end = 0;
		dev->resource[2].flags = dev->resource[3].flags = 0;
		if (PCI_FUNC(dev->devfn) == 2) {
			dev->resource[0].start = 0x170;
			dev->resource[0].end = 0x177;
			dev->resource[1].start = 0x376;
			dev->resource[1].end = 0x376;
		}
	}

	/* The Cypress bridge responds on the PCI bus in the address range
	   0xffff0000-0xffffffff (conventional x86 BIOS ROM).  There is no
	   way to turn this off.  The bridge also supports several extended
	   BIOS ranges (disabled after power-up), and some consoles do turn
	   them on.  So if we use a large direct-map window, or a large SG
	   window, we must avoid the entire 0xfff00000-0xffffffff region.  */
	if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
		if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
			__direct_map_size = 0xfff00000UL - __direct_map_base;
		else {
			struct pci_controller *hose = dev->sysdata;
			struct pci_iommu_arena *pci = hose->sg_pci;
			if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
				pci->size = 0xfff00000UL - pci->dma_base;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);

/* Called for each device after PCI setup is done. */
static void __init
pcibios_fixup_final(struct pci_dev *dev)
{
	unsigned int class = dev->class >> 8;

	if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
		dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
		isa_bridge = dev;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);

/* Just declaring that the power-of-ten prefixes are actually the
   power-of-two ones doesn't make it true :) */
#define KB			1024
#define MB			(1024*KB)
#define GB			(1024*MB)

resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = dev->sysdata;
	unsigned long alignto;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->io_space->start < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + hose->io_space->start;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
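		/* (e.g. a requested start of 0x4150 is rounded up to 0x4400.) */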
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}
	else if	(res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->mem_space->start;

		/*
		 * The following holds at least for the Low Cost
		 * Alpha implementation of the PCI interface:
		 *
		 * In sparse memory address space, the first
		 * octant (16MB) of every 128MB segment is
		 * aliased to the very first 16 MB of the
		 * address space (i.e., it aliases the ISA
		 * memory address space).  Thus, we try to
		 * avoid allocating PCI devices in that range.
		 * Can be allocated in 2nd-7th octant only.
		 * Devices that need more than 112MB of
		 * address space must be accessed through
		 * dense memory space only!
		 */
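		/*
		 * For example (numbers worked out from the code below): a
		 * request landing at 0x10000000 sits in octant 0 of its
		 * 128MB segment and is moved up to 0x11000000; a 64MB
		 * request at 0x17000000 would straddle the segment
		 * boundary at 0x18000000 and is moved to 0x19000000.
		 */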

		/* Align to multiple of size of minimum base.  */
		alignto = max_t(resource_size_t, 0x1000, align);
		start = ALIGN(start, alignto);
		if (hose->sparse_mem_base && size <= 7 * 16*MB) {
			if (((start / (16*MB)) & 0x7) == 0) {
				start &= ~(128*MB - 1);
				start += 16*MB;
				start  = ALIGN(start, alignto);
			}
			if (start/(128*MB) != (start + size - 1)/(128*MB)) {
				start &= ~(128*MB - 1);
				start += (128 + 16)*MB;
				start  = ALIGN(start, alignto);
			}
		}
	}

	return start;
}
#undef KB
#undef MB
#undef GB

static int __init
pcibios_init(void)
{
	if (alpha_mv.init_pci)
		alpha_mv.init_pci();
	return 0;
}

subsys_initcall(pcibios_init);

char * __devinit
pcibios_setup(char *str)
{
	return str;
}

#ifdef ALPHA_RESTORE_SRM_SETUP
static struct pdev_srm_saved_conf *srm_saved_configs;

void __devinit
pdev_save_srm_config(struct pci_dev *dev)
{
	struct pdev_srm_saved_conf *tmp;
	static int printed = 0;

	if (!alpha_using_srm || pci_has_flag(PCI_PROBE_ONLY))
		return;

	if (!printed) {
		printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
		printed = 1;
	}

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		printk(KERN_ERR "%s: kmalloc() failed!\n", __func__);
		return;
	}
	tmp->next = srm_saved_configs;
	tmp->dev = dev;

	pci_save_state(dev);

	srm_saved_configs = tmp;
}

void
pci_restore_srm_config(void)
{
	struct pdev_srm_saved_conf *tmp;

	/* No need to restore if probed only. */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return;

	/* Restore SRM config. */
	for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
		pci_restore_state(tmp->dev);
	}
}
#endif

void __devinit
pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;

	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
		   (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_read_bridge_bases(bus);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pdev_save_srm_config(dev);
	}
}

void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}

/*
 *  If we set up a device for bus mastering, we need to check the latency
 *  timer as certain firmware forgets to set it properly, as seen
 *  on SX164 and LX164 with SRM.
 */
void
pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat >= 16) return;
	printk("PCI: Setting latency timer of device %s to 64\n",
							pci_name(dev));
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
}

void __init
pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			if (pci_has_flag(PCI_PROBE_ONLY) ||
			    (r->flags & IORESOURCE_PCI_FIXED))
				pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}

static void __init
pcibios_claim_console_setup(void)
{
	struct pci_bus *b;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}

void __init
common_init_pci(void)
{
	struct pci_controller *hose;
	struct list_head resources;
	struct pci_bus *bus;
	int next_busno;
	int need_domain_info = 0;
	u32 pci_mem_end;
	u32 sg_base;
	unsigned long end;

	/* Scan all of the recorded PCI controllers.  */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;

		/* Adjust hose mem_space limit to prevent PCI allocations
		   in the iommu windows. */
		pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
		end = hose->mem_space->start + pci_mem_end;
		if (hose->mem_space->end > end)
			hose->mem_space->end = end;

		INIT_LIST_HEAD(&resources);
		pci_add_resource_offset(&resources, hose->io_space,
					hose->io_space->start);
		pci_add_resource_offset(&resources, hose->mem_space,
					hose->mem_space->start);

		bus = pci_scan_root_bus(NULL, next_busno, alpha_mv.pci_ops,
					hose, &resources);
		hose->bus = bus;
		hose->need_domain_info = need_domain_info;
		next_busno = bus->subordinate + 1;
		/* Don't allow 8-bit bus number overflow inside the hose -
		   reserve some space for bridges. */
		if (next_busno > 224) {
			next_busno = 0;
			need_domain_info = 1;
		}
	}

	pcibios_claim_console_setup();

	pci_assign_unassigned_resources();
	pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
}


struct pci_controller * __init
alloc_pci_controller(void)
{
	struct pci_controller *hose;

	hose = alloc_bootmem(sizeof(*hose));

	*hose_tail = hose;
	hose_tail = &hose->next;

	return hose;
}

struct resource * __init
alloc_resource(void)
{
	struct resource *res;

	res = alloc_bootmem(sizeof(*res));

	return res;
}


/* Provide information on locations of various I/O regions in physical
   memory.  Do this on a per-card basis so that we choose the right hose.  */

asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
	struct pci_controller *hose;
	struct pci_dev *dev;

	/* from hose or from bus.devfn */
	if (which & IOBASE_FROM_HOSE) {
		for(hose = hose_head; hose; hose = hose->next)
			if (hose->index == bus) break;
		if (!hose) return -ENODEV;
	} else {
		/* Special hook for ISA access.  */
		if (bus == 0 && dfn == 0) {
			hose = pci_isa_hose;
		} else {
			dev = pci_get_bus_and_slot(bus, dfn);
			if (!dev)
				return -ENODEV;
			hose = dev->sysdata;
			pci_dev_put(dev);
		}
	}

	switch (which & ~IOBASE_FROM_HOSE) {
	case IOBASE_HOSE:
		return hose->index;
	case IOBASE_SPARSE_MEM:
		return hose->sparse_mem_base;
	case IOBASE_DENSE_MEM:
		return hose->dense_mem_base;
	case IOBASE_SPARSE_IO:
		return hose->sparse_io_base;
	case IOBASE_DENSE_IO:
		return hose->dense_io_base;
	case IOBASE_ROOT_BUS:
		return hose->bus->number;
	}

	return -EOPNOTSUPP;
}
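
/* (Illustrative only: from user space on Alpha, the dense-memory base of
   the hose behind a given bus/devfn could be queried roughly like
	syscall(__NR_pciconfig_iobase, IOBASE_DENSE_MEM, bus, dfn);
   glibc's Alpha port has historically exposed this as pciconfig_iobase().)  */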

/* Destroy an __iomem token.  Not copied from lib/iomap.c.  */

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	if (__is_mmio(addr))
		iounmap(addr);
}

EXPORT_SYMBOL(pci_iounmap);

/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);