/*
 *	drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *      Dave Rusling (david.rusling@reo.mts.dec.com)
 *      David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include "pci.h"

struct resource_list_x {
	struct resource_list_x *next;
	struct resource *res;
	struct pci_dev *dev;
	resource_size_t start;
	resource_size_t end;
	resource_size_t add_size;
	unsigned long flags;
};

#define free_list(type, head) do {                      \
	struct type *list, *tmp;			\
	for (list = (head)->next; list;) {		\
		tmp = list;				\
		list = list->next;			\
		kfree(tmp);				\
	}						\
	(head)->next = NULL;				\
} while (0)
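
/*
 * Usage note: callers keep a stack-allocated list head with ->next == NULL
 * and pass the node type by name, e.g. free_list(resource_list_x, &head)
 * (see pci_assign_unassigned_bridge_resources() below).
 */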

/**
 * add_to_list() - add a new resource tracker to the list
 * @head:	Head of the list
 * @dev:	Device to which the resource belongs
 * @res:	The resource to be tracked
 * @add_size:	Additional size to be optionally added
 *		to the resource
 */
static void add_to_list(struct resource_list_x *head,
		 struct pci_dev *dev, struct resource *res,
		 resource_size_t add_size)
{
	struct resource_list_x *list = head;
	struct resource_list_x *ln = list->next;
	struct resource_list_x *tmp;

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		pr_warning("add_to_list: kmalloc() failed!\n");
		return;
	}

	tmp->next = ln;
	tmp->res = res;
	tmp->dev = dev;
	tmp->start = res->start;
	tmp->end = res->end;
	tmp->flags = res->flags;
	tmp->add_size = add_size;
	list->next = tmp;
}

static void add_to_failed_list(struct resource_list_x *head,
				struct pci_dev *dev, struct resource *res)
{
	add_to_list(head, dev, res, 0);
}

static void __dev_sort_resources(struct pci_dev *dev,
				 struct resource_list *head)
{
	u16 class = dev->class >> 8;

	/* Don't touch classless devices or host bridges or ioapics.  */
	if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
		return;

	/* Don't touch ioapic devices already enabled by firmware */
	if (class == PCI_CLASS_SYSTEM_PIC) {
		u16 command;
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
			return;
	}

	pdev_sort_resources(dev, head);
}

static inline void reset_resource(struct resource *res)
{
	res->start = 0;
	res->end = 0;
	res->flags = 0;
}

/**
 * adjust_resources_sorted() - satisfy any additional resource requests
 *
 * @add_head : head of the list tracking requests requiring additional
 *             resources
 * @head     : head of the list tracking requests with allocated
 *             resources
 *
 * Walk through each element of the add_head and try to procure
 * additional resources for the element, provided the element
 * is in the head list.
 */
static void adjust_resources_sorted(struct resource_list_x *add_head,
		struct resource_list *head)
{
	struct resource *res;
	struct resource_list_x *list, *tmp, *prev;
	struct resource_list *hlist;
	resource_size_t add_size;
	int idx;

	prev = add_head;
	for (list = add_head->next; list;) {
		res = list->res;
		/* skip resource that has been reset */
		if (!res->flags)
			goto out;

		/* skip this resource if not found in head list */
		for (hlist = head->next; hlist && hlist->res != res;
				hlist = hlist->next);
		if (!hlist) { /* just skip */
			prev = list;
			list = list->next;
			continue;
		}

		idx = res - &list->dev->resource[0];
		add_size = list->add_size;
		if (!resource_size(res) && add_size) {
			res->end = res->start + add_size - 1;
			if (pci_assign_resource(list->dev, idx))
				reset_resource(res);
		} else if (add_size) {
			adjust_resource(res, res->start,
				resource_size(res) + add_size);
		}
out:
		tmp = list;
		prev->next = list = list->next;
		kfree(tmp);
	}
}

/**
 * assign_requested_resources_sorted() - satisfy resource requests
 *
 * @head : head of the list tracking requests for resources
 * @fail_head : head of the list tracking requests that could
 *		not be allocated
 *
 * Satisfy the resource request of each element in the list. Add
 * requests that could not be satisfied to the fail_head list.
 */
static void assign_requested_resources_sorted(struct resource_list *head,
				 struct resource_list_x *fail_head)
{
	struct resource *res;
	struct resource_list *list;
	int idx;

	for (list = head->next; list; list = list->next) {
		res = list->res;
		idx = res - &list->dev->resource[0];
		if (resource_size(res) && pci_assign_resource(list->dev, idx)) {
			if (fail_head && !pci_is_root_bus(list->dev->bus)) {
				/*
				 * if the failed res is for ROM BAR, and it will
				 * be enabled later, don't add it to the list
				 */
				if (!((idx == PCI_ROM_RESOURCE) &&
				      (!(res->flags & IORESOURCE_ROM_ENABLE))))
					add_to_failed_list(fail_head, list->dev, res);
			}
			reset_resource(res);
		}
	}
}

static void __assign_resources_sorted(struct resource_list *head,
				 struct resource_list_x *add_head,
				 struct resource_list_x *fail_head)
{
	/* Satisfy the must-have resource requests */
	assign_requested_resources_sorted(head, fail_head);

	/* Try to satisfy any additional nice-to-have resource
	   requests */
	if (add_head)
		adjust_resources_sorted(add_head, head);
	free_list(resource_list, head);
}

static void pdev_assign_resources_sorted(struct pci_dev *dev,
				 struct resource_list_x *fail_head)
{
	struct resource_list head;

	head.next = NULL;
	__dev_sort_resources(dev, &head);
	__assign_resources_sorted(&head, NULL, fail_head);
}

static void pbus_assign_resources_sorted(const struct pci_bus *bus,
					 struct resource_list_x *add_head,
					 struct resource_list_x *fail_head)
{
	struct pci_dev *dev;
	struct resource_list head;

	head.next = NULL;
	list_for_each_entry(dev, &bus->devices, bus_list)
		__dev_sort_resources(dev, &head);

	__assign_resources_sorted(&head, add_head, fail_head);
}

void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;

	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need.  This allows us to set both IO regs.
		 */
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	res = bus->resource[3];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);

/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there are no I/O ports or memory behind the
   bridge, the corresponding range must be turned off by writing a
   base value greater than the limit to the bridge's base/limit
   registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O. This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write. Ditto 64-bit prefetchable MMIO.  */
static void pci_setup_bridge_io(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, io_upper16;

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
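		/*
		 * PCI_IO_BASE/PCI_IO_LIMIT hold address bits 15:12 of the
		 * 4K-aligned window in their upper nibbles.  Read together
		 * as one dword, the base byte sits in bits 7:0 and the
		 * limit byte in bits 15:8; the upper half is the secondary
		 * status, carried through unchanged below.
		 */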
		l &= 0xffff0000;
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
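		/* base 0xf000 > limit 0x0fff: the I/O window is turned off */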
		l = 0x00f0;
		dev_info(&bridge->dev, "  bridge window [io  disabled]\n");
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}

static void pci_setup_bridge_mmio(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l;

	/* Set up the top and bottom of the PCI Memory segment for this bus. */
	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
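		/*
		 * The 16-bit memory base/limit registers hold address bits
		 * 31:20; written as a single dword, the base occupies the
		 * low word and the limit the high word.
		 */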
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}

static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	bu = lu = 0;
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_PREFETCH) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (res->flags & IORESOURCE_MEM_64) {
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit. */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}

static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_dev *bridge = bus->self;

	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	if (type & IORESOURCE_IO)
		pci_setup_bridge_io(bus);

	if (type & IORESOURCE_MEM)
		pci_setup_bridge_mmio(bus);

	if (type & IORESOURCE_PREFETCH)
		pci_setup_bridge_mmio_pref(bus);

	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}

static void pci_setup_bridge(struct pci_bus *bus)
{
	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	__pci_setup_bridge(bus, type);
}

/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges. If not, the respective
   base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	b_res[1].flags |= IORESOURCE_MEM;

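	/*
	 * Probe for I/O support: a zero in PCI_IO_BASE can mean either an
	 * unimplemented (read-only zero) register or one that firmware
	 * simply left blank, so write a pattern and see whether it sticks.
	 */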
	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/*  DECchip 21050 pass 2 errata: the bridge may miss an address
	    disconnect boundary by one PCI data phase.
	    Workaround: do not use prefetching on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
		    PCI_PREF_RANGE_TYPE_64) {
			b_res[2].flags |= IORESOURCE_MEM_64;
			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
		}
	}

	/* double check if bridge does support 64 bit pref */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					 &mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       mem_base_hi);
	}
}

/* Helper function for sizing routines: find first available
   bus resource of a given type. Note: we intentionally skip
   the bus resources which have already been assigned (that is,
   have non-NULL parent resource). */
static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
{
	int i;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	pci_bus_for_each_resource(bus, r, i) {
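		/* Root buses may list the global ioport/iomem ranges;
		   never hand those out as a sizable bridge window. */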
		if (r == &ioport_resource || r == &iomem_resource)
			continue;
		if (r && (r->flags & type_mask) == type && !r->parent)
			return r;
	}
	return NULL;
}

static resource_size_t calculate_iosize(resource_size_t size,
		resource_size_t min_size,
		resource_size_t size1,
		resource_size_t old_size,
		resource_size_t align)
{
	if (size < min_size)
		size = min_size;
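	/* A zeroed resource has resource_size() == 1 (end - start + 1),
	   so an old size of 1 really means "no window assigned yet". */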
	if (old_size == 1)
		old_size = 0;
	/* To be fixed in 2.5: we should have sort of HAVE_ISA
	   flag in the struct pci_bus. */
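	/*
	 * Rough sketch of the ISA-alias expansion below: only the first
	 * 256 bytes of each 1K I/O block are free of legacy ISA aliases,
	 * so everything beyond the low 0x100 bytes is scaled by 4, e.g.
	 * 0x300 becomes (0x300 & 0xff) + ((0x300 & ~0xff) << 2) = 0xc00.
	 */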
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
	size = ALIGN(size + size1, align);
	if (size < old_size)
		size = old_size;
	return size;
}

static resource_size_t calculate_memsize(resource_size_t size,
		resource_size_t min_size,
		resource_size_t size1,
		resource_size_t old_size,
		resource_size_t align)
{
	if (size < min_size)
		size = min_size;
	if (old_size == 1)
		old_size = 0;
	if (size < old_size)
		size = old_size;
	size = ALIGN(size + size1, align);
	return size;
}

/**
 * pbus_size_io() - size the io window of a given bus
 *
 * @bus : the bus
 * @min_size : the minimum io window that must be allocated
 * @add_size : additional optional io window
 * @add_head : track the additional io window on this list
 *
 * Sizing the IO windows of the PCI-PCI bridge is trivial,
 * since these windows have 4K granularity and the IO ranges
 * of non-bridge PCI devices are limited to 256 bytes.
 * We must be careful with the ISA aliasing though.
 */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
		resource_size_t add_size, struct resource_list_x *add_head)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	unsigned long size = 0, size0 = 0, size1 = 0;

	if (!b_res)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;
			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;
		}
	}
	size0 = calculate_iosize(size, min_size, size1,
			resource_size(b_res), 4096);
	size1 = (!add_head || !add_size) ? size0 :
		calculate_iosize(size, min_size + add_size, size1,
			resource_size(b_res), 4096);
	if (!size0 && !size1) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size0 - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
	if (size1 > size0 && add_head)
		add_to_list(add_head, bus->self, b_res, size1 - size0);
}

/**
 * pbus_size_mem() - size the memory window of a given bus
 *
 * @bus : the bus
 * @mask : mask of resource flag bits to compare against @type
 * @type : the type of bus resource to size
 * @min_size : the minimum memory window that must be allocated
 * @add_size : additional optional memory window
 * @add_head : track the additional memory window on this list
 *
 * Calculate the size of the bus and minimal alignment which
 * guarantees that all child resources fit in this size.
 */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
			 unsigned long type, resource_size_t min_size,
			resource_size_t add_size,
			struct resource_list_x *add_head)
{
	struct pci_dev *dev;
	resource_size_t min_align, align, size, size0, size1;
	resource_size_t aligns[12];	/* Alignments from 1MB to 2GB */
	int order, max_order;
	struct resource *b_res = find_free_bus_resource(bus, type);
	unsigned int mem64_mask = 0;

	if (!b_res)
		return 0;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
	b_res->flags &= ~IORESOURCE_MEM_64;
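	/* Assume a 32-bit window for now; IORESOURCE_MEM_64 is restored
	   at the end only if the bridge and every child resource are
	   64-bit capable. */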

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			resource_size_t r_size;

			if (r->parent || (r->flags & mask) != type)
				continue;
			r_size = resource_size(r);
			/* For bridges size != alignment */
			align = pci_resource_alignment(dev, r);
			order = __ffs(align) - 20;
			if (order > 11) {
				dev_warn(&dev->dev, "disabling BAR %d: %pR "
					 "(bad alignment %#llx)\n", i, r,
					 (unsigned long long) align);
				r->flags = 0;
				continue;
			}
			size += r_size;
			if (order < 0)
				order = 0;
			/* Exclude ranges with size > align from
			   calculation of the alignment. */
			if (r_size == align)
				aligns[order] += align;
			if (order > max_order)
				max_order = order;
			mem64_mask &= r->flags & IORESOURCE_MEM_64;
		}
	}
	align = 0;
	min_align = 0;
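	/*
	 * Derive the window alignment from the per-order totals gathered
	 * above: roughly, the window only needs the full alignment of the
	 * largest BARs when the smaller resources cannot pad out the space
	 * in front of them.
	 */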
	for (order = 0; order <= max_order; order++) {
		resource_size_t align1 = 1;

		align1 <<= (order + 20);

		if (!align)
			min_align = align1;
		else if (ALIGN(align + min_align, min_align) < align1)
			min_align = align1 >> 1;
		align += aligns[order];
	}
	size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
	size1 = (!add_head || !add_size) ? size0 :
		calculate_memsize(size, min_size + add_size, 0,
				resource_size(b_res), min_align);
	if (!size0 && !size1) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return 1;
	}
	b_res->start = min_align;
	b_res->end = size0 + min_align - 1;
	b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
	if (size1 > size0 && add_head)
		add_to_list(add_head, bus->self, b_res, size1 - size0);
	return 1;
}

static void pci_bus_size_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].end = pci_cardbus_io_size - 1;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	b_res[1].start = 0;
	b_res[1].end = pci_cardbus_io_size - 1;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].end = pci_cardbus_mem_size - 1;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;

		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	} else {
		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	}
}

void __ref __pci_bus_size_bridges(struct pci_bus *bus,
			struct resource_list_x *add_head)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t additional_mem_size = 0, additional_io_size = 0;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			__pci_bus_size_bridges(b, add_head);
			break;
		}
	}

	/* The root bus? */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			additional_io_size  = pci_hotplug_io_size;
			additional_mem_size = pci_hotplug_mem_size;
		}
		/*
		 * Fall through
		 */
	default:
		pbus_size_io(bus, 0, additional_io_size, add_head);
		/* If the bridge supports prefetchable range, size it
		   separately. If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
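			/* No pref window: the non-pref window will carry
			   both types of memory resources, so double the
			   additional headroom. */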
			additional_mem_size += additional_mem_size;
		pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head);
		break;
	}
}

void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	__pci_bus_size_bridges(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_size_bridges);

static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
					 struct resource_list_x *add_head,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pbus_assign_resources_sorted(bus, add_head, fail_head);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		__pci_bus_assign_resources(b, add_head, fail_head);

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			if (!pci_is_enabled(dev))
				pci_setup_bridge(b);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			dev_info(&dev->dev, "not setting up bridge for bus "
				 "%04x:%02x\n", pci_domain_nr(b), b->number);
			break;
		}
	}
}

void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);

static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;

	pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);

	b = bridge->subordinate;
	if (!b)
		return;

	__pci_bus_assign_resources(b, NULL, fail_head);

	switch (bridge->class >> 8) {
	case PCI_CLASS_BRIDGE_PCI:
		pci_setup_bridge(b);
		break;

	case PCI_CLASS_BRIDGE_CARDBUS:
		pci_setup_cardbus(b);
		break;

	default:
		dev_info(&bridge->dev, "not setting up bridge for bus "
			 "%04x:%02x\n", pci_domain_nr(b), b->number);
		break;
	}
}

static void pci_bridge_release_resources(struct pci_bus *bus,
					  unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		if ((r->flags & type_mask) != type)
			continue;
		if (!r->parent)
			continue;
		/*
		 * if there are children under that, we should release them
		 * all
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				 "resource %d %pR released\n", idx, r);
			/* keep the old size */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* avoid touching the non-PREF window if we only
		   released PREF resources */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}

enum release_type {
	leaf_only,
	whole_subtree,
};

/*
 * Try to release PCI bridge resources, starting from the leaf bridges,
 * so that a bigger window can be allocated later.
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
						   unsigned long type,
						   enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		if (rel_type == whole_subtree)
			pci_bus_release_bridge_resources(b, type,
						 whole_subtree);
	}

	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, type);
}

static void pci_bus_dump_res(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->end || !res->flags)
			continue;

		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
	}
}

static void pci_bus_dump_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pci_bus_dump_res(bus);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		pci_bus_dump_resources(b);
	}
}

void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;
	struct resource_list_x add_list; /* list of resources that
					want additional resources */
	add_list.next = NULL;
	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		__pci_bus_size_bridges(bus, &add_list);
	}

	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		__pci_bus_assign_resources(bus, &add_list, NULL);
		pci_enable_bridges(bus);
	}
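	/* adjust_resources_sorted() is expected to have consumed and freed
	   every entry on add_list by now; anything left over would be a
	   tracked request that was never processed. */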
	BUG_ON(add_list.next);

	/* dump the resource on buses */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_dump_resources(bus);
	}
}

void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	int tried_times = 0;
	struct resource_list_x head, *list;
	int retval;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	head.next = NULL;
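	/*
	 * Size and assign once; if anything fails, release the bridge
	 * windows of the whole affected subtree, restore the recorded
	 * resource ranges and try a second (final) time.
	 */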

again:
	pci_bus_size_bridges(parent);
	__pci_bridge_assign_resources(bridge, &head);

	tried_times++;

	if (!head.next)
		goto enable_all;

	if (tried_times >= 2) {
		/* still failing; no point in trying again */
		free_list(resource_list_x, &head);
		goto enable_all;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/*
	 * Try to release the leaf bridge's resources that don't fit the
	 * resources of the child devices under that bridge
	 */
	for (list = head.next; list;) {
		struct pci_bus *bus = list->dev->bus;
		unsigned long flags = list->flags;

		pci_bus_release_bridge_resources(bus, flags & type_mask,
						 whole_subtree);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_list(resource_list_x, &head);

	goto again;

enable_all:
	retval = pci_reenable_device(bridge);
	pci_set_master(bridge);
	pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);