/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/kmod.h>		/* for hotplug_path */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/cache.h>

#include <asm/page.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */

#undef DEBUG

#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif

LIST_HEAD(pci_root_buses);
LIST_HEAD(pci_devices);

/**
 * pci_find_slot - locate PCI device from a given PCI slot
 * @bus: number of PCI bus on which desired PCI device resides
 * @devfn: encodes number of PCI slot in which the desired PCI
 * device resides and the logical device number within that slot
 * in case of multi-function devices.
 *
 * Given a PCI bus and slot/function number, the desired PCI device
 * is located in the system global list of PCI devices.  If the device
 * is found, a pointer to its data structure is returned.  If no
 * device is found, %NULL is returned.
 */
struct pci_dev *
pci_find_slot(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *dev;

	pci_for_each_dev(dev) {
		if (dev->bus->number == bus && dev->devfn == devfn)
			return dev;
	}
	return NULL;
}

/**
 * pci_find_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @vendor, @device, @ss_vendor and @ss_device, a
 * pointer to its device structure is returned.  Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise, if @from is not %NULL, the search continues from the next
 * device on the global list.
 */
struct pci_dev *
pci_find_subsys(unsigned int vendor, unsigned int device,
		unsigned int ss_vendor, unsigned int ss_device,
		const struct pci_dev *from)
{
	struct list_head *n = from ? from->global_list.next : pci_devices.next;

	while (n != &pci_devices) {
		struct pci_dev *dev = pci_dev_g(n);
		if ((vendor == PCI_ANY_ID || dev->vendor == vendor) &&
		    (device == PCI_ANY_ID || dev->device == device) &&
		    (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) &&
		    (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device))
			return dev;
		n = n->next;
	}
	return NULL;
}


/**
 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @vendor and @device, a pointer to its device
 * structure is returned.  Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise, if @from is not %NULL, the search continues from the next
 * device on the global list.
 */
struct pci_dev *
pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
{
	return pci_find_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
}
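
/*
 * Example (an illustrative sketch, not part of this file; the vendor and
 * device IDs are hypothetical): a driver can walk every matching device
 * in the system like this:
 *
 *	struct pci_dev *dev = NULL;
 *
 *	while ((dev = pci_find_device(0x1234, 0x5678, dev)) != NULL) {
 *		if (pci_enable_device(dev) == 0)
 *			printk(KERN_INFO "claimed device %s\n", dev->slot_name);
 *	}
 */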

/**
 * pci_find_class - begin or continue searching for a PCI device by class
 * @class: search for a PCI device with this class designation
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @class, a pointer to its device structure is
 * returned.  Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise, if @from is not %NULL, the search continues from the next
 * device on the global list.
 */
struct pci_dev *
pci_find_class(unsigned int class, const struct pci_dev *from)
{
	struct list_head *n = from ? from->global_list.next : pci_devices.next;

	while (n != &pci_devices) {
		struct pci_dev *dev = pci_dev_g(n);
		if (dev->class == class)
			return dev;
		n = n->next;
	}
	return NULL;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *
 *  %PCI_CAP_ID_PCIX         PCI-X
 */
int
pci_find_capability(struct pci_dev *dev, int cap)
{
	u16 status;
	u8 pos, id;
	int ttl = 48;

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;
	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &pos);
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pci_read_config_byte(dev, PCI_CB_CAPABILITY_LIST, &pos);
		break;
	default:
		return 0;
	}
	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		pci_read_config_byte(dev, pos + PCI_CAP_LIST_ID, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pci_read_config_byte(dev, pos + PCI_CAP_LIST_NEXT, &pos);
	}
	return 0;
}
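
/*
 * Example (an illustrative sketch): callers usually locate a capability
 * once and then address its registers relative to the returned offset,
 * much as pci_set_power_state() below does for power management:
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (!pm)
 *		return -EIO;
 *	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
 */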

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For a given resource region of a given device, return the resource
 *  region of the parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for(i=0; i<4; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: Power state we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a sleep state shallower than the one we're
 * already in (only D0 can be entered directly from any state).
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */

int
pci_set_power_state(struct pci_dev *dev, int state)
{
	int pm;
	u16 pmcsr;

	/* bound the state we're entering */
	if (state > 3) state = 3;

	/* Validate the current state:
	 * we can always enter D0 and we can always go deeper into
	 * sleep, but a shallower sleep state (other than D0) cannot
	 * be entered directly from a deeper one.
	 */
	if (state > 0 && dev->current_state > state)
		return -EINVAL;
	else if (dev->current_state == state)
		return 0;        /* we're already there */

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm) return -EIO;

	/* check if this device supports the desired state */
	if (state == 1 || state == 2) {
		u16 pmc;
		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
		if (state == 1 && !(pmc & PCI_PM_CAP_D1)) return -EIO;
		else if (state == 2 && !(pmc & PCI_PM_CAP_D2)) return -EIO;
	}

	/* If we're in D3, force the entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	if (dev->current_state >= 3)
		pmcsr = 0;
	else {
		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays;
	 * see PCI PM 1.1 5.6.1 table 18
	 */
	if (state == 3 || dev->current_state == 3) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/100);
	} else if (state == 2 || dev->current_state == 2)
		udelay(200);
	dev->current_state = state;

	return 0;
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 * @buffer: buffer to hold config space context
 *
 * @buffer must be large enough to hold the entire PCI 2.2 config space
 * (>= 64 bytes).
 */
int
pci_save_state(struct pci_dev *dev, u32 *buffer)
{
	int i;
	if (buffer) {
		/* XXX: 100% dword access ok here? */
		for (i = 0; i < 16; i++)
			pci_read_config_dword(dev, i * 4, &buffer[i]);
	}
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 * @buffer: saved PCI config space
 */
int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	/*
	 * otherwise, write the context information we know from bootup.
	 * This works around a problem where warm-booting from Windows
	 * combined with a D3(hot)->D0 transition causes PCI config
	 * header data to be forgotten.
	 */
	else {
		for (i = 0; i < 6; i ++)
			pci_write_config_dword(dev,
					       PCI_BASE_ADDRESS_0 + (i * 4),
					       dev->resource[i].start);
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}
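
/*
 * Example (a minimal sketch of how a driver might wire the helpers above
 * into its pci_driver suspend/resume methods; "mydrv" and the static
 * buffer are hypothetical, sized to the 16 dwords pci_save_state uses):
 *
 *	static u32 mydrv_config[16];
 *
 *	static int mydrv_suspend(struct pci_dev *dev, u32 state)
 *	{
 *		pci_save_state(dev, mydrv_config);
 *		return pci_set_power_state(dev, 3);
 *	}
 *
 *	static int mydrv_resume(struct pci_dev *dev)
 *	{
 *		pci_set_power_state(dev, 0);
 *		return pci_restore_state(dev, mydrv_config);
 *	}
 */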

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable the selected I/O and memory resources. Wake up the device
 *  if it was suspended. Beware, this function can fail.
 */

int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	pci_set_power_state(dev, 0);
	if ((err = pcibios_enable_device(dev, bars)) < 0)
		return err;
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_bars(dev, 0x3F);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: Current state of device.
 * @enable: Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if the device doesn't have PM Capabilities.
 * -EINVAL is returned if the device supports it, but can't generate wake events.
 * 0 if the operation is successful.
 */
int pci_enable_wake(struct pci_dev *dev, u32 state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If the device doesn't support PM Capabilities, but the request is
	 * to disable wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check the device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(value);   /* First bit of mask */

	/* Check if it can generate PME# from the requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

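/**
 * pci_get_interrupt_pin - trace a device's interrupt pin back to its root bridge
 * @dev: PCI device whose interrupt pin is sought
 * @bridge: set to the bridge on the root bus that the pin was traced back to
 *
 * Walks up from @dev, applying the standard PCI-to-PCI bridge swizzle
 * ((pin + slot) % 4) at each hop.  Returns the zero-based interrupt pin
 * number as seen at the host bridge, or -1 if the device doesn't use an
 * interrupt pin.
 */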
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 *	pci_release_region - Release a PCI BAR
 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
 *	@bar: BAR to release
 *
 *	Releases the PCI I/O and memory resources previously reserved by a
 *	successful call to pci_request_region.  Call this function only
 *	after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 *	pci_request_region - Reserve a PCI I/O or memory resource
 *	@pdev: PCI device whose resources are to be reserved
 *	@bar: BAR to be reserved
 *	@res_name: Name to be associated with resource.
 *
 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	Returns 0 on success, or %-EBUSY on error.  A warning
 *	message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
				        pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
		pdev->slot_name);
	return -EBUSY;
}


/**
 *	pci_release_regions - Release reserved PCI I/O and memory resources
 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 *	Releases all PCI I/O and memory resources previously reserved by a
 *	successful call to pci_request_regions.  Call this function only
 *	after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 *	pci_request_regions - Reserve PCI I/O and memory resources
 *	@pdev: PCI device whose resources are to be reserved
 *	@res_name: Name to be associated with resource.
 *
 *	Mark all PCI regions associated with PCI device @pdev as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	Returns 0 on success, or %-EBUSY on error.  A warning
 *	message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, i) & IORESOURCE_IO ? "I/O" : "mem",
		i + 1, /* PCI BAR # */
		pci_resource_len(pdev, i), pci_resource_start(pdev, i),
		pdev->slot_name);
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}
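
/*
 * Example (an illustrative probe sketch; "mydrv" is a hypothetical
 * resource name): the usual order is enable first, then reserve the
 * regions, then turn on bus mastering:
 *
 *	static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = pci_request_regions(pdev, "mydrv");
 *		if (rc)
 *			return rc;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */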

/*
 *  Registration of PCI drivers and handling of hot-pluggable devices.
 */

static LIST_HEAD(pci_drivers);

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
const struct pci_device_id *
pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev)
{
	while (ids->vendor || ids->subvendor || ids->class_mask) {
		if ((ids->vendor == PCI_ANY_ID || ids->vendor == dev->vendor) &&
		    (ids->device == PCI_ANY_ID || ids->device == dev->device) &&
		    (ids->subvendor == PCI_ANY_ID || ids->subvendor == dev->subsystem_vendor) &&
		    (ids->subdevice == PCI_ANY_ID || ids->subdevice == dev->subsystem_device) &&
		    !((ids->class ^ dev->class) & ids->class_mask))
			return ids;
		ids++;
	}
	return NULL;
}

static int
pci_announce_device(struct pci_driver *drv, struct pci_dev *dev)
{
	const struct pci_device_id *id;
	int ret = 0;

	if (drv->id_table) {
		id = pci_match_device(drv->id_table, dev);
		if (!id) {
			ret = 0;
			goto out;
		}
	} else
		id = NULL;

	dev_probe_lock();
	if (drv->probe(dev, id) >= 0) {
		dev->driver = drv;
		ret = 1;
	}
	dev_probe_unlock();
out:
	return ret;
}

/**
 * pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns the number of pci devices which were claimed by the driver
 * during registration.  The driver remains registered even if the
 * return value is zero.
 */
int
pci_register_driver(struct pci_driver *drv)
{
	struct pci_dev *dev;
	int count = 0;

	list_add_tail(&drv->node, &pci_drivers);
	pci_for_each_dev(dev) {
		if (!pci_dev_driver(dev))
			count += pci_announce_device(drv, dev);
	}
	return count;
}
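
/*
 * Example (an illustrative sketch; driver name, IDs and callbacks are
 * hypothetical): a module normally supplies an id table and registers
 * it at init time, unregistering on failure so the list stays clean:
 *
 *	static struct pci_device_id mydrv_ids[] = {
 *		{ 0x1234, 0x5678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 *		{ 0, }
 *	};
 *
 *	static struct pci_driver mydrv_driver = {
 *		name:		"mydrv",
 *		id_table:	mydrv_ids,
 *		probe:		mydrv_probe,
 *		remove:		mydrv_remove,
 *	};
 *
 *	static int __init mydrv_init(void)
 *	{
 *		if (pci_register_driver(&mydrv_driver) > 0)
 *			return 0;
 *		pci_unregister_driver(&mydrv_driver);
 *		return -ENODEV;
 *	}
 */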

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */

void
pci_unregister_driver(struct pci_driver *drv)
{
	struct pci_dev *dev;

	list_del(&drv->node);
	pci_for_each_dev(dev) {
		if (dev->driver == drv) {
			if (drv->remove)
				drv->remove(dev);
			dev->driver = NULL;
		}
	}
}

#ifdef CONFIG_HOTPLUG

#ifndef FALSE
#define FALSE	(0)
#define TRUE	(!FALSE)
#endif

static void
run_sbin_hotplug(struct pci_dev *pdev, int insert)
{
	int i;
	char *argv[3], *envp[8];
	char id[20], sub_id[24], bus_id[24], class_id[20];

	if (!hotplug_path[0])
		return;

	sprintf(class_id, "PCI_CLASS=%04X", pdev->class);
	sprintf(id, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device);
	sprintf(sub_id, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor, pdev->subsystem_device);
	sprintf(bus_id, "PCI_SLOT_NAME=%s", pdev->slot_name);

	i = 0;
	argv[i++] = hotplug_path;
	argv[i++] = "pci";
	argv[i] = 0;

	i = 0;
	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";

	/* other stuff we want to pass to /sbin/hotplug */
	envp[i++] = class_id;
	envp[i++] = id;
	envp[i++] = sub_id;
	envp[i++] = bus_id;
	if (insert)
		envp[i++] = "ACTION=add";
	else
		envp[i++] = "ACTION=remove";
	envp[i] = 0;

	call_usermodehelper (argv [0], argv, envp);
}

/**
 * pci_announce_device_to_drivers - tell the drivers a new device has appeared
 * @dev: the device that has shown up
 *
 * Notifies the drivers that a new device has appeared, and also notifies
 * userspace through /sbin/hotplug.
 */
void
pci_announce_device_to_drivers(struct pci_dev *dev)
{
	struct list_head *ln;

	for(ln=pci_drivers.next; ln != &pci_drivers; ln=ln->next) {
		struct pci_driver *drv = list_entry(ln, struct pci_driver, node);
		if (drv->remove && pci_announce_device(drv, dev))
			break;
	}

	/* notify userspace of new hotplug device */
	run_sbin_hotplug(dev, TRUE);
}

/**
 * pci_insert_device - insert a hotplug device
 * @dev: the device to insert
 * @bus: where to insert it
 *
 * Add a new device to the device lists and notify userspace (/sbin/hotplug).
 */
void
pci_insert_device(struct pci_dev *dev, struct pci_bus *bus)
{
	list_add_tail(&dev->bus_list, &bus->devices);
	list_add_tail(&dev->global_list, &pci_devices);
#ifdef CONFIG_PROC_FS
	pci_proc_attach_device(dev);
#endif
	pci_announce_device_to_drivers(dev);
}

static void
pci_free_resources(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = dev->resource + i;
		if (res->parent)
			release_resource(res);
	}
}

/**
 * pci_remove_device - remove a hotplug device
 * @dev: the device to remove
 *
 * Delete the device structure from the device lists and
 * notify userspace (/sbin/hotplug).
 */
void
pci_remove_device(struct pci_dev *dev)
{
	if (dev->driver) {
		if (dev->driver->remove)
			dev->driver->remove(dev);
		dev->driver = NULL;
	}
	list_del(&dev->bus_list);
	list_del(&dev->global_list);
	pci_free_resources(dev);
#ifdef CONFIG_PROC_FS
	pci_proc_detach_device(dev);
#endif

	/* notify userspace of hotplug device removal */
	run_sbin_hotplug(dev, FALSE);
}

#endif

static struct pci_driver pci_compat_driver = {
	name: "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *
pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;
		for(i=0; i<=PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}


/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

static spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP(rw,size,type) \
int pci_##rw##_config_##size (struct pci_dev *dev, int pos, type value) \
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	spin_lock_irqsave(&pci_lock, flags);				\
	res = dev->bus->ops->rw##_##size(dev, pos, value);		\
	spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

PCI_OP(read, byte, u8 *)
PCI_OP(read, word, u16 *)
PCI_OP(read, dword, u32 *)
PCI_OP(write, byte, u8)
PCI_OP(write, word, u16)
PCI_OP(write, dword, u32)

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_MASTER)) {
		DBG("PCI: Enabling bus mastering for device %s\n", dev->slot_name);
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	pcibios_set_master(dev);
}

#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for implementing the arch-specific pcibios_set_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate the current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, dev->slot_name);

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls pcibios_set_mwi() to do the needed arch specific
 * operations, or the generic MWI-prep function otherwise.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_set_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
		DBG("PCI: Enabling Mem-Wr-Inval for device %s\n", dev->slot_name);
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}
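
/*
 * Example (an illustrative sketch): MWI is an optimization, so callers
 * typically treat failure as non-fatal and carry on without it:
 *
 *	if (pci_set_mwi(dev))
 *		printk(KERN_WARNING "mydrv: MWI not available on %s\n",
 *		       dev->slot_name);
 */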

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables the PCI Memory-Write-Invalidate transaction on the device.
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}
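
/*
 * Example (an illustrative sketch): a driver that prefers 64-bit DMA
 * can fall back to a 32-bit mask before giving up entirely:
 *
 *	if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) &&
 *	    pci_set_dma_mask(pdev, 0xffffffff)) {
 *		printk(KERN_ERR "mydrv: no usable DMA configuration\n");
 *		return -EIO;
 *	}
 */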

int
pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dac_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

/*
 * Translate the low bits of the PCI base
 * to the resource type
 */
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
	if (flags & PCI_BASE_ADDRESS_SPACE_IO)
		return IORESOURCE_IO;

	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		return IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return IORESOURCE_MEM;
}

/*
 * Find the extent of a PCI decode, do sanity checks.
 */
static u32 pci_size(u32 base, u32 maxbase, unsigned long mask)
{
	u32 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;
	size = size & ~(size-1);	/* Get the lowest of them to find the decode size */
	size -= 1;			/* extent = size - 1 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;		/* base == maxbase can be valid only
					   if the BAR has been already
					   programmed with all 1s */
	return size;
}
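
/*
 * Worked example (illustrative): if a memory BAR reads back as 0xffff0000
 * after all 1s were written, then with mask = PCI_BASE_ADDRESS_MEM_MASK
 * the significant bits are 0xffff0000, the lowest set bit gives a decode
 * of 0x00010000 (64K), and pci_size() returns the extent 0x0000ffff.
 */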

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg, next;
	u32 l, sz;
	struct resource *res;

	for(pos=0; pos<howmany; pos = next) {
		next = pos+1;
		res = &dev->resource[pos];
		res->name = dev->name;
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pci_read_config_dword(dev, reg, &l);
		pci_write_config_dword(dev, reg, ~0);
		pci_read_config_dword(dev, reg, &sz);
		pci_write_config_dword(dev, reg, l);
		if (!sz || sz == 0xffffffff)
			continue;
		if (l == 0xffffffff)
			l = 0;
		if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		} else {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		}
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l);
		if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
		    == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
			pci_read_config_dword(dev, reg+4, &l);
			next++;
#if BITS_PER_LONG == 64
			res->start |= ((unsigned long) l) << 32;
			res->end = res->start + sz;
			pci_write_config_dword(dev, reg+4, ~0);
			pci_read_config_dword(dev, reg+4, &sz);
			pci_write_config_dword(dev, reg+4, l);
			if (~sz)
				res->end = res->start + 0xffffffff +
						(((unsigned long) ~sz) << 32);
#else
			if (l) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit address for device %s\n", dev->slot_name);
				res->start = 0;
				res->flags = 0;
				continue;
			}
#endif
		}
	}
	if (rom) {
		dev->rom_base_reg = rom;
		res = &dev->resource[PCI_ROM_RESOURCE];
		res->name = dev->name;
		pci_read_config_dword(dev, rom, &l);
		pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
		pci_read_config_dword(dev, rom, &sz);
		pci_write_config_dword(dev, rom, l);
		if (l == 0xffffffff)
			l = 0;
		if (sz && sz != 0xffffffff) {
			sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK);
			if (!sz)
				return;
			res->flags = (l & PCI_ROM_ADDRESS_ENABLE) |
			  IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
			res->start = l & PCI_ROM_ADDRESS_MASK;
			res->end = res->start + (unsigned long) sz;
		}
	}
}

void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;
	int i;

	if (!dev)		/* It's a host bus, nothing to read */
		return;

	if (dev->transparent) {
		printk("Transparent bridge - %s\n", dev->name);
		for(i = 0; i < 4; i++)
			child->resource[i] = child->parent->resource[i];
		return;
	}

	for(i=0; i<3; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base && base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		res->start = base;
		res->end = limit + 0xfff;
	}

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->start = base;
		res->end = limit + 0xfffff;
	}

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
#if BITS_PER_LONG == 64
		base |= ((long) mem_base_hi) << 32;
		limit |= ((long) mem_limit_hi) << 32;
#else
		if (mem_base_hi || mem_limit_hi) {
			printk(KERN_ERR "PCI: Unable to handle 64-bit address space for %s\n", child->name);
			return;
		}
#endif
	}
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->start = base;
		res->end = limit + 0xfffff;
	}
}

static struct pci_bus * __devinit pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		memset(b, 0, sizeof(*b));
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
	}
	return b;
}

struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	list_add_tail(&child->node, &parent->children);
	child->self = dev;
	dev->subordinate = child;
	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	/* Set up default resource pointers and names.. */
	for (i = 0; i < 4; i++) {
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}

	return child;
}

unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus);

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
static int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
{
	unsigned int buses;
	unsigned short cr;
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	DBG("Scanning behind PCI bridge %s, config %06x, pass %d\n", dev->slot_name, buses & 0xffffff, pass);
	if ((buses & 0xffff00) && !pcibios_assign_all_busses()) {
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			return max;

		child = pci_add_new_bus(bus, dev, 0);
		if (!child)
			return max;

		child->primary = buses & 0xFF;
		child->secondary = (buses >> 8) & 0xFF;
		child->subordinate = (buses >> 16) & 0xFF;
		child->number = child->secondary;
		if (!is_cardbus) {
			unsigned int cmax = pci_do_scan_bus(child);
			if (cmax > max) max = cmax;
		} else {
			unsigned int cmax = child->subordinate;
			if (cmax > max) max = cmax;
		}
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass. We also keep all address decoders
		 * on the bridge disabled during scanning.  FIXME: Why?
		 */
		if (!pass)
			return max;
		pci_read_config_word(dev, PCI_COMMAND, &cr);
		pci_write_config_word(dev, PCI_COMMAND, 0x0000);
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		child = pci_add_new_bus(bus, dev, ++max);
		if (!child)
			return max;

		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->secondary)   <<  8)
		      | ((unsigned int)(child->subordinate) << 16);
		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
		if (!is_cardbus) {
			/* Now we can scan all subordinate buses... */
			max = pci_do_scan_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			max += 3;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
		pci_write_config_word(dev, PCI_COMMAND, cr);
	}
	sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);
	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and I/O-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and -1 if the device is of an unknown type (not
 * normal, bridge or CardBus).
 */
int pci_setup_device(struct pci_dev * dev)
{
	u32 class;

	sprintf(dev->slot_name, "%02x:%02x.%d", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
	sprintf(dev->name, "PCI device %04x:%04x", dev->vendor, dev->device);

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	class >>= 8;				    /* upper 3 bytes */
	dev->class = class;
	class >>= 8;

	DBG("Found %02x:%02x [%04x/%04x] %06x %02x\n", dev->bus->number, dev->devfn, dev->vendor, dev->device, class, dev->hdr_type);

	/* "Unknown power state" */
	dev->current_state = 4;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that a subtractive
		   decoding (i.e. transparent) bridge must have a programming
		   interface code of 0x01. */
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
			dev->slot_name, dev->hdr_type);
		return -1;

	bad:
		printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
		       dev->slot_name, class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
struct pci_dev * __devinit pci_scan_device(struct pci_dev *temp)
{
	struct pci_dev *dev;
	u32 l;

	if (pci_read_config_dword(temp, PCI_VENDOR_ID, &l))
		return NULL;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
		return NULL;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	memcpy(dev, temp, sizeof(*dev));
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;
	if (pci_setup_device(dev) < 0) {
		kfree(dev);
		dev = NULL;
	}
	return dev;
}

struct pci_dev * __devinit pci_scan_slot(struct pci_dev *temp)
{
	struct pci_bus *bus = temp->bus;
	struct pci_dev *dev;
	struct pci_dev *first_dev = NULL;
	int func = 0;
	int is_multi = 0;
	u8 hdr_type;

	for (func = 0; func < 8; func++, temp->devfn++) {
		if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
			continue;
		temp->hdr_type = hdr_type & 0x7f;

		dev = pci_scan_device(temp);
		if (!pcibios_scan_all_fns() && func == 0) {
			if (!dev)
				break;
		} else {
			if (!dev)
				continue;
			is_multi = 1;
		}

		pci_name_device(dev);
		if (!first_dev) {
			is_multi = hdr_type & 0x80;
			first_dev = dev;
		}

		/*
		 * Link the device to both the global PCI device chain and
		 * the per-bus list of devices.
		 */
		list_add_tail(&dev->global_list, &pci_devices);
		list_add_tail(&dev->bus_list, &bus->devices);

		/* Fix up broken headers */
		pci_fixup_device(PCI_FIXUP_HEADER, dev);

		/*
		 * If this is a single function device
		 * don't scan past the first function.
		 */
		if (!is_multi)
			break;

	}
	return first_dev;
}

unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
{
	unsigned int devfn, max, pass;
	struct list_head *ln;
	struct pci_dev *dev, dev0;

	DBG("Scanning bus %02x\n", bus->number);
	max = bus->secondary;

	/* Create a device template */
	memset(&dev0, 0, sizeof(dev0));
	dev0.bus = bus;
	dev0.sysdata = bus->sysdata;

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8) {
		dev0.devfn = devfn;
		pci_scan_slot(&dev0);
	}

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	DBG("Fixups for bus %02x\n", bus->number);
	pcibios_fixup_bus(bus);
	for (pass=0; pass < 2; pass++)
		for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
			dev = pci_dev_b(ln);
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	DBG("Bus scan for %02x returning with max=%02x\n", bus->number, max);
	return max;
}

int __devinit pci_bus_exists(const struct list_head *list, int nr)
{
	const struct list_head *l;

	for(l=list->next; l != list; l = l->next) {
		const struct pci_bus *b = pci_bus_b(l);
		if (b->number == nr || pci_bus_exists(&b->children, nr))
			return 1;
	}
	return 0;
}

struct pci_bus * __devinit pci_alloc_primary_bus(int bus)
{
	struct pci_bus *b;

	if (pci_bus_exists(&pci_root_buses, bus)) {
		/* If we already got to this bus through a different bridge, ignore it */
		DBG("PCI: Bus %02x already known\n", bus);
		return NULL;
	}

	b = pci_alloc_bus();
	list_add_tail(&b->node, &pci_root_buses);

	b->number = b->secondary = bus;
	b->resource[0] = &ioport_resource;
	b->resource[1] = &iomem_resource;
	return b;
}

struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
{
	struct pci_bus *b = pci_alloc_primary_bus(bus);
	if (b) {
		b->sysdata = sysdata;
		b->ops = ops;
		b->subordinate = pci_do_scan_bus(b);
	}
	return b;
}
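
/*
 * Example (an illustrative sketch; "my_pci_ops" stands in for whatever
 * struct pci_ops the architecture provides): arch setup code typically
 * scans the root bus once its config-space accessors are known:
 *
 *	struct pci_bus *root = pci_scan_bus(0, &my_pci_ops, NULL);
 */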
1613 
1614 #ifdef CONFIG_PM
1615 
1616 /*
1617  * PCI Power management..
1618  *
1619  * This needs to be done centralized, so that we power manage PCI
1620  * devices in the right order: we should not shut down PCI bridges
1621  * before we've shut down the devices behind them, and we should
1622  * not wake up devices before we've woken up the bridge to the
1623  * device.. Eh?
1624  *
1625  * We do not touch devices that don't have a driver that exports
1626  * a suspend/resume function. That is just too dangerous. If the default
1627  * PCI suspend/resume functions work for a device, the driver can
1628  * easily implement them (ie just have a suspend function that calls
1629  * the pci_set_power_state() function).
1630  */
1631 
pci_pm_save_state_device(struct pci_dev * dev,u32 state)1632 static int pci_pm_save_state_device(struct pci_dev *dev, u32 state)
1633 {
1634 	int error = 0;
1635 	if (dev) {
1636 		struct pci_driver *driver = dev->driver;
1637 		if (driver && driver->save_state)
1638 			error = driver->save_state(dev,state);
1639 	}
1640 	return error;
1641 }
1642 
pci_pm_suspend_device(struct pci_dev * dev,u32 state)1643 static int pci_pm_suspend_device(struct pci_dev *dev, u32 state)
1644 {
1645 	int error = 0;
1646 	if (dev) {
1647 		struct pci_driver *driver = dev->driver;
1648 		if (driver && driver->suspend)
1649 			error = driver->suspend(dev,state);
1650 	}
1651 	return error;
1652 }
1653 
pci_pm_resume_device(struct pci_dev * dev)1654 static int pci_pm_resume_device(struct pci_dev *dev)
1655 {
1656 	int error = 0;
1657 	if (dev) {
1658 		struct pci_driver *driver = dev->driver;
1659 		if (driver && driver->resume)
1660 			error = driver->resume(dev);
1661 	}
1662 	return error;
1663 }
1664 
pci_pm_save_state_bus(struct pci_bus * bus,u32 state)1665 static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
1666 {
1667 	struct list_head *list;
1668 	int error = 0;
1669 
1670 	list_for_each(list, &bus->children) {
1671 		error = pci_pm_save_state_bus(pci_bus_b(list),state);
1672 		if (error) return error;
1673 	}
1674 	list_for_each(list, &bus->devices) {
1675 		error = pci_pm_save_state_device(pci_dev_b(list),state);
1676 		if (error) return error;
1677 	}
1678 	return 0;
1679 }
1680 
pci_pm_suspend_bus(struct pci_bus * bus,u32 state)1681 static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
1682 {
1683 	struct list_head *list;
1684 
1685 	/* Walk the bus children list */
1686 	list_for_each(list, &bus->children)
1687 		pci_pm_suspend_bus(pci_bus_b(list),state);
1688 
1689 	/* Walk the device children list */
1690 	list_for_each(list, &bus->devices)
1691 		pci_pm_suspend_device(pci_dev_b(list),state);
1692 	return 0;
1693 }
1694 
pci_pm_resume_bus(struct pci_bus * bus)1695 static int pci_pm_resume_bus(struct pci_bus *bus)
1696 {
1697 	struct list_head *list;
1698 
1699 	/* Walk the device children list */
1700 	list_for_each(list, &bus->devices)
1701 		pci_pm_resume_device(pci_dev_b(list));
1702 
1703 	/* And then walk the bus children */
1704 	list_for_each(list, &bus->children)
1705 		pci_pm_resume_bus(pci_bus_b(list));
1706 	return 0;
1707 }
1708 
pci_pm_save_state(u32 state)1709 static int pci_pm_save_state(u32 state)
1710 {
1711 	struct list_head *list;
1712 	struct pci_bus *bus;
1713 	int error = 0;
1714 
1715 	list_for_each(list, &pci_root_buses) {
1716 		bus = pci_bus_b(list);
1717 		error = pci_pm_save_state_bus(bus,state);
1718 		if (!error)
1719 			error = pci_pm_save_state_device(bus->self,state);
1720 	}
1721 	return error;
1722 }
1723 
1724 static int pci_pm_suspend(u32 state)
1725 {
1726 	struct list_head *list;
1727 	struct pci_bus *bus;
1728 
1729 	list_for_each(list, &pci_root_buses) {
1730 		bus = pci_bus_b(list);
1731 		pci_pm_suspend_bus(bus,state);
1732 		pci_pm_suspend_device(bus->self,state);
1733 	}
1734 	return 0;
1735 }
1736 
1737 int pci_pm_resume(void)
1738 {
1739 	struct list_head *list;
1740 	struct pci_bus *bus;
1741 
1742 	list_for_each(list, &pci_root_buses) {
1743 		bus = pci_bus_b(list);
1744 		pci_pm_resume_device(bus->self);
1745 		pci_pm_resume_bus(bus);
1746 	}
1747 	return 0;
1748 }
1749 
1750 static int
1751 pci_pm_callback(struct pm_dev *pm_device, pm_request_t rqst, void *data)
1752 {
1753 	int error = 0;
1754 
1755 	switch (rqst) {
1756 	case PM_SAVE_STATE:
1757 		error = pci_pm_save_state((unsigned long)data);
1758 		break;
1759 	case PM_SUSPEND:
1760 		error = pci_pm_suspend((unsigned long)data);
1761 		break;
1762 	case PM_RESUME:
1763 		error = pci_pm_resume();
1764 		break;
1765 	default: break;
1766 	}
1767 	return error;
1768 }
1769 
1770 #endif
1771 
1772 /*
1773  * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
1774  * that drivers of bus-mastering controllers can easily use small blocks.
1775  * This should probably share the guts of the slab allocator.
1776  */
1777 
1778 struct pci_pool {	/* the pool */
1779 	struct list_head	page_list;
1780 	spinlock_t		lock;
1781 	size_t			blocks_per_page;
1782 	size_t			size;
1783 	int			flags;
1784 	struct pci_dev		*dev;
1785 	size_t			allocation;
1786 	char			name [32];
1787 	wait_queue_head_t	waitq;
1788 };
1789 
1790 struct pci_page {	/* cacheable header for 'allocation' bytes */
1791 	struct list_head	page_list;
1792 	void			*vaddr;
1793 	dma_addr_t		dma;
1794 	unsigned long		bitmap [0];
1795 };
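/*
 * Note: bitmap[] above is a zero-length trailer; pool_alloc_page()
 * below kmallocs the header plus one bit per block in the page,
 * rounded up to whole longs.  A set bit means that block is free.
 */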
1796 
1797 #define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
1798 #define	POOL_POISON_BYTE	0xa7
1799 
1800 // #define CONFIG_PCIPOOL_DEBUG
1801 
1802 
1803 /**
1804  * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
1805  * @name: name of pool, for diagnostics
1806  * @pdev: pci device that will be doing the DMA
1807  * @size: size of the blocks in this pool.
1808  * @align: alignment requirement for blocks; must be a power of two
1809  * @allocation: returned blocks won't cross this boundary (or zero)
1810  * @flags: SLAB_* flags (not all are supported).
1811  *
1812  * Returns a pci allocation pool with the requested characteristics, or
1813  * %NULL if one can't be created.  Given one of these pools, pci_pool_alloc()
1814  * may be used to allocate memory.  Such memory will all have "consistent"
1815  * DMA mappings, accessible by the device and its driver without using
1816  * cache flushing primitives.  The actual size of blocks allocated may be
1817  * larger than requested because of alignment.
1818  *
1819  * If allocation is nonzero, objects returned from pci_pool_alloc() won't
1820  * cross that size boundary.  This is useful for devices which have
1821  * addressing restrictions on individual DMA transfers, such as not crossing
1822  * boundaries of 4KBytes.
1823  */
1824 struct pci_pool *
1825 pci_pool_create (const char *name, struct pci_dev *pdev,
1826 	size_t size, size_t align, size_t allocation, int flags)
1827 {
1828 	struct pci_pool		*retval;
1829 
1830 	if (align == 0)
1831 		align = 1;
1832 	if (size == 0)
1833 		return NULL;
1834 	else if (size < align)
1835 		size = align;
1836 	else if ((size % align) != 0) {
1837 		size += align - 1;	/* round up to a multiple of align */
1838 		size &= ~(align - 1);
1839 	}
1840 
1841 	if (allocation == 0) {
1842 		if (PAGE_SIZE < size)
1843 			allocation = size;
1844 		else
1845 			allocation = PAGE_SIZE;
1846 		// FIXME: round up for less fragmentation
1847 	} else if (allocation < size)
1848 		return NULL;
1849 
1850 	if (!(retval = kmalloc (sizeof *retval, flags)))
1851 		return retval;
1852 
1853 #ifdef	CONFIG_PCIPOOL_DEBUG
1854 	flags |= SLAB_POISON;
1855 #endif
1856 
1857 	strncpy (retval->name, name, sizeof retval->name);
1858 	retval->name [sizeof retval->name - 1] = 0;
1859 
1860 	retval->dev = pdev;
1861 	INIT_LIST_HEAD (&retval->page_list);
1862 	spin_lock_init (&retval->lock);
1863 	retval->size = size;
1864 	retval->flags = flags;
1865 	retval->allocation = allocation;
1866 	retval->blocks_per_page = allocation / size;
1867 	init_waitqueue_head (&retval->waitq);
1868 
1869 #ifdef CONFIG_PCIPOOL_DEBUG
1870 	printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
1871 		pdev ? pdev->slot_name : NULL, retval->name, size,
1872 		retval->blocks_per_page, allocation);
1873 #endif
1874 
1875 	return retval;
1876 }
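/*
 * For illustration only, a hedged usage sketch (the "foo" names are
 * hypothetical): a driver needing 64-byte descriptors that must never
 * cross a 4 KByte boundary might do
 *
 *	struct pci_pool *pool;
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	pool = pci_pool_create("foo_desc", pdev, 64, 64, 4096, SLAB_KERNEL);
 *	if (pool) {
 *		desc = pci_pool_alloc(pool, SLAB_KERNEL, &dma);
 *		... program the device with dma, use desc via the CPU ...
 *		pci_pool_free(pool, desc, dma);
 *		pci_pool_destroy(pool);
 *	}
 */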
1877 
1878 
1879 static struct pci_page *
1880 pool_alloc_page (struct pci_pool *pool, int mem_flags)
1881 {
1882 	struct pci_page	*page;
1883 	int		mapsize;
1884 
1885 	mapsize = pool->blocks_per_page;
1886 	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
1887 	mapsize *= sizeof (long);
1888 
1889 	page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
1890 	if (!page)
1891 		return NULL;
1892 	page->vaddr = pci_alloc_consistent (pool->dev,
1893 					    pool->allocation,
1894 					    &page->dma);
1895 	if (page->vaddr) {
1896 		memset (page->bitmap, 0xff, mapsize);	// bit set == free
1897 		if (pool->flags & SLAB_POISON)
1898 			memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
1899 		list_add (&page->page_list, &pool->page_list);
1900 	} else {
1901 		kfree (page);
1902 		page = NULL;
1903 	}
1904 	return page;
1905 }
1906 
1907 
1908 static inline int
1909 is_page_busy (int blocks, unsigned long *bitmap)
1910 {
1911 	while (blocks > 0) {
1912 		if (*bitmap++ != ~0UL)
1913 			return 1;
1914 		blocks -= BITS_PER_LONG;
1915 	}
1916 	return 0;
1917 }
1918 
1919 static void
1920 pool_free_page (struct pci_pool *pool, struct pci_page *page)
1921 {
1922 	dma_addr_t	dma = page->dma;
1923 
1924 	if (pool->flags & SLAB_POISON)
1925 		memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
1926 	pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
1927 	list_del (&page->page_list);
1928 	kfree (page);
1929 }
1930 
1931 
1932 /**
1933  * pci_pool_destroy - destroys a pool of pci memory blocks.
1934  * @pool: pci pool that will be destroyed
1935  *
1936  * Caller guarantees that no more memory from the pool is in use,
1937  * and that nothing will try to use the pool after this call.
1938  */
1939 void
1940 pci_pool_destroy (struct pci_pool *pool)
1941 {
1942 	unsigned long		flags;
1943 
1944 #ifdef CONFIG_PCIPOOL_DEBUG
1945 	printk (KERN_DEBUG "pcipool destroy %s/%s\n",
1946 		pool->dev ? pool->dev->slot_name : NULL,
1947 		pool->name);
1948 #endif
1949 
1950 	spin_lock_irqsave (&pool->lock, flags);
1951 	while (!list_empty (&pool->page_list)) {
1952 		struct pci_page		*page;
1953 		page = list_entry (pool->page_list.next,
1954 				struct pci_page, page_list);
1955 		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
1956 			printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
1957 				pool->dev ? pool->dev->slot_name : NULL,
1958 				pool->name, page->vaddr);
1959 			/* leak the still-in-use consistent memory */
1960 			list_del (&page->page_list);
1961 			kfree (page);
1962 		} else
1963 			pool_free_page (pool, page);
1964 	}
1965 	spin_unlock_irqrestore (&pool->lock, flags);
1966 	kfree (pool);
1967 }
1968 
1969 
1970 /**
1971  * pci_pool_alloc - get a block of consistent memory
1972  * @pool: pci pool that will produce the block
1973  * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
1974  * @handle: pointer to dma address of block
1975  *
1976  * This returns the kernel virtual address of a currently unused block,
1977  * and reports its dma address through the handle.
1978  * If such a memory block can't be allocated, %NULL is returned.
1979  */
1980 void *
1981 pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
1982 {
1983 	unsigned long		flags;
1984 	struct list_head	*entry;
1985 	struct pci_page		*page;
1986 	int			map, block;
1987 	size_t			offset;
1988 	void			*retval;
1989 
1990 restart:
1991 	spin_lock_irqsave (&pool->lock, flags);
1992 	list_for_each (entry, &pool->page_list) {
1993 		int		i;
1994 		page = list_entry (entry, struct pci_page, page_list);
1995 		/* only cacheable accesses here ... */
1996 		for (map = 0, i = 0;
1997 				i < pool->blocks_per_page;
1998 				i += BITS_PER_LONG, map++) {
1999 			if (page->bitmap [map] == 0)
2000 				continue;
2001 			block = ffz (~ page->bitmap [map]);
2002 			if ((i + block) < pool->blocks_per_page) {
2003 				clear_bit (block, &page->bitmap [map]);
2004 				offset = (BITS_PER_LONG * map) + block;
2005 				offset *= pool->size;
2006 				goto ready;
2007 			}
2008 		}
2009 	}
2010 	if (!(page = pool_alloc_page (pool, mem_flags))) {
2011 		if (mem_flags == SLAB_KERNEL) {
2012 			DECLARE_WAITQUEUE (wait, current);
2013 
2014 			current->state = TASK_INTERRUPTIBLE;
2015 			add_wait_queue (&pool->waitq, &wait);
2016 			spin_unlock_irqrestore (&pool->lock, flags);
2017 
2018 			schedule_timeout (POOL_TIMEOUT_JIFFIES);
2019 
2020 			current->state = TASK_RUNNING;
2021 			remove_wait_queue (&pool->waitq, &wait);
2022 			goto restart;
2023 		}
2024 		retval = NULL;
2025 		goto done;
2026 	}
2027 
2028 	clear_bit (0, &page->bitmap [0]);
2029 	offset = 0;
2030 ready:
2031 	retval = offset + page->vaddr;
2032 	*handle = offset + page->dma;
2033 done:
2034 	spin_unlock_irqrestore (&pool->lock, flags);
2035 	return retval;
2036 }
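/*
 * Note that only SLAB_KERNEL callers block and retry above; with
 * SLAB_ATOMIC (e.g. from interrupt context) a full pool simply yields
 * NULL, so such callers must check the result, along the lines of
 *
 *	desc = pci_pool_alloc(pool, SLAB_ATOMIC, &dma);
 *	if (!desc)
 *		return -ENOMEM;		-- hypothetical error path
 */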
2037 
2038 
2039 static struct pci_page *
2040 pool_find_page (struct pci_pool *pool, dma_addr_t dma)
2041 {
2042 	unsigned long		flags;
2043 	struct list_head	*entry;
2044 	struct pci_page		*page;
2045 
2046 	spin_lock_irqsave (&pool->lock, flags);
2047 	list_for_each (entry, &pool->page_list) {
2048 		page = list_entry (entry, struct pci_page, page_list);
2049 		if (dma < page->dma)
2050 			continue;
2051 		if (dma < (page->dma + pool->allocation))
2052 			goto done;
2053 	}
2054 	page = NULL;
2055 done:
2056 	spin_unlock_irqrestore (&pool->lock, flags);
2057 	return page;
2058 }
2059 
2060 
2061 /**
2062  * pci_pool_free - put block back into pci pool
2063  * @pool: the pci pool holding the block
2064  * @vaddr: virtual address of block
2065  * @dma: dma address of block
2066  *
2067  * Caller promises neither device nor driver will again touch this block
2068  * unless it is first re-allocated.
2069  */
2070 void
2071 pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
2072 {
2073 	struct pci_page		*page;
2074 	unsigned long		flags;
2075 	int			map, block;
2076 
2077 	if ((page = pool_find_page (pool, dma)) == NULL) {
2078 		printk (KERN_ERR "pci_pool_free %s/%s, %p/%x (bad dma)\n",
2079 			pool->dev ? pool->dev->slot_name : NULL,
2080 			pool->name, vaddr, (int) (dma & 0xffffffff));
2081 		return;
2082 	}
2083 #ifdef	CONFIG_PCIPOOL_DEBUG
2084 	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
2085 		printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%x\n",
2086 			pool->dev ? pool->dev->slot_name : NULL,
2087 			pool->name, vaddr, (int) (dma & 0xffffffff));
2088 		return;
2089 	}
2090 #endif
2091 
2092 	block = dma - page->dma;
2093 	block /= pool->size;
2094 	map = block / BITS_PER_LONG;
2095 	block %= BITS_PER_LONG;
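	/*
	 * Worked example: with pool->size == 64 and BITS_PER_LONG == 32,
	 * an offset of 0x900 into the page is block 36, i.e. bit 4 of
	 * bitmap word 1.
	 */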
2096 
2097 #ifdef	CONFIG_PCIPOOL_DEBUG
2098 	if (page->bitmap [map] & (1UL << block)) {
2099 		printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
2100 			pool->dev ? pool->dev->slot_name : NULL,
2101 			pool->name, (int) (dma & 0xffffffff));
2102 		return;
2103 	}
2104 #endif
2105 	if (pool->flags & SLAB_POISON)
2106 		memset (vaddr, POOL_POISON_BYTE, pool->size);
2107 
2108 	spin_lock_irqsave (&pool->lock, flags);
2109 	set_bit (block, &page->bitmap [map]);
2110 	if (waitqueue_active (&pool->waitq))
2111 		wake_up (&pool->waitq);
2112 	/*
2113 	 * Resist the temptation to do
2114 	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
2115 	 * here: it is not interrupt-safe.  Better to have empty pages hang around.
2116 	 */
2117 	spin_unlock_irqrestore (&pool->lock, flags);
2118 }
2119 
2120 
2121 void __devinit pci_init(void)
2122 {
2123 	struct pci_dev *dev;
2124 
2125 	pcibios_init();
2126 
2127 	pci_for_each_dev(dev) {
2128 		pci_fixup_device(PCI_FIXUP_FINAL, dev);
2129 	}
2130 
2131 #ifdef CONFIG_PM
2132 	pm_register(PM_PCI_DEV, 0, pci_pm_callback);
2133 #endif
2134 }
2135 
2136 static int __devinit pci_setup(char *str)
2137 {
2138 	while (str) {
2139 		char *k = strchr(str, ',');
2140 		if (k)
2141 			*k++ = 0;
2142 		if (*str && (str = pcibios_setup(str)) && *str) {
2143 			/* PCI layer options should be handled here */
2144 			printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
2145 		}
2146 		str = k;
2147 	}
2148 	return 1;
2149 }
2150 
2151 __setup("pci=", pci_setup);
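/*
 * All "pci=" options are forwarded to the architecture's pcibios_setup();
 * on i386, for example, "pci=conf1" or "pci=nobios" select a config-space
 * access method.  No generic options are parsed here yet.
 */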
2152 
2153 EXPORT_SYMBOL(pci_read_config_byte);
2154 EXPORT_SYMBOL(pci_read_config_word);
2155 EXPORT_SYMBOL(pci_read_config_dword);
2156 EXPORT_SYMBOL(pci_write_config_byte);
2157 EXPORT_SYMBOL(pci_write_config_word);
2158 EXPORT_SYMBOL(pci_write_config_dword);
2159 EXPORT_SYMBOL(pci_devices);
2160 EXPORT_SYMBOL(pci_root_buses);
2161 EXPORT_SYMBOL(pci_enable_device_bars);
2162 EXPORT_SYMBOL(pci_enable_device);
2163 EXPORT_SYMBOL(pci_disable_device);
2164 EXPORT_SYMBOL(pci_find_capability);
2165 EXPORT_SYMBOL(pci_release_regions);
2166 EXPORT_SYMBOL(pci_request_regions);
2167 EXPORT_SYMBOL(pci_release_region);
2168 EXPORT_SYMBOL(pci_request_region);
2169 EXPORT_SYMBOL(pci_find_class);
2170 EXPORT_SYMBOL(pci_find_device);
2171 EXPORT_SYMBOL(pci_find_slot);
2172 EXPORT_SYMBOL(pci_find_subsys);
2173 EXPORT_SYMBOL(pci_set_master);
2174 EXPORT_SYMBOL(pci_set_mwi);
2175 EXPORT_SYMBOL(pci_clear_mwi);
2176 EXPORT_SYMBOL(pci_set_dma_mask);
2177 EXPORT_SYMBOL(pci_dac_set_dma_mask);
2178 EXPORT_SYMBOL(pci_assign_resource);
2179 EXPORT_SYMBOL(pci_register_driver);
2180 EXPORT_SYMBOL(pci_unregister_driver);
2181 EXPORT_SYMBOL(pci_dev_driver);
2182 EXPORT_SYMBOL(pci_match_device);
2183 EXPORT_SYMBOL(pci_find_parent_resource);
2184 
2185 #ifdef CONFIG_HOTPLUG
2186 EXPORT_SYMBOL(pci_setup_device);
2187 EXPORT_SYMBOL(pci_insert_device);
2188 EXPORT_SYMBOL(pci_remove_device);
2189 EXPORT_SYMBOL(pci_announce_device_to_drivers);
2190 EXPORT_SYMBOL(pci_add_new_bus);
2191 EXPORT_SYMBOL(pci_do_scan_bus);
2192 EXPORT_SYMBOL(pci_scan_slot);
2193 EXPORT_SYMBOL(pci_scan_bus);
2194 EXPORT_SYMBOL(pci_scan_device);
2195 EXPORT_SYMBOL(pci_read_bridge_bases);
2196 #ifdef CONFIG_PROC_FS
2197 EXPORT_SYMBOL(pci_proc_attach_device);
2198 EXPORT_SYMBOL(pci_proc_detach_device);
2199 EXPORT_SYMBOL(pci_proc_attach_bus);
2200 EXPORT_SYMBOL(pci_proc_detach_bus);
2201 EXPORT_SYMBOL(proc_bus_pci_dir);
2202 #endif
2203 #endif
2204 
2205 EXPORT_SYMBOL(pci_set_power_state);
2206 EXPORT_SYMBOL(pci_save_state);
2207 EXPORT_SYMBOL(pci_restore_state);
2208 EXPORT_SYMBOL(pci_enable_wake);
2209 
2210 /* Obsolete functions */
2211 
2212 EXPORT_SYMBOL(pcibios_present);
2213 EXPORT_SYMBOL(pcibios_read_config_byte);
2214 EXPORT_SYMBOL(pcibios_read_config_word);
2215 EXPORT_SYMBOL(pcibios_read_config_dword);
2216 EXPORT_SYMBOL(pcibios_write_config_byte);
2217 EXPORT_SYMBOL(pcibios_write_config_word);
2218 EXPORT_SYMBOL(pcibios_write_config_dword);
2219 EXPORT_SYMBOL(pcibios_find_class);
2220 EXPORT_SYMBOL(pcibios_find_device);
2221 
2222 /* Quirk info */
2223 
2224 EXPORT_SYMBOL(isa_dma_bridge_buggy);
2225 EXPORT_SYMBOL(pci_pci_problems);
2226 
2227 /* Pool allocator */
2228 
2229 EXPORT_SYMBOL (pci_pool_create);
2230 EXPORT_SYMBOL (pci_pool_destroy);
2231 EXPORT_SYMBOL (pci_pool_alloc);
2232 EXPORT_SYMBOL (pci_pool_free);
2233 
2234