1 /*
2  * Copyright IBM Corporation 2001, 2005, 2006
3  * Copyright Dave Engebretsen & Todd Inglett 2001
4  * Copyright Linas Vepstas 2005, 2006
5  * Copyright 2001-2012 IBM Corporation.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
20  *
21  * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
27 #include <linux/list.h>
28 #include <linux/pci.h>
29 #include <linux/proc_fs.h>
30 #include <linux/rbtree.h>
31 #include <linux/seq_file.h>
32 #include <linux/spinlock.h>
33 #include <linux/export.h>
34 #include <linux/of.h>
35 
36 #include <linux/atomic.h>
37 #include <asm/eeh.h>
38 #include <asm/eeh_event.h>
39 #include <asm/io.h>
40 #include <asm/machdep.h>
41 #include <asm/ppc-pci.h>
42 #include <asm/rtas.h>
43 
44 
45 /** Overview:
46  *  EEH, or "Extended Error Handling" is a PCI bridge technology for
47  *  dealing with PCI bus errors that can't be dealt with within the
48  *  usual PCI framework, except by check-stopping the CPU.  Systems
49  *  that are designed for high-availability/reliability cannot afford
50  *  to crash due to a "mere" PCI error, thus the need for EEH.
51  *  An EEH-capable bridge operates by converting a detected error
52  *  into a "slot freeze", taking the PCI adapter off-line, making
53  *  the slot behave, from the OS's point of view, as if the slot
54  *  were "empty": all reads return 0xff's and all writes are silently
55  *  ignored.  EEH slot isolation events can be triggered by parity
56  *  errors on the address or data busses (e.g. during posted writes),
57  *  which in turn might be caused by low voltage on the bus, dust,
58  *  vibration, humidity, radioactivity or plain-old failed hardware.
59  *
60  *  Note, however, that one of the leading causes of EEH slot
61  *  freeze events is buggy device drivers, buggy device microcode,
62  *  or buggy device hardware.  This is because any attempt by the
63  *  device to bus-master data to a memory address that is not
64  *  assigned to the device will trigger a slot freeze.   (The idea
65  *  is to prevent devices-gone-wild from corrupting system memory).
66  *  Buggy hardware/drivers will have a miserable time co-existing
67  *  with EEH.
68  *
69  *  Ideally, a PCI device driver, when suspecting that an isolation
70  *  event has occurred (e.g. by reading 0xff's), will then ask EEH
71  *  whether this is the case, and then take appropriate steps to
72  *  reset the PCI slot, the PCI device, and then resume operations.
73  *  However, until that day,  the checking is done here, with the
74  *  eeh_check_failure() routine embedded in the MMIO macros.  If
75  *  the slot is found to be isolated, an "EEH Event" is synthesized
76  *  and sent out for processing.
77  */
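/* Illustrative sketch only (the "priv" structure, its "regs" mapping and
 * the STATUS offset below are hypothetical): a driver that suspects an
 * isolation event after reading all-ones data can ask EEH explicitly,
 * instead of relying on the implicit check in the MMIO macros:
 *
 *	u32 val = readl(priv->regs + STATUS);
 *	if (val == 0xffffffff &&
 *	    eeh_dn_check_failure(pci_device_to_OF_node(pdev), pdev))
 *		return -EIO;	(slot isolated; a recovery event was queued)
 */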
78 
79 /* If a device driver keeps reading an MMIO register in an interrupt
80  * handler after a slot isolation event, it might be broken.
81  * This sets the threshold for how many read attempts we allow
82  * before printing an error message.
83  */
84 #define EEH_MAX_FAILS	2100000
85 
86 /* Time to wait for a PCI slot to report status, in milliseconds */
87 #define PCI_BUS_RESET_WAIT_MSEC (60*1000)
88 
89 /* Platform dependent EEH operations */
90 struct eeh_ops *eeh_ops = NULL;
91 
92 int eeh_subsystem_enabled;
93 EXPORT_SYMBOL(eeh_subsystem_enabled);
94 
95 /* Lock to avoid races due to multiple reports of an error */
96 static DEFINE_RAW_SPINLOCK(confirm_error_lock);
97 
98 /* Buffer for reporting pci register dumps. It's here in BSS, and
99  * not dynamically allocated, so that it ends up in the RMO where RTAS
100  * can access it.
101  */
102 #define EEH_PCI_REGS_LOG_LEN 4096
103 static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
104 
105 /*
106  * This struct maintains the global EEH statistics. The same
107  * statistics are also exported to user space through procfs
108  * (see proc_eeh_show() at the bottom of this file).
109  */
110 struct eeh_stats {
111 	u64 no_device;		/* PCI device not found		*/
112 	u64 no_dn;		/* OF node not found		*/
113 	u64 no_cfg_addr;	/* Config address not found	*/
114 	u64 ignored_check;	/* EEH check skipped		*/
115 	u64 total_mmio_ffs;	/* Total EEH checks		*/
116 	u64 false_positives;	/* Unnecessary EEH checks	*/
117 	u64 slot_resets;	/* PE reset			*/
118 };
119 
120 static struct eeh_stats eeh_stats;
121 
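/* Note: the "class-code" device-tree property is the 24-bit PCI class
 * code, with the base class in bits 23:16 (the same layout the
 * dev->class >> 16 tests later in this file rely on).
 */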
122 #define IS_BRIDGE(class_code) (((class_code)>>16) == PCI_BASE_CLASS_BRIDGE)
123 
124 /**
125  * eeh_gather_pci_data - Copy assorted PCI config space registers to buf
126  * @edev: device to report data for
127  * @buf: buffer in which to log
128  * @len: amount of room in the buffer
129  *
130  * This routine captures assorted PCI configuration space data,
131  * and puts them into a buffer for RTAS error logging.
132  */
133 static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
134 {
135 	struct device_node *dn = eeh_dev_to_of_node(edev);
136 	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
137 	u32 cfg;
138 	int cap, i;
139 	int n = 0;
140 
141 	n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
142 	printk(KERN_WARNING "EEH: of node=%s\n", dn->full_name);
143 
144 	eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
145 	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
146 	printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);
147 
148 	eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
149 	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
150 	printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);
151 
152 	if (!dev) {
153 		printk(KERN_WARNING "EEH: no PCI device for this of node\n");
154 		return n;
155 	}
156 
157 	/* Gather bridge-specific registers */
158 	if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
159 		eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
160 		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
161 		printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg);
162 
163 		eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
164 		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
165 		printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg);
166 	}
167 
168 	/* Dump out the PCI-X command and status regs */
169 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
170 	if (cap) {
171 		eeh_ops->read_config(dn, cap, 4, &cfg);
172 		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
173 		printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);
174 
175 		eeh_ops->read_config(dn, cap+4, 4, &cfg);
176 		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
177 		printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
178 	}
179 
180 	/* If PCI-E capable, dump PCI-E cap 10, and the AER */
181 	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
182 	if (cap) {
183 		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
184 		printk(KERN_WARNING
185 		       "EEH: PCI-E capabilities and status follow:\n");
186 
187 		for (i=0; i<=8; i++) {
188 			eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
189 			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
190 			printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
191 		}
192 
193 		cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
194 		if (cap) {
195 			n += scnprintf(buf+n, len-n, "pci-e AER:\n");
196 			printk(KERN_WARNING
197 			       "EEH: PCI-E AER capability register set follows:\n");
198 
199 			for (i=0; i<14; i++) {
200 				eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
201 				n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
202 				printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
203 			}
204 		}
205 	}
206 
207 	/* Gather status on devices under the bridge */
208 	if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
209 		struct device_node *child;
210 
211 		for_each_child_of_node(dn, child) {
212 			if (of_node_to_eeh_dev(child))
213 				n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n);
214 		}
215 	}
216 
217 	return n;
218 }
219 
220 /**
221  * eeh_slot_error_detail - Generate combined log including driver log and error log
222  * @edev: device to report error log for
223  * @severity: temporary or permanent error log
224  *
225  * This routine should be called to generate the combined log, which
226  * comprises the driver log and the error log. The driver log is
227  * extracted from the config space of the corresponding PCI device, while
228  * the error log is fetched through a platform dependent function call.
229  */
230 void eeh_slot_error_detail(struct eeh_dev *edev, int severity)
231 {
232 	size_t loglen = 0;
233 	pci_regs_buf[0] = 0;
234 
235 	eeh_pci_enable(edev, EEH_OPT_THAW_MMIO);
236 	eeh_ops->configure_bridge(eeh_dev_to_of_node(edev));
237 	eeh_restore_bars(edev);
238 	loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
239 
240 	eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen);
241 }
242 
243 /**
244  * eeh_token_to_phys - Convert EEH address token to phys address
245  * @token: I/O token, should be address in the form 0xA....
246  *
247  * This routine should be called to convert a virtual I/O address
248  * into a physical one.
249  */
250 static inline unsigned long eeh_token_to_phys(unsigned long token)
251 {
252 	pte_t *ptep;
253 	unsigned long pa;
254 
255 	ptep = find_linux_pte(init_mm.pgd, token);
256 	if (!ptep)
257 		return token;
258 	pa = pte_pfn(*ptep) << PAGE_SHIFT;
259 
260 	return pa | (token & (PAGE_SIZE-1));
261 }
262 
263 /**
264  * eeh_find_device_pe - Retrieve the PE for the given device
265  * @dn: device node
266  *
267  * Return the PE under which this device lies
268  */
269 struct device_node *eeh_find_device_pe(struct device_node *dn)
270 {
271 	while (dn->parent && of_node_to_eeh_dev(dn->parent) &&
272 	       (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
273 		dn = dn->parent;
274 	}
275 	return dn;
276 }
277 
278 /**
279  * __eeh_mark_slot - Mark all child devices as failed
280  * @parent: parent device
281  * @mode_flag: failure flag
282  *
283  * Mark all devices that are children of this device as failed.
284  * Mark the device driver too, so that it can see the failure
285  * immediately; this is critical, since some drivers poll
286  * status registers in interrupts ... If a driver is polling,
287  * and the slot is frozen, then the driver can deadlock in
288  * an interrupt context, which is bad.
289  */
290 static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
291 {
292 	struct device_node *dn;
293 
294 	for_each_child_of_node(parent, dn) {
295 		if (of_node_to_eeh_dev(dn)) {
296 			/* Mark the pci device driver too */
297 			struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
298 
299 			of_node_to_eeh_dev(dn)->mode |= mode_flag;
300 
301 			if (dev && dev->driver)
302 				dev->error_state = pci_channel_io_frozen;
303 
304 			__eeh_mark_slot(dn, mode_flag);
305 		}
306 	}
307 }
308 
309 /**
310  * eeh_mark_slot - Mark the indicated device and its children as failed
311  * @dn: parent device
312  * @mode_flag: failure flag
313  *
314  * Mark the indicated device and its child devices as failed.
315  * The device drivers are marked as failed as well.
316  */
317 void eeh_mark_slot(struct device_node *dn, int mode_flag)
318 {
319 	struct pci_dev *dev;
320 	dn = eeh_find_device_pe(dn);
321 
322 	/* Back up one, since config addrs might be shared */
323 	if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
324 		dn = dn->parent;
325 
326 	of_node_to_eeh_dev(dn)->mode |= mode_flag;
327 
328 	/* Mark the pci device too */
329 	dev = of_node_to_eeh_dev(dn)->pdev;
330 	if (dev)
331 		dev->error_state = pci_channel_io_frozen;
332 
333 	__eeh_mark_slot(dn, mode_flag);
334 }
335 
336 /**
337  * __eeh_clear_slot - Clear failure flag for the child devices
338  * @parent: parent device
339  * @mode_flag: flag to be cleared
340  *
341  * Clear failure flag for the child devices.
342  */
343 static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
344 {
345 	struct device_node *dn;
346 
347 	for_each_child_of_node(parent, dn) {
348 		if (of_node_to_eeh_dev(dn)) {
349 			of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
350 			of_node_to_eeh_dev(dn)->check_count = 0;
351 			__eeh_clear_slot(dn, mode_flag);
352 		}
353 	}
354 }
355 
356 /**
357  * eeh_clear_slot - Clear failure flag for the indicated device and its children
358  * @dn: parent device
359  * @mode_flag: flag to be cleared
360  *
361  * Clear failure flag for the indicated device and its children.
362  */
363 void eeh_clear_slot(struct device_node *dn, int mode_flag)
364 {
365 	unsigned long flags;
366 	raw_spin_lock_irqsave(&confirm_error_lock, flags);
367 
368 	dn = eeh_find_device_pe(dn);
369 
370 	/* Back up one, since config addrs might be shared */
371 	if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
372 		dn = dn->parent;
373 
374 	of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
375 	of_node_to_eeh_dev(dn)->check_count = 0;
376 	__eeh_clear_slot(dn, mode_flag);
377 	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
378 }
379 
380 /**
381  * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze
382  * @dn: device node
383  * @dev: pci device, if known
384  *
385  * Check for an EEH failure for the given device node.  Call this
386  * routine if the result of a read was all 0xff's and you want to
387  * find out if this is due to an EEH slot freeze.  This routine
388  * will query firmware for the EEH status.
389  *
390  * Returns 0 if there has not been an EEH error; otherwise returns
391  * a non-zero value and queues up a slot isolation event notification.
392  *
393  * It is safe to call this routine in an interrupt context.
394  */
395 int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
396 {
397 	int ret;
398 	unsigned long flags;
399 	struct eeh_dev *edev;
400 	int rc = 0;
401 	const char *location;
402 
403 	eeh_stats.total_mmio_ffs++;
404 
405 	if (!eeh_subsystem_enabled)
406 		return 0;
407 
408 	if (!dn) {
409 		eeh_stats.no_dn++;
410 		return 0;
411 	}
412 	dn = eeh_find_device_pe(dn);
413 	edev = of_node_to_eeh_dev(dn);
414 
415 	/* Access to IO BARs might get this far and still not want checking. */
416 	if (!(edev->mode & EEH_MODE_SUPPORTED) ||
417 	    edev->mode & EEH_MODE_NOCHECK) {
418 		eeh_stats.ignored_check++;
419 		pr_debug("EEH: Ignored check (%x) for %s %s\n",
420 			edev->mode, eeh_pci_name(dev), dn->full_name);
421 		return 0;
422 	}
423 
424 	if (!edev->config_addr && !edev->pe_config_addr) {
425 		eeh_stats.no_cfg_addr++;
426 		return 0;
427 	}
428 
429 	/* If we already have a pending isolation event for this
430 	 * slot, we know it's bad already and don't need to check again.
431 	 * Do this checking under a lock, since multiple PCI devices
432 	 * in one slot might report errors simultaneously and we
433 	 * only want one error recovery routine running.
434 	 */
435 	raw_spin_lock_irqsave(&confirm_error_lock, flags);
436 	rc = 1;
437 	if (edev->mode & EEH_MODE_ISOLATED) {
438 		edev->check_count++;
439 		if (edev->check_count % EEH_MAX_FAILS == 0) {
440 			location = of_get_property(dn, "ibm,loc-code", NULL);
441 			printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
442 				"location=%s driver=%s pci addr=%s\n",
443 				edev->check_count, location,
444 				eeh_driver_name(dev), eeh_pci_name(dev));
445 			printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
446 				eeh_driver_name(dev));
447 			dump_stack();
448 		}
449 		goto dn_unlock;
450 	}
451 
452 	/*
453 	 * Now test for an EEH failure.  This is VERY expensive.
454 	 * Note that the eeh_config_addr may be a parent device
455 	 * in the case of a device behind a bridge, or it may be
456 	 * function zero of a multi-function device.
457 	 * In any case they must share a common PHB.
458 	 */
459 	ret = eeh_ops->get_state(dn, NULL);
460 
461 	/* Note that config-io to empty slots may fail;
462 	 * they are empty when they don't have children.
463 	 * We punt under any of the following conditions: failure to get
464 	 * the PE's state, EEH not supported or permanently unavailable,
465 	 * or the PE already being in a good state.
466 	 */
467 	if ((ret < 0) ||
468 	    (ret == EEH_STATE_NOT_SUPPORT) ||
469 	    (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
470 	    (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
471 		eeh_stats.false_positives++;
472 	edev->false_positives++;
473 		rc = 0;
474 		goto dn_unlock;
475 	}
476 
477 	eeh_stats.slot_resets++;
478 
479 	/* Avoid repeated reports of this failure, including problems
480 	 * with other functions on this device, and functions under
481 	 * bridges.
482 	 */
483 	eeh_mark_slot(dn, EEH_MODE_ISOLATED);
484 	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
485 
486 	eeh_send_failure_event(edev);
487 
488 	/* Most EEH events are due to device driver bugs.  Having
489 	 * a stack trace will help the device-driver authors figure
490 	 * out what happened.  So print that out.
491 	 */
492 	dump_stack();
493 	return 1;
494 
495 dn_unlock:
496 	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
497 	return rc;
498 }
499 
500 EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
501 
502 /**
503  * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
504  * @token: I/O token, should be address in the form 0xA....
505  * @val: value, should be all 1's (XXX why do we need this arg??)
506  *
507  * Check for an EEH failure at the given token address.  Call this
508  * routine if the result of a read was all 0xff's and you want to
509  * find out if this is due to an EEH slot freeze event.  This routine
510  * will query firmware for the EEH status.
511  *
512  * Note this routine is safe to call in an interrupt context.
513  */
514 unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
515 {
516 	unsigned long addr;
517 	struct pci_dev *dev;
518 	struct device_node *dn;
519 
520 	/* Finding the phys addr + pci device; this is pretty quick. */
521 	addr = eeh_token_to_phys((unsigned long __force) token);
522 	dev = pci_addr_cache_get_device(addr);
523 	if (!dev) {
524 		eeh_stats.no_device++;
525 		return val;
526 	}
527 
528 	dn = pci_device_to_OF_node(dev);
529 	eeh_dn_check_failure(dn, dev);
530 
531 	pci_dev_put(dev);
532 	return val;
533 }
534 
535 EXPORT_SYMBOL(eeh_check_failure);
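/* For reference, a paraphrased sketch of how the MMIO accessors in
 * asm/eeh.h feed into eeh_check_failure(); see that header for the
 * authoritative version:
 *
 *	static inline u32 eeh_readl(const volatile void __iomem *addr)
 *	{
 *		u32 val = in_le32(addr);
 *		if (EEH_POSSIBLE_ERROR(val, u32))
 *			return eeh_check_failure(addr, val);
 *		return val;
 *	}
 */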
536 
537 
538 /**
539  * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
540  * @edev: pci device node
541  *
542  * This routine should be called to reenable frozen MMIO or DMA
543  * so that it works correctly again. It's useful while doing
544  * recovery or log collection on the indicated device.
545  */
546 int eeh_pci_enable(struct eeh_dev *edev, int function)
547 {
548 	int rc;
549 	struct device_node *dn = eeh_dev_to_of_node(edev);
550 
551 	rc = eeh_ops->set_option(dn, function);
552 	if (rc)
553 		printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n",
554 		        function, rc, dn->full_name);
555 
556 	rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
557 	if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
558 	   (function == EEH_OPT_THAW_MMIO))
559 		return 0;
560 
561 	return rc;
562 }
563 
564 /**
565  * pcibios_set_pcie_reset_state - Set PCI-E reset state
566  * @dev: pci device struct
567  * @state: reset state to enter
568  *
569  * Return value:
570  * 	0 if success
571  * 	0 on success, -EINVAL for an unrecognized state
572 int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
573 {
574 	struct device_node *dn = pci_device_to_OF_node(dev);
575 
576 	switch (state) {
577 	case pcie_deassert_reset:
578 		eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
579 		break;
580 	case pcie_hot_reset:
581 		eeh_ops->reset(dn, EEH_RESET_HOT);
582 		break;
583 	case pcie_warm_reset:
584 		eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
585 		break;
586 	default:
587 		return -EINVAL;
588 	}
589 
590 	return 0;
591 }
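/* Usage note (illustrative, not taken from a specific driver): drivers
 * normally reach this hook through the generic PCI helper rather than
 * calling it directly, e.g.
 *
 *	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
 *	...
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 *
 * which on powerpc ends up in pcibios_set_pcie_reset_state() above.
 */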
592 
593 /**
594  * __eeh_set_pe_freset - Check the required reset for child devices
595  * @parent: parent device
596  * @freset: return value
597  *
598  * Each device might have its preferred reset type: fundamental or
599  * hot reset. The routine is used to collect the information from
600  * the child devices so that they could be reset accordingly.
601  */
602 void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
603 {
604 	struct device_node *dn;
605 
606 	for_each_child_of_node(parent, dn) {
607 		if (of_node_to_eeh_dev(dn)) {
608 			struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
609 
610 			if (dev && dev->driver)
611 				*freset |= dev->needs_freset;
612 
613 			__eeh_set_pe_freset(dn, freset);
614 		}
615 	}
616 }
617 
618 /**
619  * eeh_set_pe_freset - Check the required reset for the indicated device and its children
620  * @dn: parent device
621  * @freset: return value
622  *
623  * Each device might have its preferred reset type: fundamental or
624  * hot reset. The routine is used to collect the information for
625  * the indicated device and its children so that the whole group of
626  * devices can be reset properly.
627  */
628 void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
629 {
630 	struct pci_dev *dev;
631 	dn = eeh_find_device_pe(dn);
632 
633 	/* Back up one, since config addrs might be shared */
634 	if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
635 		dn = dn->parent;
636 
637 	dev = of_node_to_eeh_dev(dn)->pdev;
638 	if (dev)
639 		*freset |= dev->needs_freset;
640 
641 	__eeh_set_pe_freset(dn, freset);
642 }
643 
644 /**
645  * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
646  * @edev: pci device node to be reset.
647  *
648  * Assert the PCI #RST line for 1/4 second.
649  */
650 static void eeh_reset_pe_once(struct eeh_dev *edev)
651 {
652 	unsigned int freset = 0;
653 	struct device_node *dn = eeh_dev_to_of_node(edev);
654 
655 	/* Determine type of EEH reset required for
656 	 * Partitionable Endpoint, a hot-reset (1)
657 	 * or a fundamental reset (3).
658 	 * A fundamental reset required by any device under
659 	 * Partitionable Endpoint trumps hot-reset.
660   	 */
661 	eeh_set_pe_freset(dn, &freset);
662 
663 	if (freset)
664 		eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
665 	else
666 		eeh_ops->reset(dn, EEH_RESET_HOT);
667 
668 	/* The PCI bus requires that the reset be held high for at least
669 	 * 100 milliseconds. We wait a bit longer 'just in case'.
670 	 */
671 #define PCI_BUS_RST_HOLD_TIME_MSEC 250
672 	msleep(PCI_BUS_RST_HOLD_TIME_MSEC);
673 
674 	/* We might get hit with another EEH freeze as soon as the
675 	 * pci slot reset line is dropped. Make sure we don't miss
676 	 * these, and clear the flag now.
677 	 */
678 	eeh_clear_slot(dn, EEH_MODE_ISOLATED);
679 
680 	eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
681 
682 	/* After a PCI slot has been reset, the PCI Express spec requires
683 	 * a 1.5 second idle time for the bus to stabilize, before starting
684 	 * up traffic.
685 	 */
686 #define PCI_BUS_SETTLE_TIME_MSEC 1800
687 	msleep(PCI_BUS_SETTLE_TIME_MSEC);
688 }
689 
690 /**
691  * eeh_reset_pe - Reset the indicated PE
692  * @edev: PCI device associated EEH device
693  *
694  * This routine should be called to reset the indicated device, including
695  * its PE. A PE might include multiple PCI devices, and sometimes PCI bridges
696  * might be involved as well.
697  */
698 int eeh_reset_pe(struct eeh_dev *edev)
699 {
700 	int i, rc;
701 	struct device_node *dn = eeh_dev_to_of_node(edev);
702 
703 	/* Take three shots at resetting the bus */
704 	for (i=0; i<3; i++) {
705 		eeh_reset_pe_once(edev);
706 
707 		rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
708 		if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
709 			return 0;
710 
711 		if (rc < 0) {
712 			printk(KERN_ERR "EEH: unrecoverable slot failure %s\n",
713 			       dn->full_name);
714 			return -1;
715 		}
716 		printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n",
717 		       i+1, dn->full_name, rc);
718 	}
719 
720 	return -1;
721 }
722 
723 /** Save and restore of PCI BARs
724  *
725  * Although firmware will set up BARs during boot, it doesn't
726  * restore device BARs after a device reset, though it will,
727  * if requested, set up bridge configuration. Thus, we need to
728  * configure the PCI devices ourselves.
729  */
730 
731 /**
732  * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
733  * @edev: PCI device associated EEH device
734  *
735  * Loads the PCI configuration space base address registers,
736  * the expansion ROM base address, the latency timer, etc.,
737  * from the saved values in the device node.
738  */
739 static inline void eeh_restore_one_device_bars(struct eeh_dev *edev)
740 {
741 	int i;
742 	u32 cmd;
743 	struct device_node *dn = eeh_dev_to_of_node(edev);
744 
745 	if (!edev->phb)
746 		return;
747 
748 	for (i=4; i<10; i++) {
749 		eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
750 	}
751 
752 	/* 12 == Expansion ROM Address */
753 	eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
754 
755 #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
756 #define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
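/* A note on the two macros above: config_space[] holds 32-bit config
 * reads in native (big-endian) storage, so the byte at config-space
 * offset OFF lives at byte index 4*(OFF/4) + 3 - (OFF%4) of the array,
 * which is what 8*((OFF)/4)+3-(OFF) works out to.
 */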
757 
758 	eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
759 	            SAVED_BYTE(PCI_CACHE_LINE_SIZE));
760 
761 	eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
762 	            SAVED_BYTE(PCI_LATENCY_TIMER));
763 
764 	/* max latency, min grant, interrupt pin and line */
765 	eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
766 
767 	/* Restore PERR & SERR bits, some devices require it,
768 	 * don't touch the other command bits
769 	 */
770 	eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
771 	if (edev->config_space[1] & PCI_COMMAND_PARITY)
772 		cmd |= PCI_COMMAND_PARITY;
773 	else
774 		cmd &= ~PCI_COMMAND_PARITY;
775 	if (edev->config_space[1] & PCI_COMMAND_SERR)
776 		cmd |= PCI_COMMAND_SERR;
777 	else
778 		cmd &= ~PCI_COMMAND_SERR;
779 	eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
780 }
781 
782 /**
783  * eeh_restore_bars - Restore the PCI config space info
784  * @edev: EEH device
785  *
786  * This routine performs a recursive walk to the children
787  * of this device as well.
788  */
789 void eeh_restore_bars(struct eeh_dev *edev)
790 {
791 	struct device_node *dn;
792 	if (!edev)
793 		return;
794 
795 	if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code))
796 		eeh_restore_one_device_bars(edev);
797 
798 	for_each_child_of_node(eeh_dev_to_of_node(edev), dn)
799 		eeh_restore_bars(of_node_to_eeh_dev(dn));
800 }
801 
802 /**
803  * eeh_save_bars - Save device bars
804  * @edev: PCI device associated EEH device
805  *
806  * Save the values of the device BARs. Unlike the restore
807  * routine, this routine is *not* recursive. This is because
808  * PCI devices are added individually; but, for the restore,
809  * an entire slot is reset at a time.
810  */
811 static void eeh_save_bars(struct eeh_dev *edev)
812 {
813 	int i;
814 	struct device_node *dn;
815 
816 	if (!edev)
817 		return;
818 	dn = eeh_dev_to_of_node(edev);
819 
820 	for (i = 0; i < 16; i++)
821 		eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
822 }
823 
824 /**
825  * eeh_early_enable - Early enable EEH on the indicated device
826  * @dn: device node
827  * @data: BUID
828  *
829  * Enable EEH functionality on the specified PCI device. The function
830  * is expected to be called before real PCI probing is done. However,
831  * the PHBs have been initialized at this point.
832  */
833 static void *eeh_early_enable(struct device_node *dn, void *data)
834 {
835 	int ret;
836 	const u32 *class_code = of_get_property(dn, "class-code", NULL);
837 	const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
838 	const u32 *device_id = of_get_property(dn, "device-id", NULL);
839 	const u32 *regs;
840 	int enable;
841 	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
842 
843 	edev->class_code = 0;
844 	edev->mode = 0;
845 	edev->check_count = 0;
846 	edev->freeze_count = 0;
847 	edev->false_positives = 0;
848 
849 	if (!of_device_is_available(dn))
850 		return NULL;
851 
852 	/* Ignore bad nodes. */
853 	if (!class_code || !vendor_id || !device_id)
854 		return NULL;
855 
856 	/* There is nothing to check on PCI to ISA bridges */
857 	if (dn->type && !strcmp(dn->type, "isa")) {
858 		edev->mode |= EEH_MODE_NOCHECK;
859 		return NULL;
860 	}
861 	edev->class_code = *class_code;
862 
863 	/* Ok... see if this device supports EEH.  Some do, some don't,
864 	 * and the only way to find out is to check each and every one.
865 	 */
866 	regs = of_get_property(dn, "reg", NULL);
867 	if (regs) {
868 		/* First register entry is addr (00BBSS00)  */
869 		/* Try to enable eeh */
870 		ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE);
871 
872 		enable = 0;
873 		if (ret == 0) {
874 			edev->config_addr = regs[0];
875 
876 			/* If the newer, better, ibm,get-config-addr-info is supported,
877 			 * then use that instead.
878 			 */
879 			edev->pe_config_addr = eeh_ops->get_pe_addr(dn);
880 
881 			/* Some older systems (Power4) allow the
882 			 * ibm,set-eeh-option call to succeed even on nodes
883 			 * where EEH is not supported. Verify support
884 			 * explicitly.
885 			 */
886 			ret = eeh_ops->get_state(dn, NULL);
887 			if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
888 				enable = 1;
889 		}
890 
891 		if (enable) {
892 			eeh_subsystem_enabled = 1;
893 			edev->mode |= EEH_MODE_SUPPORTED;
894 
895 			pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
896 				 dn->full_name, edev->config_addr,
897 				 edev->pe_config_addr);
898 		} else {
899 
900 			/* This device doesn't support EEH, but it may have an
901 			 * EEH parent, in which case we mark it as supported.
902 			 */
903 			if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
904 			    (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
905 				/* Parent supports EEH. */
906 				edev->mode |= EEH_MODE_SUPPORTED;
907 				edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
908 				return NULL;
909 			}
910 		}
911 	} else {
912 		printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
913 		       dn->full_name);
914 	}
915 
916 	eeh_save_bars(edev);
917 	return NULL;
918 }
919 
920 /**
921  * eeh_ops_register - Register platform dependent EEH operations
922  * @ops: platform dependent EEH operations
923  *
924  * Register the platform dependent EEH operation callback
925  * functions. The platform should call this function before
926  * any other EEH operations.
927  */
928 int __init eeh_ops_register(struct eeh_ops *ops)
929 {
930 	if (!ops->name) {
931 		pr_warning("%s: Invalid EEH ops name for %p\n",
932 			__func__, ops);
933 		return -EINVAL;
934 	}
935 
936 	if (eeh_ops && eeh_ops != ops) {
937 		pr_warning("%s: EEH ops of platform %s already existing (%s)\n",
938 			__func__, eeh_ops->name, ops->name);
939 		return -EEXIST;
940 	}
941 
942 	eeh_ops = ops;
943 
944 	return 0;
945 }
946 
947 /**
948  * eeh_ops_unregister - Unregister platform dependent EEH operations
949  * @name: name of EEH platform operations
950  *
951  * Unregister the platform dependent EEH operation callback
952  * functions.
953  */
954 int __exit eeh_ops_unregister(const char *name)
955 {
956 	if (!name || !strlen(name)) {
957 		pr_warning("%s: Invalid EEH ops name\n",
958 			__func__);
959 		return -EINVAL;
960 	}
961 
962 	if (eeh_ops && !strcmp(eeh_ops->name, name)) {
963 		eeh_ops = NULL;
964 		return 0;
965 	}
966 
967 	return -EEXIST;
968 }
969 
970 /**
971  * eeh_init - EEH initialization
972  *
973  * Initialize EEH by trying to enable it for all of the adapters in the system.
974  * As a side effect we can determine here if eeh is supported at all.
975  * Note that we leave EEH on so failed config cycles won't cause a machine
976  * check.  If a user turns off EEH for a particular adapter they are really
977  * telling Linux to ignore errors.  Some hardware (e.g. POWER5) won't
978  * grant access to a slot if EEH isn't enabled, and so we always enable
979  * EEH for all slots/all devices.
980  *
981  * The eeh-force-off option disables EEH checking globally, for all slots.
982  * Even if force-off is set, the EEH hardware is still enabled, so that
983  * newer systems can boot.
984  */
985 void __init eeh_init(void)
986 {
987 	struct pci_controller *hose, *tmp;
988 	struct device_node *phb;
989 	int ret;
990 
991 	/* call platform initialization function */
992 	if (!eeh_ops) {
993 		pr_warning("%s: Platform EEH operation not found\n",
994 			__func__);
995 		return;
996 	} else if ((ret = eeh_ops->init())) {
997 		pr_warning("%s: Failed to call platform init function (%d)\n",
998 			__func__, ret);
999 		return;
1000 	}
1001 
1002 	raw_spin_lock_init(&confirm_error_lock);
1003 
1004 	/* Enable EEH for all adapters */
1005 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1006 		phb = hose->dn;
1007 		traverse_pci_devices(phb, eeh_early_enable, NULL);
1008 	}
1009 
1010 	if (eeh_subsystem_enabled)
1011 		printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n");
1012 	else
1013 		printk(KERN_WARNING "EEH: No capable adapters found\n");
1014 }
1015 
1016 /**
1017  * eeh_add_device_early - Enable EEH for the indicated device_node
1018  * @dn: device node for which to set up EEH
1019  *
1020  * This routine must be used to perform EEH initialization for PCI
1021  * devices that were added after system boot (e.g. hotplug, dlpar).
1022  * This routine must be called before any i/o is performed to the
1023  * adapter (including any config-space i/o).
1024  * Whether this actually enables EEH or not for this device depends
1025  * on the CEC architecture, the type of device, earlier boot
1026  * command-line arguments, etc.
1027  */
1028 static void eeh_add_device_early(struct device_node *dn)
1029 {
1030 	struct pci_controller *phb;
1031 
1032 	if (!of_node_to_eeh_dev(dn))
1033 		return;
1034 	phb = of_node_to_eeh_dev(dn)->phb;
1035 
1036 	/* USB Bus children of PCI devices will not have BUIDs */
1037 	if (NULL == phb || 0 == phb->buid)
1038 		return;
1039 
1040 	eeh_early_enable(dn, NULL);
1041 }
1042 
1043 /**
1044  * eeh_add_device_tree_early - Enable EEH for the indicated device
1045  * @dn: device node
1046  *
1047  * This routine must be used to perform EEH initialization for the
1048  * indicated PCI device that was added after system boot (e.g.
1049  * hotplug, dlpar).
1050  */
1051 void eeh_add_device_tree_early(struct device_node *dn)
1052 {
1053 	struct device_node *sib;
1054 
1055 	for_each_child_of_node(dn, sib)
1056 		eeh_add_device_tree_early(sib);
1057 	eeh_add_device_early(dn);
1058 }
1059 EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
1060 
1061 /**
1062  * eeh_add_device_late - Perform EEH initialization for the indicated pci device
1063  * @dev: pci device for which to set up EEH
1064  *
1065  * This routine must be used to complete EEH initialization for PCI
1066  * devices that were added after system boot (e.g. hotplug, dlpar).
1067  */
1068 static void eeh_add_device_late(struct pci_dev *dev)
1069 {
1070 	struct device_node *dn;
1071 	struct eeh_dev *edev;
1072 
1073 	if (!dev || !eeh_subsystem_enabled)
1074 		return;
1075 
1076 	pr_debug("EEH: Adding device %s\n", pci_name(dev));
1077 
1078 	dn = pci_device_to_OF_node(dev);
1079 	edev = of_node_to_eeh_dev(dn);
1080 	if (edev->pdev == dev) {
1081 		pr_debug("EEH: Already referenced !\n");
1082 		return;
1083 	}
1084 	WARN_ON(edev->pdev);
1085 
1086 	pci_dev_get(dev);
1087 	edev->pdev = dev;
1088 	dev->dev.archdata.edev = edev;
1089 
1090 	pci_addr_cache_insert_device(dev);
1091 	eeh_sysfs_add_device(dev);
1092 }
1093 
1094 /**
1095  * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
1096  * @bus: PCI bus
1097  *
1098  * This routine must be used to perform EEH initialization for PCI
1099  * devices which are attached to the indicated PCI bus. The PCI bus
1100  * is added after system boot through hotplug or dlpar.
1101  */
1102 void eeh_add_device_tree_late(struct pci_bus *bus)
1103 {
1104 	struct pci_dev *dev;
1105 
1106 	list_for_each_entry(dev, &bus->devices, bus_list) {
1107 		eeh_add_device_late(dev);
1108 		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1109 			struct pci_bus *subbus = dev->subordinate;
1110 			if (subbus)
1111 				eeh_add_device_tree_late(subbus);
1112 		}
1113 	}
1114 }
1115 EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
1116 
1117 /**
1118  * eeh_remove_device - Undo EEH setup for the indicated pci device
1119  * @dev: pci device to be removed
1120  *
1121  * This routine should be called when a device is removed from
1122  * a running system (e.g. by hotplug or dlpar).  It unregisters
1123  * the PCI device from the EEH subsystem.  I/O errors affecting
1124  * this device will no longer be detected after this call; thus,
1125  * i/o errors affecting this slot may leave this device unusable.
1126  */
1127 static void eeh_remove_device(struct pci_dev *dev)
1128 {
1129 	struct eeh_dev *edev;
1130 
1131 	if (!dev || !eeh_subsystem_enabled)
1132 		return;
1133 	edev = pci_dev_to_eeh_dev(dev);
1134 
1135 	/* Unregister the device with the EEH/PCI address search system */
1136 	pr_debug("EEH: Removing device %s\n", pci_name(dev));
1137 
1138 	if (!edev || !edev->pdev) {
1139 		pr_debug("EEH: Not referenced !\n");
1140 		return;
1141 	}
1142 	edev->pdev = NULL;
1143 	dev->dev.archdata.edev = NULL;
1144 	pci_dev_put(dev);
1145 
1146 	pci_addr_cache_remove_device(dev);
1147 	eeh_sysfs_remove_device(dev);
1148 }
1149 
1150 /**
1151  * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
1152  * @dev: PCI device
1153  *
1154  * This routine must be called when a device is removed from the
1155  * running system through hotplug or dlpar. The corresponding
1156  * PCI address cache will be removed.
1157  */
1158 void eeh_remove_bus_device(struct pci_dev *dev)
1159 {
1160 	struct pci_bus *bus = dev->subordinate;
1161 	struct pci_dev *child, *tmp;
1162 
1163 	eeh_remove_device(dev);
1164 
1165 	if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1166 		list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
1167 			 eeh_remove_bus_device(child);
1168 	}
1169 }
1170 EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
1171 
1172 static int proc_eeh_show(struct seq_file *m, void *v)
1173 {
1174 	if (0 == eeh_subsystem_enabled) {
1175 		seq_printf(m, "EEH Subsystem is globally disabled\n");
1176 		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
1177 	} else {
1178 		seq_printf(m, "EEH Subsystem is enabled\n");
1179 		seq_printf(m,
1180 				"no device=%llu\n"
1181 				"no device node=%llu\n"
1182 				"no config address=%llu\n"
1183 				"check not wanted=%llu\n"
1184 				"eeh_total_mmio_ffs=%llu\n"
1185 				"eeh_false_positives=%llu\n"
1186 				"eeh_slot_resets=%llu\n",
1187 				eeh_stats.no_device,
1188 				eeh_stats.no_dn,
1189 				eeh_stats.no_cfg_addr,
1190 				eeh_stats.ignored_check,
1191 				eeh_stats.total_mmio_ffs,
1192 				eeh_stats.false_positives,
1193 				eeh_stats.slot_resets);
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 static int proc_eeh_open(struct inode *inode, struct file *file)
1200 {
1201 	return single_open(file, proc_eeh_show, NULL);
1202 }
1203 
1204 static const struct file_operations proc_eeh_operations = {
1205 	.open      = proc_eeh_open,
1206 	.read      = seq_read,
1207 	.llseek    = seq_lseek,
1208 	.release   = single_release,
1209 };
1210 
1211 static int __init eeh_init_proc(void)
1212 {
1213 	if (machine_is(pseries))
1214 		proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
1215 	return 0;
1216 }
1217 __initcall(eeh_init_proc);
1218