/*
 * edac_device.c
 * (C) 2007 www.douglaskthompson.com
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com>
 *
 * edac_device API implementation
 * 19 Jan 2007
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
#include <asm/page.h>

#include "edac_core.h"
#include "edac_module.h"

/* The 'edac_device_list' below holds the registered edac_device control
 * structures; manipulation of this list is protected by 'device_ctls_mutex'.
 */
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);

#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
	debugf3("\tedac_dev = %p dev_idx=%d\n", edac_dev, edac_dev->dev_idx);
	debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
	debugf3("\tdev = %p\n", edac_dev->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n",
		edac_dev->mod_name, edac_dev->ctl_name);
	debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif				/* CONFIG_EDAC_DEBUG */


/*
 * edac_device_alloc_ctl_info()
 *	Allocate a new edac device control info structure
 *
 *	The control structure is allocated in one complete chunk
 *	from the OS. It is in turn sub-allocated to the
 *	various objects that compose the structure.
 *
 *	The structure has an array of 'nr_instances' instances within itself.
 *	Each instance represents a major component
 *		Example:  L1 cache and L2 cache are 2 instance components
 *
 *	Within each instance is an array of 'nr_blocks' blocks
 */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
	unsigned sz_private,
	char *edac_device_name, unsigned nr_instances,
	char *edac_block_name, unsigned nr_blocks,
	unsigned offset_value,		/* zero, 1, or other based offset */
	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
	int device_index)
{
	struct edac_device_ctl_info *dev_ctl;
	struct edac_device_instance *dev_inst, *inst;
	struct edac_device_block *dev_blk, *blk_p, *blk;
	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
	unsigned total_size;
	unsigned count;
	unsigned instance, block, attr;
	void *pvt;
	int err;

	debugf4("%s() instances=%d blocks=%d\n",
		__func__, nr_instances, nr_blocks);

	/* Calculate the size of memory we need to allocate AND
	 * determine the offsets of the various item arrays
	 * (instance,block,attrib) from the start of an allocated structure.
	 * We want the alignment of each item (instance,block,attrib)
	 * to be at least as stringent as what the compiler would
	 * provide if we could simply hardcode everything into a single struct.
	 */
	dev_ctl = (struct edac_device_ctl_info *)NULL;

	/* Calc the 'end' offset past end of ONE ctl_info structure
	 * which will become the start of the 'instance' array
	 */
	dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));

	/* Calc the 'end' offset past the instance array within the ctl_info
	 * which will become the start of the block array
	 */
	dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));

	/* Calc the 'end' offset past the dev_blk array
	 * which will become the start of the attrib array, if any.
	 */
	count = nr_instances * nr_blocks;
	dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));

	/* Check for case of when an attribute array is specified */
	if (nr_attrib > 0) {
		/* calc how many nr_attrib we need */
		count *= nr_attrib;

		/* Calc the 'end' offset past the attributes array */
		pvt = edac_align_ptr(&dev_attrib[count], sz_private);
	} else {
		/* no attribute array specified */
		pvt = edac_align_ptr(dev_attrib, sz_private);
	}

	/* 'pvt' now points to where the private data area is.
	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
	 * is baselined at ZERO
	 */
	total_size = ((unsigned long)pvt) + sz_private;

	/* Allocate the amount of memory for the set of control structures */
	dev_ctl = kzalloc(total_size, GFP_KERNEL);
	if (dev_ctl == NULL)
		return NULL;

	/* Adjust pointers so they point within the actual memory we
	 * just allocated rather than an imaginary chunk of memory
	 * located at address 0.
	 * 'dev_ctl' points to REAL memory, while the others are
	 * ZERO based and thus need to be adjusted to point within
	 * the allocated memory.
	 */
	dev_inst = (struct edac_device_instance *)
		(((char *)dev_ctl) + ((unsigned long)dev_inst));
	dev_blk = (struct edac_device_block *)
		(((char *)dev_ctl) + ((unsigned long)dev_blk));
	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

	/* Begin storing the information into the control info structure */
	dev_ctl->dev_idx = device_index;
	dev_ctl->nr_instances = nr_instances;
	dev_ctl->instances = dev_inst;
	dev_ctl->pvt_info = pvt;

	/* Default logging of CEs and UEs */
	dev_ctl->log_ce = 1;
	dev_ctl->log_ue = 1;

	/* Name of this edac device */
	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);

	debugf4("%s() edac_dev=%p next after end=%p\n",
		__func__, dev_ctl, pvt + sz_private);

	/* Initialize every Instance */
	for (instance = 0; instance < nr_instances; instance++) {
		inst = &dev_inst[instance];
		inst->ctl = dev_ctl;
		inst->nr_blocks = nr_blocks;
		blk_p = &dev_blk[instance * nr_blocks];
		inst->blocks = blk_p;

		/* name of this instance */
		snprintf(inst->name, sizeof(inst->name),
			 "%s%u", edac_device_name, instance);

		/* Initialize every block in each instance */
		for (block = 0; block < nr_blocks; block++) {
			blk = &blk_p[block];
			blk->instance = inst;
			snprintf(blk->name, sizeof(blk->name),
				 "%s%d", edac_block_name, block+offset_value);

			debugf4("%s() instance=%d inst_p=%p block=#%d "
				"block_p=%p name='%s'\n",
				__func__, instance, inst, block,
				blk, blk->name);

			/* if there are NO attributes OR no attribute pointer
			 * then continue on to next block iteration
			 */
			if ((nr_attrib == 0) || (attrib_spec == NULL))
				continue;

			/* setup the attribute array for this block;
			 * each (instance, block) pair gets its own slice
			 * of 'nr_attrib' entries within the dev_attrib array
			 */
			blk->nr_attribs = nr_attrib;
			attrib_p = &dev_attrib[(instance * nr_blocks + block)
						* nr_attrib];
			blk->block_attributes = attrib_p;

			debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
				__func__, blk->block_attributes);

			/* Initialize every user specified attribute in this
			 * block with the data the caller passed in
			 * Each block gets its own copy of pointers,
			 * and its unique 'value'
			 */
			for (attr = 0; attr < nr_attrib; attr++) {
				attrib = &attrib_p[attr];

				/* populate the unique per attrib
				 * with the code pointers and info
				 */
				attrib->attr = attrib_spec[attr].attr;
				attrib->show = attrib_spec[attr].show;
				attrib->store = attrib_spec[attr].store;

				attrib->block = blk;	/* up link */

				debugf4("%s() alloc-attrib=%p attrib_name='%s' "
					"attrib-spec=%p spec-name=%s\n",
					__func__, attrib, attrib->attr.name,
					&attrib_spec[attr],
					attrib_spec[attr].attr.name
					);
			}
		}
	}

	/* Mark this instance as merely ALLOCATED */
	dev_ctl->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_device controller
	 */
	err = edac_device_register_sysfs_main_kobj(dev_ctl);
	if (err) {
		kfree(dev_ctl);
		return NULL;
	}

	/* At this point the root kobj is valid; to 'free' the object,
	 * edac_device_unregister_sysfs_main_kobj() must be called,
	 * which performs the kobj unregistration; the actual free
	 * then occurs in the kobject release callback.
	 */

	return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
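
/*
 * Illustrative allocation sketch (not taken from any particular driver;
 * "l2cache", "bank" and struct my_edac_pvt below are hypothetical names):
 * a driver wanting one instance with four blocks, block names numbered
 * from 0 and no extra per-block sysfs attributes, might do roughly:
 *
 *	struct my_edac_pvt { void __iomem *base; };
 *	struct edac_device_ctl_info *edac_dev;
 *
 *	edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_edac_pvt),
 *					      "l2cache", 1, "bank", 4,
 *					      0, NULL, 0,
 *					      edac_device_alloc_index());
 *	if (edac_dev == NULL)
 *		return -ENOMEM;
 *
 * The matching release is edac_device_free_ctl_info(), below.
 */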

/*
 * edac_device_free_ctl_info()
 *	frees the memory allocated by the edac_device_alloc_ctl_info()
 *	function
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);

/*
 * find_edac_device_by_dev
 *	scans the edac_device list for a specific 'struct device *'
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		pointer to control structure managing 'dev'
 *		NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;
	struct list_head *item;

	debugf0("%s()\n", __func__);

	list_for_each(item, &edac_device_list) {
		edac_dev = list_entry(item, struct edac_device_ctl_info, link);

		if (edac_dev->dev == dev)
			return edac_dev;
	}

	return NULL;
}

/*
 * add_edac_dev_to_global_list
 *	Before calling this function, caller must
 *	assign a unique value to edac_dev->dev_idx.
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		0 on success
 *		1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
	struct list_head *item, *insert_before;
	struct edac_device_ctl_info *rover;

	insert_before = &edac_device_list;

	/* Determine if already on the list */
	rover = find_edac_device_by_dev(edac_dev->dev);
	if (unlikely(rover != NULL))
		goto fail0;

	/* Insert in ascending order by 'dev_idx', so find position */
	list_for_each(item, &edac_device_list) {
		rover = list_entry(item, struct edac_device_ctl_info, link);

		if (rover->dev_idx >= edac_dev->dev_idx) {
			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&edac_dev->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
			"%s (%s) %s %s already assigned %d\n",
			dev_name(rover->dev), edac_dev_name(rover),
			rover->mod_name, rover->ctl_name, rover->dev_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
			"bug in low-level driver: attempt to assign\n"
			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
			__func__);
	return 1;
}

/*
 * del_edac_device_from_global_list
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	list_del_rcu(&edac_device->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&edac_device->link);
}

/*
 * edac_device_workq_function
 *	performs the operation scheduled by a workq request
 *
 *	this workq is embedded within an edac_device_ctl_info
 *	structure that needs to be polled for possible error events.
 *
 *	This operation is to acquire the list mutex lock
 *	(thus preventing insertion or deletion)
 *	and then call the device's poll function IFF this device is
 *	running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* If we are being removed, bail out immediately */
	if (edac_dev->op_state == OP_OFFLINE) {
		mutex_unlock(&device_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
		(edac_dev->edac_check != NULL)) {
			edac_dev->edac_check(edac_dev);
	}

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period to start again.
	 * If the period is 1000 msec, round to the next whole second so
	 * that timers fire together on integral-second boundaries instead
	 * of firing scattered across the period between whole seconds.
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}

/*
 * edac_device_workq_setup
 *	initialize a workq item for this edac_device instance
 *	passing in the new delay period in msec
 */
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
				unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* take the arg 'msec' and store it in the control structure for
	 * use in the time period calculation, then convert it to the
	 * number of jiffies it represents
	 */
	edac_dev->poll_msec = msec;
	edac_dev->delay = msecs_to_jiffies(msec);

	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

	/* optimize here for the 1 second case, which will be the normal
	 * value, to fire ON the 1 second time event. This helps reduce
	 * all sorts of timers firing on a sub-second basis, while they
	 * are happy to fire together on the 1 second exactly
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}

/*
 * edac_device_workq_teardown
 *	stop the workq processing on this edac_dev
 */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	int status;

	status = cancel_delayed_work(&edac_dev->work);
	if (status == 0) {
		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_device_reset_delay_period
 *
 *	need to stop any outstanding workq queued up at this time
 *	because we will be resetting the sleep time.
 *	Then restart the workq on the new delay
 */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
					unsigned long value)
{
	/* cancel the current workq request, without the mutex lock */
	edac_device_workq_teardown(edac_dev);

	/* acquire the mutex before doing the workq setup */
	mutex_lock(&device_ctls_mutex);

	/* restart the workq request, with new delay value */
	edac_device_workq_setup(edac_dev, value);

	mutex_unlock(&device_ctls_mutex);
}
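
/*
 * Illustrative sketch (assuming 'value' is the new poll period in msec,
 * as it is stored into edac_dev->poll_msec by edac_device_workq_setup()):
 * a caller that wants to slow polling down to once every ten seconds
 * would do:
 *
 *	edac_device_reset_delay_period(edac_dev, 10000);
 */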

/*
 * edac_device_alloc_index: Allocate a unique device index number
 *
 * Return:
 *	allocated index number
 */
int edac_device_alloc_index(void)
{
	static atomic_t device_indexes = ATOMIC_INIT(0);

	return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);

/**
 * edac_device_add_device: Insert the 'edac_dev' structure into the
 * edac_device global list and create sysfs entries associated with
 * the edac_device structure.
 * @edac_dev: pointer to the edac_device structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		/*
		 * enable workq processing on this instance,
		 * default = 1000 msec
		 */
		edac_device_workq_setup(edac_dev, 1000);
	} else {
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
				"Giving out device to module '%s' controller "
				"'%s': DEV '%s' (%s)\n",
				edac_dev->mod_name,
				edac_dev->ctl_name,
				edac_dev_name(edac_dev),
				edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
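
/*
 * Illustrative registration sketch from a hypothetical driver's probe
 * routine ("my_driver", "l2cache" and my_edac_check() are example names,
 * not part of this API; a non-NULL edac_check selects polled operation):
 *
 *	edac_dev->dev = &pdev->dev;
 *	edac_dev->mod_name = "my_driver";
 *	edac_dev->ctl_name = "l2cache";
 *	edac_dev->edac_check = my_edac_check;
 *
 *	if (edac_device_add_device(edac_dev)) {
 *		edac_device_free_ctl_info(edac_dev);
 *		return -ENODEV;
 *	}
 */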

/**
 * edac_device_del_device:
 *	Remove sysfs entries for specified edac_device structure and
 *	then remove edac_device structure from global list
 *
 * @dev:
 *	Pointer to 'struct device' representing the edac_device
 *	structure to remove.
 *
 * Return:
 *	Pointer to removed edac_device structure,
 *	OR NULL if device not found.
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	debugf0("%s()\n", __func__);

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE */
	edac_dev->op_state = OP_OFFLINE;

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);
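
/*
 * Illustrative removal sketch from a hypothetical driver's remove routine,
 * matching the registration example above:
 *
 *	struct edac_device_ctl_info *edac_dev;
 *
 *	edac_dev = edac_device_del_device(&pdev->dev);
 *	if (edac_dev)
 *		edac_device_free_ctl_info(edac_dev);
 */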

static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}

static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}

static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
					*edac_dev)
{
	return edac_dev->panic_on_ue;
}

/*
 * edac_device_handle_ce
 *	perform a common output and handling of an 'edac_dev' CE event
 */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ce_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ce_count++;
	edac_dev->counters.ce_count++;

	if (edac_device_get_log_ce(edac_dev))
		edac_device_printk(edac_dev, KERN_WARNING,
				"CE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);
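
/*
 * Illustrative sketch: a hypothetical driver's edac_check routine that
 * detects a corrected error on instance 0, block 2 might report it as:
 *
 *	edac_device_handle_ce(edac_dev, 0, 2, "single-bit data error");
 *
 * The message string is an example only; drivers typically describe the
 * specific error condition they decoded.
 */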

/*
 * edac_device_handle_ue
 *	perform a common output and handling of an 'edac_dev' UE event
 */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ue_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ue_count++;
	edac_dev->counters.ue_count++;

	if (edac_device_get_log_ue(edac_dev))
		edac_device_printk(edac_dev, KERN_EMERG,
				"UE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);

	if (edac_device_get_panic_on_ue(edac_dev))
		panic("EDAC %s: UE instance: %s block %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);