/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2004, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>

#define _COMPONENT          ACPI_EVENTS
	 ACPI_MODULE_NAME    ("evgpeblk")


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_valid_gpe_event
 *
 * PARAMETERS:  gpe_event_info - Info for this GPE
 *
 * RETURN:      TRUE if the gpe_event is valid
 *
 * DESCRIPTION: Validate a GPE event.  DO NOT CALL FROM INTERRUPT LEVEL.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

u8
acpi_ev_valid_gpe_event (
	struct acpi_gpe_event_info      *gpe_event_info)
{
	struct acpi_gpe_xrupt_info      *gpe_xrupt_block;
	struct acpi_gpe_block_info      *gpe_block;


	ACPI_FUNCTION_ENTRY ();


	/* No need for spin lock since we are not changing any list elements */

	/* Walk the GPE interrupt levels */

	gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_block) {
		gpe_block = gpe_xrupt_block->gpe_block_list_head;

		/* Walk the GPE blocks on this interrupt level */

		while (gpe_block) {
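			/* Is the event pointer within this block's event_info array? (8 GPEs per register) */
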
			if ((&gpe_block->event_info[0] <= gpe_event_info) &&
				(&gpe_block->event_info[((acpi_size) gpe_block->register_count) * 8] > gpe_event_info)) {
				return (TRUE);
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_block = gpe_xrupt_block->next;
	}

	return (FALSE);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *              FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/

acpi_status
acpi_ev_walk_gpe_list (
	ACPI_GPE_CALLBACK       gpe_walk_callback)
{
	struct acpi_gpe_block_info      *gpe_block;
	struct acpi_gpe_xrupt_info      *gpe_xrupt_info;
	acpi_status                     status = AE_OK;


	ACPI_FUNCTION_TRACE ("ev_walk_gpe_list");


	acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_ISR);

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {
		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {
			/* One callback per GPE block */

			status = gpe_walk_callback (gpe_xrupt_info, gpe_block);
			if (ACPI_FAILURE (status)) {
				goto unlock_and_exit;
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

unlock_and_exit:
	acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_ISR);
	return_ACPI_STATUS (status);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_save_method_info
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch
 *
 *              The name of each GPE control method is of the form:
 *              "_Lxx" or "_Exx"
 *              Where:
 *                  L      - means that the GPE is level triggered
 *                  E      - means that the GPE is edge triggered
 *                  xx     - is the GPE number [in HEX]
 *
 ******************************************************************************/

static acpi_status
acpi_ev_save_method_info (
	acpi_handle                     obj_handle,
	u32                             level,
	void                            *obj_desc,
	void                            **return_value)
{
	struct acpi_gpe_block_info      *gpe_block = (void *) obj_desc;
	struct acpi_gpe_event_info      *gpe_event_info;
	u32                             gpe_number;
	char                            name[ACPI_NAME_SIZE + 1];
	u8                              type;


	ACPI_FUNCTION_TRACE ("ev_save_method_info");


	/*
	 * _Lxx and _Exx GPE method support
	 *
	 * 1) Extract the name from the object and convert to a string
	 */
	ACPI_MOVE_32_TO_32 (name,
			   &((struct acpi_namespace_node *) obj_handle)->name.integer);
	name[ACPI_NAME_SIZE] = 0;

	/*
	 * 2) Edge/Level determination is based on the 2nd character
	 *    of the method name
	 *
	 * NOTE: Default GPE type is RUNTIME.  May be changed later to WAKE if a
	 * _PRW object is found that points to this GPE.
	 */
	switch (name[1]) {
	case 'L':
		type = ACPI_GPE_LEVEL_TRIGGERED | ACPI_GPE_TYPE_RUNTIME;
		break;

	case 'E':
		type = ACPI_GPE_EDGE_TRIGGERED | ACPI_GPE_TYPE_RUNTIME;
		break;

	default:
		/* Unknown method type, just ignore it! */

		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
			"Unknown GPE method type: %s (name not of form _Lxx or _Exx)\n",
			name));
		return_ACPI_STATUS (AE_OK);
	}

	/* Convert the last two characters of the name to the GPE Number */

	gpe_number = ACPI_STRTOUL (&name[2], NULL, 16);
	if (gpe_number == ACPI_UINT32_MAX) {
		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
			"Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)\n",
			name));
		return_ACPI_STATUS (AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	if ((gpe_number < gpe_block->block_base_number) ||
		(gpe_number >= (gpe_block->block_base_number + (gpe_block->register_count * 8)))) {
		/*
		 * Not valid for this GPE block, just ignore it
		 * However, it may be valid for a different GPE block, since GPE0 and GPE1
		 * methods both appear under \_GPE.
		 */
		return_ACPI_STATUS (AE_OK);
	}

	/*
	 * Now we can add this information to the gpe_event_info block
	 * for use during dispatch of this GPE.
	 */
	gpe_event_info = &gpe_block->event_info[gpe_number - gpe_block->block_base_number];

	gpe_event_info->flags    = type;
	gpe_event_info->method_node = (struct acpi_namespace_node *) obj_handle;

	ACPI_DEBUG_PRINT ((ACPI_DB_LOAD,
		"Registered GPE method %s as GPE number 0x%.2X\n",
		name, gpe_number));
	return_ACPI_STATUS (AE_OK);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_type
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              Device.  Run the _PRW method.  If present, extract the GPE
 *              number and mark the GPE as a WAKE GPE.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_get_gpe_type (
	acpi_handle                     obj_handle,
	u32                             level,
	void                            *info,
	void                            **return_value)
{
	struct acpi_gpe_walk_info       *gpe_info = (void *) info;
	struct acpi_namespace_node      *gpe_device;
	struct acpi_gpe_block_info      *gpe_block;
	struct acpi_namespace_node      *target_gpe_device;
	struct acpi_gpe_event_info      *gpe_event_info;
	union acpi_operand_object       *pkg_desc;
	union acpi_operand_object       *obj_desc;
	u32                             gpe_number;
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_get_gpe_type");


	/* Check for a _PRW method under this device */

	status = acpi_ut_evaluate_object (obj_handle, METHOD_NAME__PRW,
			 ACPI_BTYPE_PACKAGE, &pkg_desc);
	if (status == AE_NOT_FOUND) {
		return_ACPI_STATUS (AE_OK);
	}
	else if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	/* The returned _PRW package must have at least two elements */

	if (pkg_desc->package.count < 2) {
		goto cleanup;
	}

	/* Extract pointers from the input context */

	gpe_device = gpe_info->gpe_device;
	gpe_block = gpe_info->gpe_block;

	/*
	 * The _PRW object must return a package, we are only interested
	 * in the first element
	 */
	obj_desc = pkg_desc->package.elements[0];

	if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) {
		/* Use FADT-defined GPE device (from definition of _PRW) */

		target_gpe_device = acpi_gbl_fadt_gpe_device;

		/* Integer is the GPE number in the FADT described GPE blocks */

		gpe_number = (u32) obj_desc->integer.value;
	}
	else if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_PACKAGE) {
		/* Package contains a GPE reference and GPE number within a GPE block */

		if ((obj_desc->package.count < 2) ||
			(ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[0]) != ACPI_TYPE_LOCAL_REFERENCE) ||
			(ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[1]) != ACPI_TYPE_INTEGER)) {
			goto cleanup;
		}

		/* Get GPE block reference and decode */

		target_gpe_device = obj_desc->package.elements[0]->reference.node;
		gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
	}
	else {
		/* Unknown type, just ignore it */

		goto cleanup;
	}

	/*
	 * Is this GPE within this block?
	 *
	 * TRUE iff these conditions are true:
	 *     1) The GPE devices match.
	 *     2) The GPE index(number) is within the range of the Gpe Block
	 *          associated with the GPE device.
	 */
	if ((gpe_device == target_gpe_device) &&
		(gpe_number >= gpe_block->block_base_number) &&
		(gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) {
		/* Mark GPE for WAKE but DISABLED (even for wake) */

		gpe_event_info = &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
		gpe_event_info->flags |= ACPI_GPE_TYPE_WAKE;
	}

cleanup:
	acpi_ut_remove_reference (pkg_desc);

	return_ACPI_STATUS (status);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_level     - Interrupt for a GPE block
 *
 * RETURN:      A GPE interrupt block
 *
 * DESCRIPTION: Get or Create a GPE interrupt block.  There is one interrupt
 *              block per unique interrupt level used for GPEs.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

static struct acpi_gpe_xrupt_info *
acpi_ev_get_gpe_xrupt_block (
	u32                             interrupt_level)
{
	struct acpi_gpe_xrupt_info      *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info      *gpe_xrupt;
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_get_gpe_xrupt_block");


	/* No need for spin lock since we are not changing any list elements here */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_level == interrupt_level) {
			return_PTR (next_gpe_xrupt);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_MEM_CALLOCATE (sizeof (struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		return_PTR (NULL);
	}

	gpe_xrupt->interrupt_level = interrupt_level;

	/* Install new interrupt descriptor with spin lock */

	acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
	if (acpi_gbl_gpe_xrupt_list_head) {
		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	}
	else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}
	acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);

	/* Install new interrupt handler if not SCI_INT */

	if (interrupt_level != acpi_gbl_FADT->sci_int) {
		status = acpi_os_install_interrupt_handler (interrupt_level,
				 acpi_ev_gpe_xrupt_handler, gpe_xrupt);
		if (ACPI_FAILURE (status)) {
			ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
				"Could not install GPE interrupt handler at level 0x%X\n",
				interrupt_level));
			return_PTR (NULL);
		}
	}

	return_PTR (gpe_xrupt);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_delete_gpe_xrupt (
	struct acpi_gpe_xrupt_info      *gpe_xrupt)
{
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_delete_gpe_xrupt");


	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_level == acpi_gbl_FADT->sci_int) {
		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS (AE_OK);
	}

	/* Disable this interrupt */

	status = acpi_os_remove_interrupt_handler (gpe_xrupt->interrupt_level,
			   acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	/* Unlink the interrupt block with lock */

	acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}
	acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);

	/* Free the block */

	ACPI_MEM_FREE (gpe_xrupt);
	return_ACPI_STATUS (AE_OK);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block       - New GPE block
 *              interrupt_level - Level to be associated with this GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block (
	struct acpi_gpe_block_info      *gpe_block,
	u32                             interrupt_level)
{
	struct acpi_gpe_block_info      *next_gpe_block;
	struct acpi_gpe_xrupt_info      *gpe_xrupt_block;
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_install_gpe_block");


	status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
	if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block (interrupt_level);
	if (!gpe_xrupt_block) {
		status = AE_NO_MEMORY;
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list for this interrupt with lock */

	acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	}
	else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);

unlock_and_exit:
	status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
	return_ACPI_STATUS (status);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block       - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_block (
	struct acpi_gpe_block_info      *gpe_block)
{
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_delete_gpe_block");


	status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
	if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	/* Disable all GPEs in this block */

	status = acpi_hw_disable_gpe_block (gpe_block->xrupt_block, gpe_block);

	if (!gpe_block->previous && !gpe_block->next) {
		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt (gpe_block->xrupt_block);
		if (ACPI_FAILURE (status)) {
			goto unlock_and_exit;
		}
	}
	else {
		/* Remove the block on this interrupt with lock */

		acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		}
		else {
			gpe_block->xrupt_block->gpe_block_list_head = gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}
		acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
	}

	/* Free the gpe_block */

	ACPI_MEM_FREE (gpe_block->register_info);
	ACPI_MEM_FREE (gpe_block->event_info);
	ACPI_MEM_FREE (gpe_block);

unlock_and_exit:
	status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
	return_ACPI_STATUS (status);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block   - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks (
	struct acpi_gpe_block_info      *gpe_block)
{
	struct acpi_gpe_register_info   *gpe_register_info = NULL;
	struct acpi_gpe_event_info      *gpe_event_info = NULL;
	struct acpi_gpe_event_info      *this_event;
	struct acpi_gpe_register_info   *this_register;
	acpi_native_uint                i;
	acpi_native_uint                j;
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_create_gpe_info_blocks");


	/* Allocate the GPE register information block */

	gpe_register_info = ACPI_MEM_CALLOCATE (
			  (acpi_size) gpe_block->register_count *
			  sizeof (struct acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
			"Could not allocate the gpe_register_info table\n"));
		return_ACPI_STATUS (AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register.  Initialization to zeros is sufficient.
	 */
	gpe_event_info = ACPI_MEM_CALLOCATE (
			   ((acpi_size) gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) *
			   sizeof (struct acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not allocate the gpe_event_info table\n"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info  = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures.  A goal of these
	 * tables is to hide the fact that there are two separate GPE register sets
	 * in a given gpe hardware block, the status registers occupy the first half,
	 * and the enable registers occupy the second half.
	 */
	this_register = gpe_register_info;
	this_event   = gpe_event_info;

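	/*
	 * For each GPE register pair below: fill in one register_info entry and
	 * eight event_info entries, then clear and disable the hardware register.
	 */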
	for (i = 0; i < gpe_block->register_count; i++) {
		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number = (u8) (gpe_block->block_base_number +
				   (i * ACPI_GPE_REGISTER_WIDTH));

		ACPI_STORE_ADDRESS (this_register->status_address.address,
				 (gpe_block->block_address.address
				 + i));

		ACPI_STORE_ADDRESS (this_register->enable_address.address,
				 (gpe_block->block_address.address
				 + i
				 + gpe_block->register_count));

		this_register->status_address.address_space_id = gpe_block->block_address.address_space_id;
		this_register->enable_address.address_space_id = gpe_block->block_address.address_space_id;
		this_register->status_address.register_bit_width = ACPI_GPE_REGISTER_WIDTH;
		this_register->enable_address.register_bit_width = ACPI_GPE_REGISTER_WIDTH;
		this_register->status_address.register_bit_offset = ACPI_GPE_REGISTER_WIDTH;
		this_register->enable_address.register_bit_offset = ACPI_GPE_REGISTER_WIDTH;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
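			/* acpi_gbl_decode_to8bit[j] is the single-bit mask (1 << j) for this GPE */
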
			this_event->bit_mask = acpi_gbl_decode_to8bit[j];
			this_event->register_info = this_register;
			this_event++;
		}

		/*
		 * Clear the status/enable registers.  Note that status registers
		 * are cleared by writing a '1', while enable registers are cleared
		 * by writing a '0'.
		 */
		status = acpi_hw_low_level_write (ACPI_GPE_REGISTER_WIDTH, 0x00,
				 &this_register->enable_address);
		if (ACPI_FAILURE (status)) {
			goto error_exit;
		}

		status = acpi_hw_low_level_write (ACPI_GPE_REGISTER_WIDTH, 0xFF,
				 &this_register->status_address);
		if (ACPI_FAILURE (status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS (AE_OK);


error_exit:
	if (gpe_register_info) {
		ACPI_MEM_FREE (gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_MEM_FREE (gpe_event_info);
	}

	return_ACPI_STATUS (status);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              gpe_block_address   - Address and space_id
 *              register_count      - Number of GPE register pairs in the block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_level     - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block (
	struct acpi_namespace_node      *gpe_device,
	struct acpi_generic_address     *gpe_block_address,
	u32                             register_count,
	u8                              gpe_block_base_number,
	u32                             interrupt_level,
	struct acpi_gpe_block_info      **return_gpe_block)
{
	struct acpi_gpe_block_info      *gpe_block;
	struct acpi_gpe_event_info      *gpe_event_info;
	acpi_native_uint                i;
	acpi_native_uint                j;
	u32                             wake_gpe_count;
	u32                             gpe_enabled_count;
	acpi_status                     status;
	struct acpi_gpe_walk_info       gpe_info;

	ACPI_FUNCTION_TRACE ("ev_create_gpe_block");


	if (!register_count) {
		return_ACPI_STATUS (AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_MEM_CALLOCATE (sizeof (struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS (AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	ACPI_MEMCPY (&gpe_block->block_address, gpe_block_address, sizeof (struct acpi_generic_address));

	/* Create the register_info and event_info sub-structures */

	status = acpi_ev_create_gpe_info_blocks (gpe_block);
	if (ACPI_FAILURE (status)) {
		ACPI_MEM_FREE (gpe_block);
		return_ACPI_STATUS (status);
	}

	/* Install the new block in the global list(s) */

	status = acpi_ev_install_gpe_block (gpe_block, interrupt_level);
	if (ACPI_FAILURE (status)) {
		ACPI_MEM_FREE (gpe_block);
		return_ACPI_STATUS (status);
	}

	/* Dump info about this GPE block */

	ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
		"GPE %02d to %02d [%4.4s] %d regs at %8.8X%8.8X on int %d\n",
		gpe_block->block_base_number,
		(u32) (gpe_block->block_base_number +
				((gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) -1)),
		gpe_device->name.ascii,
		gpe_block->register_count,
		ACPI_FORMAT_UINT64 (gpe_block->block_address.address),
		interrupt_level));

	/* Find all GPE methods (_Lxx, _Exx) for this block */

	status = acpi_ns_walk_namespace (ACPI_TYPE_METHOD, gpe_device,
			  ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ev_save_method_info,
			  gpe_block, NULL);

	/*
	 * Runtime option: Should Wake GPEs be enabled at runtime?  The default
	 * is No, they should only be enabled just as the machine goes to sleep.
	 */
	if (acpi_gbl_leave_wake_gpes_disabled) {
		/*
		 * Differentiate RUNTIME vs WAKE GPEs, via the _PRW control methods. (Each
		 * GPE that has one or more _PRWs that reference it is by definition a
		 * WAKE GPE and will not be enabled while the machine is running.)
		 */
		gpe_info.gpe_block = gpe_block;
		gpe_info.gpe_device = gpe_device;

		status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				  ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_get_gpe_type,
				  &gpe_info, NULL);
	}

	/*
	 * Enable all GPEs in this block that are 1) "runtime" GPEs, and 2) have
	 * a corresponding _Lxx or _Exx method.  All other GPEs must be enabled via
	 * the acpi_enable_gpe() external interface.
	 */
	wake_gpe_count = 0;
	gpe_enabled_count = 0;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < 8; j++) {
			/* Get the info block for this particular GPE */

			gpe_event_info = &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];
			if ((gpe_event_info->method_node) &&
			   ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == ACPI_GPE_TYPE_RUNTIME)) {
				/* Enable this GPE, it is 1) RUNTIME and 2) has an _Lxx or _Exx method */

				status = acpi_hw_enable_gpe (gpe_event_info);
				if (ACPI_FAILURE (status)) {
					return_ACPI_STATUS (status);
				}
				gpe_enabled_count++;
			}

			if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == ACPI_GPE_TYPE_WAKE) {
				wake_gpe_count++;
			}
		}
	}

	ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
			"Found %u Wake, Enabled %u Runtime GPEs in this block\n",
			wake_gpe_count, gpe_enabled_count));

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	return_ACPI_STATUS (AE_OK);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures
 *
 ******************************************************************************/

acpi_status
acpi_ev_gpe_initialize (
	void)
{
	u32                             register_count0 = 0;
	u32                             register_count1 = 0;
	u32                             gpe_number_max = 0;
	acpi_status                     status;


	ACPI_FUNCTION_TRACE ("ev_gpe_initialize");


	status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2:  From the ACPI Spec,
	 * section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 *  GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 *  GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
	 *  The length of the GPE1_STS and GPE1_EN registers is equal to
	 *  half the GPE1_LEN. If a generic register block is not supported
	 *  then its respective block pointer and block length values in the
	 *  FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 *  to be the same size."
	 */

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address are zero, then that
	 * particular block is not supported.
	 */
	if (acpi_gbl_FADT->gpe0_blk_len &&
		acpi_gbl_FADT->xgpe0_blk.address) {
		/* GPE block 0 exists (has both length and address > 0) */

		register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);

		gpe_number_max = (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 */

		status = acpi_ev_create_gpe_block (acpi_gbl_fadt_gpe_device, &acpi_gbl_FADT->xgpe0_blk,
				 register_count0, 0, acpi_gbl_FADT->sci_int, &acpi_gbl_gpe_fadt_blocks[0]);

		if (ACPI_FAILURE (status)) {
			ACPI_REPORT_ERROR ((
				"Could not create GPE Block 0, %s\n",
				acpi_format_exception (status)));
		}
	}

	if (acpi_gbl_FADT->gpe1_blk_len &&
		acpi_gbl_FADT->xgpe1_blk.address) {
		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
			(gpe_number_max >= acpi_gbl_FADT->gpe1_base)) {
			ACPI_REPORT_ERROR ((
				"GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1\n",
				gpe_number_max, acpi_gbl_FADT->gpe1_base,
				acpi_gbl_FADT->gpe1_base +
				((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		}
		else {
			/* Install GPE Block 1 */

			status = acpi_ev_create_gpe_block (acpi_gbl_fadt_gpe_device, &acpi_gbl_FADT->xgpe1_blk,
					 register_count1, acpi_gbl_FADT->gpe1_base,
					 acpi_gbl_FADT->sci_int, &acpi_gbl_gpe_fadt_blocks[1]);

			if (ACPI_FAILURE (status)) {
				ACPI_REPORT_ERROR ((
					"Could not create GPE Block 1, %s\n",
					acpi_format_exception (status)));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
			gpe_number_max = acpi_gbl_FADT->gpe1_base +
					   ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
		}
	}

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {
		/* GPEs are not required by ACPI, this is OK */

		ACPI_REPORT_INFO (("There are no GPE blocks defined in the FADT\n"));
		status = AE_OK;
		goto cleanup;
	}

	/* Check for Max GPE number out-of-range */

	if (gpe_number_max > ACPI_GPE_MAX) {
		ACPI_REPORT_ERROR (("Maximum GPE number from FADT is too large: 0x%X\n",
			gpe_number_max));
		status = AE_BAD_VALUE;
		goto cleanup;
	}

cleanup:
	(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS (AE_OK);
}
