/******************************************************************************
 *
 * Module Name: evgpeutil - GPE utilities
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2011, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              Context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	acpi_status status = AE_OK;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {

			/* One callback per GPE block */

			status =
			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
					      context);
			if (ACPI_FAILURE(status)) {
				if (status == AE_CTRL_END) {	/* Callback abort */
					status = AE_OK;
				}
				goto unlock_and_exit;
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

      unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}
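
/*
 * A minimal usage sketch (not part of the original ACPICA source; the
 * callback name and counter below are hypothetical). Any routine with the
 * GPE_WALK_CALLBACK signature can be passed in, and it may return
 * AE_CTRL_END to stop the walk early, as the callbacks later in this file
 * do:
 *
 *      static acpi_status
 *      my_count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 *                          struct acpi_gpe_block_info *gpe_block,
 *                          void *context)
 *      {
 *              (*(u32 *)context)++;
 *              return (AE_OK);
 *      }
 *
 *      u32 block_count = 0;
 *      (void)acpi_ev_walk_gpe_list(my_count_gpe_blocks, &block_count);
 */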

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_valid_gpe_event
 *
 * PARAMETERS:  gpe_event_info              - Info for this GPE
 *
 * RETURN:      TRUE if the gpe_event is valid
 *
 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	struct acpi_gpe_block_info *gpe_block;

	ACPI_FUNCTION_ENTRY();

	/* No need for spin lock since we are not changing any list elements */

	/* Walk the GPE interrupt levels */

	gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_block) {
		gpe_block = gpe_xrupt_block->gpe_block_list_head;

		/* Walk the GPE blocks on this interrupt level */

		while (gpe_block) {
			if ((&gpe_block->event_info[0] <= gpe_event_info) &&
			    (&gpe_block->event_info[gpe_block->gpe_count] >
			     gpe_event_info)) {
				return (TRUE);
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_block = gpe_xrupt_block->next;
	}

	return (FALSE);
}
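
/*
 * Worked example of the range check above (illustrative only): for a block
 * whose gpe_count is N, exactly the pointers &event_info[0] through
 * &event_info[N - 1] are reported as valid. A hypothetical caller holding a
 * cached event pointer might guard its use like this:
 *
 *      if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
 *              return_ACPI_STATUS(AE_BAD_PARAMETER);
 *      }
 */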

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_device
 *
 * PARAMETERS:  GPE_WALK_CALLBACK
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		       struct acpi_gpe_block_info *gpe_block, void *context)
{
	struct acpi_gpe_device_info *info = context;

	/* Increment Index by the number of GPEs in this block */

	info->next_block_base_index += gpe_block->gpe_count;

	if (info->index < info->next_block_base_index) {
		/*
		 * The GPE index is within this block, get the node. Leave the node
		 * NULL for the FADT-defined GPEs
		 */
		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
			info->gpe_device = gpe_block->node;
		}

		info->status = AE_OK;
		return (AE_CTRL_END);
	}

	return (AE_OK);
}
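
/*
 * Sketch of how this callback is typically driven (illustrative; the
 * initialization shown is an assumption modeled on the public
 * acpi_get_gpe_device() interface). The caller seeds the info structure,
 * walks the GPE lists, and reads back gpe_device and status afterwards:
 *
 *      struct acpi_gpe_device_info info;
 *
 *      info.index = index;
 *      info.status = AE_NOT_EXIST;
 *      info.gpe_device = NULL;
 *      info.next_block_base_index = 0;
 *
 *      status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
 */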

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
 *
 * RETURN:      A GPE interrupt block
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
{
	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info *gpe_xrupt;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

	/* No need for lock since we are not changing any list elements here */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
			return_PTR(next_gpe_xrupt);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		return_PTR(NULL);
	}

	gpe_xrupt->interrupt_number = interrupt_number;

	/* Install new interrupt descriptor with spin lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (acpi_gbl_gpe_xrupt_list_head) {
		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	} else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Install new interrupt handler if not SCI_INT */

	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
		status = acpi_os_install_interrupt_handler(interrupt_number,
							   acpi_ev_gpe_xrupt_handler,
							   gpe_xrupt);
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO,
				    "Could not install GPE interrupt handler at level 0x%X",
				    interrupt_number));
			return_PTR(NULL);
		}
	}

	return_PTR(gpe_xrupt);
}
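
/*
 * Illustrative get-or-create usage (a hypothetical caller, not taken from
 * this file). Code that installs a GPE block for a given interrupt first
 * obtains the per-interrupt descriptor and then links the new block onto
 * that descriptor's gpe_block_list_head:
 *
 *      gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
 *      if (!gpe_xrupt_block) {
 *              return_ACPI_STATUS(AE_NO_MEMORY);
 *      }
 */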

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable this interrupt */

	status =
	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
					     acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink the interrupt block with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	} else {
		/* No previous, update list head */

		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Free the block */

	ACPI_FREE(gpe_xrupt);
	return_ACPI_STATUS(AE_OK);
}
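
/*
 * Counterpart sketch to acpi_ev_get_gpe_xrupt_block above (hypothetical
 * caller). Once the last GPE block has been unlinked from an interrupt
 * descriptor, the descriptor itself can be released:
 *
 *      if (!gpe_xrupt->gpe_block_list_head) {
 *              status = acpi_ev_delete_gpe_xrupt(gpe_xrupt);
 *      }
 */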

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Now look at the individual GPEs in this byte register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			gpe_event_info = &gpe_block->event_info[((acpi_size) i *
								 ACPI_GPE_REGISTER_WIDTH)
								+ j];

			if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
			    ACPI_GPE_DISPATCH_HANDLER) {
				ACPI_FREE(gpe_event_info->dispatch.handler);
				gpe_event_info->dispatch.handler = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			}
		}
	}

	return_ACPI_STATUS(AE_OK);
}
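
/*
 * Illustrative invocation (not part of this file): because this routine has
 * the GPE_WALK_CALLBACK signature, shutdown code can apply it to every GPE
 * block through the list walker defined at the top of this file:
 *
 *      (void)acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
 */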