1 /******************************************************************************
2 *
3 * Module Name: evgpe - General Purpose Event handling and dispatch
4 *
5 *****************************************************************************/
6
7 /*
8 * Copyright (C) 2000 - 2004, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44 #include <acpi/acpi.h>
45 #include <acpi/acevents.h>
46 #include <acpi/acnamesp.h>
47
48 #define _COMPONENT ACPI_EVENTS
49 ACPI_MODULE_NAME ("evgpe")
50
51
52 /*******************************************************************************
53 *
54 * FUNCTION: acpi_ev_get_gpe_event_info
55 *
56 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
57 * gpe_number - Raw GPE number
58 *
59 * RETURN: A GPE event_info struct. NULL if not a valid GPE
60 *
61 * DESCRIPTION: Returns the event_info struct associated with this GPE.
62 * Validates the gpe_block and the gpe_number
63 *
64 * Should be called only when the GPE lists are semaphore locked
65 * and not subject to change.
66 *
67 ******************************************************************************/
68
69 struct acpi_gpe_event_info *
acpi_ev_get_gpe_event_info(acpi_handle gpe_device,u32 gpe_number)70 acpi_ev_get_gpe_event_info (
71 acpi_handle gpe_device,
72 u32 gpe_number)
73 {
74 union acpi_operand_object *obj_desc;
75 struct acpi_gpe_block_info *gpe_block;
76 acpi_native_uint i;
77
78
79 ACPI_FUNCTION_ENTRY ();
80
81
82 /* A NULL gpe_block means use the FADT-defined GPE block(s) */
83
84 if (!gpe_device) {
85 /* Examine GPE Block 0 and 1 (These blocks are permanent) */
86
87 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
88 gpe_block = acpi_gbl_gpe_fadt_blocks[i];
89 if (gpe_block) {
90 if ((gpe_number >= gpe_block->block_base_number) &&
91 (gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) {
92 return (&gpe_block->event_info[gpe_number - gpe_block->block_base_number]);
93 }
94 }
95 }
96
97 /* The gpe_number was not in the range of either FADT GPE block */
98
99 return (NULL);
100 }
101
102 /* A Non-NULL gpe_device means this is a GPE Block Device */
103
104 obj_desc = acpi_ns_get_attached_object ((struct acpi_namespace_node *) gpe_device);
105 if (!obj_desc ||
106 !obj_desc->device.gpe_block) {
107 return (NULL);
108 }
109
110 gpe_block = obj_desc->device.gpe_block;
111
112 if ((gpe_number >= gpe_block->block_base_number) &&
113 (gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) {
114 return (&gpe_block->event_info[gpe_number - gpe_block->block_base_number]);
115 }
116
117 return (NULL);
118 }
119
120
121 /*******************************************************************************
122 *
123 * FUNCTION: acpi_ev_gpe_detect
124 *
125 * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt.
126 * Can have multiple GPE blocks attached.
127 *
128 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
129 *
130 * DESCRIPTION: Detect if any GP events have occurred. This function is
131 * executed at interrupt level.
132 *
133 ******************************************************************************/
134
u32
acpi_ev_gpe_detect (
	struct acpi_gpe_xrupt_info      *gpe_xrupt_list)
{
	u32                             int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8                              enabled_status_byte;
	struct acpi_gpe_register_info   *gpe_register_info;
	u32                             in_value;
	acpi_status                     status;
	struct acpi_gpe_block_info      *gpe_block;
	u32                             i;
	u32                             j;


	ACPI_FUNCTION_NAME ("ev_gpe_detect");

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * Examine all GPE blocks attached to this interrupt level.
	 * The GPE lock is held (as ISR) across the entire scan so the
	 * block list cannot change underneath us; every exit path below
	 * goes through unlock_and_exit to release it.
	 */
	acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_ISR);
	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers
		 * in this GPE block, saving all of them.
		 * Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {
			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/* Read the Status Register */

			status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &in_value,
					 &gpe_register_info->status_address);
			/* NOTE(review): value is cached before the failure check;
			 * harmless since a failure aborts the scan immediately. */
			gpe_register_info->status = (u8) in_value;
			if (ACPI_FAILURE (status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &in_value,
					 &gpe_register_info->enable_address);
			gpe_register_info->enable = (u8) in_value;
			if (ACPI_FAILURE (status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT ((ACPI_DB_INTERRUPTS,
				"GPE pair: Status %8.8X%8.8X = %02X, Enable %8.8X%8.8X = %02X\n",
				ACPI_FORMAT_UINT64 (gpe_register_info->status_address.address),
				gpe_register_info->status,
				ACPI_FORMAT_UINT64 (gpe_register_info->enable_address.address),
				gpe_register_info->enable));

			/* First check if there is anything active at all in this register */

			enabled_status_byte = (u8) (gpe_register_info->status &
					   gpe_register_info->enable);
			if (!enabled_status_byte) {
				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
				/* Examine one GPE bit */

				if (enabled_status_byte & acpi_gbl_decode_to8bit[j]) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method. int_status accumulates HANDLED if any
					 * dispatch succeeds.
					 */
					int_status |= acpi_ev_gpe_dispatch (
						  &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j],
						  j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

unlock_and_exit:

	acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_ISR);
	return (int_status);
}
232
233
234 /*******************************************************************************
235 *
236 * FUNCTION: acpi_ev_asynch_execute_gpe_method
237 *
238 * PARAMETERS: Context (gpe_event_info) - Info for this GPE
239 *
240 * RETURN: None
241 *
242 * DESCRIPTION: Perform the actual execution of a GPE control method. This
243 * function is called from an invocation of acpi_os_queue_for_execution
244 * (and therefore does NOT execute at interrupt level) so that
245 * the control method itself is not executed in the context of
246 * an interrupt handler.
247 *
248 ******************************************************************************/
249
static void ACPI_SYSTEM_XFACE
acpi_ev_asynch_execute_gpe_method (
	void                            *context)
{
	struct acpi_gpe_event_info      *gpe_event_info = (void *) context;
	/* NOTE(review): gpe_number is never assigned after this init, so the
	 * error report below always prints GPE[ 0] -- the real GPE number is
	 * not recoverable from the event_info struct here; confirm upstream. */
	u32                             gpe_number = 0;
	acpi_status                     status;
	struct acpi_gpe_event_info      local_gpe_event_info;


	ACPI_FUNCTION_TRACE ("ev_asynch_execute_gpe_method");


	/* The events mutex guards the GPE lists while we revalidate/copy */

	status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
	if (ACPI_FAILURE (status)) {
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	if (!acpi_ev_valid_gpe_event (gpe_event_info)) {
		status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
		return_VOID;
	}

	/*
	 * Take a snapshot of the GPE info for this level - we copy the
	 * info to prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY (&local_gpe_event_info, gpe_event_info, sizeof (struct acpi_gpe_event_info));

	/* All further work uses the snapshot, so the mutex can be dropped */

	status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
	if (ACPI_FAILURE (status)) {
		return_VOID;
	}

	if (local_gpe_event_info.method_node) {
		/*
		 * Invoke the GPE Method (_Lxx, _Exx):
		 * (Evaluate the _Lxx/_Exx control method that corresponds to this GPE.)
		 */
		status = acpi_ns_evaluate_by_handle (local_gpe_event_info.method_node, NULL, NULL);
		if (ACPI_FAILURE (status)) {
			ACPI_REPORT_ERROR (("%s while evaluating method [%4.4s] for GPE[%2X]\n",
				acpi_format_exception (status),
				acpi_ut_get_node_name (local_gpe_event_info.method_node), gpe_number));
		}
	}

	if ((local_gpe_event_info.flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after handling
		 * the event.
		 */
		status = acpi_hw_clear_gpe (&local_gpe_event_info);
		if (ACPI_FAILURE (status)) {
			/* Leave the GPE disabled rather than risk an interrupt storm */

			return_VOID;
		}
	}

	/* Re-enable this GPE (it was disabled in acpi_ev_gpe_dispatch) */

	(void) acpi_hw_enable_gpe (&local_gpe_event_info);
	return_VOID;
}
315
316
317 /*******************************************************************************
318 *
319 * FUNCTION: acpi_ev_gpe_dispatch
320 *
321 * PARAMETERS: gpe_event_info - info for this GPE
322 * gpe_number - Number relative to the parent GPE block
323 *
324 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
325 *
326 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
327 * or method (e.g. _Lxx/_Exx) handler.
328 *
329 * This function executes at interrupt level.
330 *
331 ******************************************************************************/
332
333 u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info * gpe_event_info,u32 gpe_number)334 acpi_ev_gpe_dispatch (
335 struct acpi_gpe_event_info *gpe_event_info,
336 u32 gpe_number)
337 {
338 acpi_status status;
339
340
341 ACPI_FUNCTION_TRACE ("ev_gpe_dispatch");
342
343
344 /*
345 * If edge-triggered, clear the GPE status bit now. Note that
346 * level-triggered events are cleared after the GPE is serviced.
347 */
348 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_EDGE_TRIGGERED) {
349 status = acpi_hw_clear_gpe (gpe_event_info);
350 if (ACPI_FAILURE (status)) {
351 ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n",
352 gpe_number));
353 return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
354 }
355 }
356
357 /*
358 * Dispatch the GPE to either an installed handler, or the control
359 * method associated with this GPE (_Lxx or _Exx).
360 * If a handler exists, we invoke it and do not attempt to run the method.
361 * If there is neither a handler nor a method, we disable the level to
362 * prevent further events from coming in here.
363 */
364 if (gpe_event_info->handler) {
365 /* Invoke the installed handler (at interrupt level) */
366
367 gpe_event_info->handler (gpe_event_info->context);
368
369 /* It is now safe to clear level-triggered events. */
370
371 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) {
372 status = acpi_hw_clear_gpe (gpe_event_info);
373 if (ACPI_FAILURE (status)) {
374 ACPI_REPORT_ERROR ((
375 "acpi_ev_gpe_dispatch: Unable to clear GPE[%2X]\n",
376 gpe_number));
377 return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
378 }
379 }
380 }
381 else if (gpe_event_info->method_node) {
382 /*
383 * Disable GPE, so it doesn't keep firing before the method has a
384 * chance to run.
385 */
386 status = acpi_hw_disable_gpe (gpe_event_info);
387 if (ACPI_FAILURE (status)) {
388 ACPI_REPORT_ERROR ((
389 "acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
390 gpe_number));
391 return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
392 }
393
394 /*
395 * Execute the method associated with the GPE
396 * NOTE: Level-triggered GPEs are cleared after the method completes.
397 */
398 if (ACPI_FAILURE (acpi_os_queue_for_execution (OSD_PRIORITY_GPE,
399 acpi_ev_asynch_execute_gpe_method,
400 gpe_event_info))) {
401 ACPI_REPORT_ERROR ((
402 "acpi_ev_gpe_dispatch: Unable to queue handler for GPE[%2X], event is disabled\n",
403 gpe_number));
404 }
405 }
406 else {
407 /* No handler or method to run! */
408
409 ACPI_REPORT_ERROR ((
410 "acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n",
411 gpe_number));
412
413 /*
414 * Disable the GPE. The GPE will remain disabled until the ACPI
415 * Core Subsystem is restarted, or a handler is installed.
416 */
417 status = acpi_hw_disable_gpe (gpe_event_info);
418 if (ACPI_FAILURE (status)) {
419 ACPI_REPORT_ERROR ((
420 "acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
421 gpe_number));
422 return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
423 }
424 }
425
426 return_VALUE (ACPI_INTERRUPT_HANDLED);
427 }
428
429