1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 80 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *
8  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License as published by
12  *  the Free Software Foundation; either version 2 of the License, or
13  *  (at your option) any later version.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to the Free Software
22  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  *
26  */
27 
28 #include <linux/config.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/mm.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/kmod.h>
35 #include <linux/delay.h>
36 #include <linux/init.h>
37 #include <linux/nmi.h>
38 #include <asm/io.h>
39 #include <acpi/acpi_bus.h>
40 #include <acpi/acpi.h>
41 
42 #ifdef CONFIG_ACPI_EFI
43 #include <linux/efi.h>
44 u64 efi_mem_attributes (u64 phys_addr);
45 #endif
46 
47 
48 #define _COMPONENT		ACPI_OS_SERVICES
49 ACPI_MODULE_NAME	("osl")
50 
51 #define PREFIX		"ACPI: "
52 
/*
 * Deferred procedure call (DPC) descriptor: a callback plus its opaque
 * context argument, queued by acpi_os_queue_for_execution() and run
 * later in process context by acpi_os_execute_deferred().
 */
struct acpi_os_dpc
{
    OSD_EXECUTION_CALLBACK  function;	/* routine to invoke */
    void		    *context;	/* opaque argument passed to function */
};
58 
59 
60 #ifdef ENABLE_DEBUGGER
61 #include <linux/kdb.h>
62 /* stuff for debugger support */
63 int acpi_in_debugger;
64 extern char line_buf[80];
65 #endif /*ENABLE_DEBUGGER*/
66 
/* State for the single installed SCI interrupt handler (see
 * acpi_os_install_interrupt_handler / acpi_os_remove_interrupt_handler). */
static int acpi_irq_irq;
static OSD_HANDLER acpi_irq_handler;
static void *acpi_irq_context;
70 
71 
/* Early OSL initialization hook; nothing is needed at this stage. */
acpi_status
acpi_os_initialize(void)
{
	return AE_OK;
}
77 
acpi_status
acpi_os_initialize1(void)
{
	/*
	 * Initialize PCI configuration space access, as we'll need to access
	 * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
	 */
#ifdef CONFIG_ACPI_PCI
	pcibios_config_init();
	/* Without both accessors, PCI config-space operation regions cannot
	 * be evaluated during the namespace walk; fail early. */
	if (!pci_config_read || !pci_config_write) {
		printk(KERN_ERR PREFIX "Access to PCI configuration space unavailable\n");
		return AE_NULL_ENTRY;
	}
#endif

	return AE_OK;
}
95 
/*
 * Tear down OSL state: release the SCI interrupt handler if one was
 * installed by acpi_os_install_interrupt_handler().
 */
acpi_status
acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_irq_irq,
						 acpi_irq_handler);
	}

	return AE_OK;
}
106 
/* printf-style front end; formatting is done by acpi_os_vprintf(). */
void
acpi_os_printf(const char *fmt,...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
115 
116 void
acpi_os_vprintf(const char * fmt,va_list args)117 acpi_os_vprintf(const char *fmt, va_list args)
118 {
119 	static char buffer[512];
120 
121 	vsprintf(buffer, fmt, args);
122 
123 #ifdef ENABLE_DEBUGGER
124 	if (acpi_in_debugger) {
125 		kdb_printf("%s", buffer);
126 	} else {
127 		printk("%s", buffer);
128 	}
129 #else
130 	printk("%s", buffer);
131 #endif
132 }
133 
/*
 * Allocate kernel memory for the ACPI subsystem.  Returns NULL on failure.
 * NOTE(review): GFP_KERNEL may sleep -- assumes callers are in process
 * context; confirm against the interpreter's calling paths.
 */
void *
acpi_os_allocate(acpi_size size)
{
	return kmalloc(size, GFP_KERNEL);
}
139 
/* Release memory obtained from acpi_os_allocate(). */
void
acpi_os_free(void *ptr)
{
	kfree(ptr);
}
145 
/*
 * Locate the RSDP (root of the ACPI table tree) and return it in *addr.
 * On EFI systems the firmware supplies the address directly (the ACPI
 * 2.0 pointer is preferred over the 1.0 one); otherwise low memory is
 * scanned via acpi_find_root_pointer().
 */
acpi_status
acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
{
#ifdef CONFIG_ACPI_EFI
	addr->pointer_type = ACPI_PHYSICAL_POINTER;
	if (efi.acpi20)
		addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi20);
	else if (efi.acpi)
		addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi);
	else {
		printk(KERN_ERR PREFIX "System description tables not found\n");
		return AE_NOT_FOUND;
	}
#else
	if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) {
		printk(KERN_ERR PREFIX "System description tables not found\n");
		return AE_NOT_FOUND;
	}
#endif /*CONFIG_ACPI_EFI*/

	return AE_OK;
}
168 
/*
 * Map a physical address range and return a kernel-virtual pointer in
 * *virt.  On EFI systems, write-back memory is already covered by the
 * kernel direct map (phys_to_virt); anything else is ioremap()'d.  On
 * non-EFI builds, addresses that do not fit in 'unsigned long' cannot
 * be mapped at all.
 */
acpi_status
acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void **virt)
{
#ifdef CONFIG_ACPI_EFI
	if (EFI_MEMORY_WB & efi_mem_attributes(phys)) {
		*virt = phys_to_virt(phys);
	} else {
		*virt = ioremap(phys, size);
	}
#else
	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return AE_BAD_PARAMETER;
	}
	/*
	 * ioremap checks to ensure this is in reserved space
	 */
	*virt = ioremap((unsigned long) phys, size);
#endif

	if (!*virt)
		return AE_NO_MEMORY;

	return AE_OK;
}
194 
/*
 * Undo acpi_os_map_memory(); 'size' is unused here.
 * NOTE(review): on EFI systems the mapping may have come from
 * phys_to_virt() rather than ioremap(); unconditionally calling
 * iounmap() on such a pointer looks suspect -- verify iounmap() is
 * harmless for direct-map addresses on the affected architectures.
 */
void
acpi_os_unmap_memory(void *virt, acpi_size size)
{
	iounmap(virt);
}
200 
201 acpi_status
acpi_os_get_physical_address(void * virt,acpi_physical_address * phys)202 acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
203 {
204 	if(!phys || !virt)
205 		return AE_BAD_PARAMETER;
206 
207 	*phys = virt_to_phys(virt);
208 
209 	return AE_OK;
210 }
211 
#define ACPI_MAX_OVERRIDE_LEN 100

/* Buffer holding the "acpi_os_name=" boot-time _OS override string
 * (filled in by acpi_os_name_setup(), consumed by
 * acpi_os_predefined_override()). */
static char __initdata acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
215 
216 acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names * init_val,acpi_string * new_val)217 acpi_os_predefined_override (const struct acpi_predefined_names *init_val,
218 		             acpi_string *new_val)
219 {
220 	if (!init_val || !new_val)
221 		return AE_BAD_PARAMETER;
222 
223 	*new_val = NULL;
224 	if (!memcmp (init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
225 		printk(KERN_INFO PREFIX "Overriding _OS definition: %s\n",
226 		       acpi_os_name);
227 		*new_val = acpi_os_name;
228 	}
229 
230 	return AE_OK;
231 }
232 
/*
 * Hook for substituting an ACPI table with an OS-provided replacement.
 * This port supplies no overrides: *new_table is always NULL ("keep the
 * existing table").
 */
acpi_status
acpi_os_table_override (struct acpi_table_header *existing_table,
			struct acpi_table_header **new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;
	return AE_OK;
}
243 
/* SCI interrupt entry point: forward to the handler/context the ACPI
 * core registered via acpi_os_install_interrupt_handler(). */
static void
acpi_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	(*acpi_irq_handler)(acpi_irq_context);
}
249 
250 acpi_status
acpi_os_install_interrupt_handler(u32 irq,OSD_HANDLER handler,void * context)251 acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context)
252 {
253 	/*
254 	 * Ignore the irq from the core, and use the value in our copy of the
255 	 * FADT. It may not be the same if an interrupt source override exists
256 	 * for the SCI.
257 	 */
258 	irq = acpi_fadt.sci_int;
259 
260 #ifdef CONFIG_IA64
261 	irq = acpi_irq_to_vector(irq);
262 	if (irq < 0) {
263 		printk(KERN_ERR PREFIX "SCI (ACPI interrupt %d) not registered\n",
264 		       acpi_fadt.sci_int);
265 		return AE_OK;
266 	}
267 #endif
268 	acpi_irq_irq = irq;
269 	acpi_irq_handler = handler;
270 	acpi_irq_context = context;
271 	if (request_irq(irq, acpi_irq, SA_SHIRQ, "acpi", acpi_irq)) {
272 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
273 		return AE_NOT_ACQUIRED;
274 	}
275 
276 	return AE_OK;
277 }
278 
/*
 * Detach the SCI handler installed by acpi_os_install_interrupt_handler().
 * 'irq' is the ACPI interrupt number; on IA64 it is first translated to
 * the vector that was actually passed to request_irq().
 */
acpi_status
acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler)
{
	if (acpi_irq_handler) {
#ifdef CONFIG_IA64
		irq = acpi_irq_to_vector(irq);
#endif
		free_irq(irq, acpi_irq);
		acpi_irq_handler = NULL;
	}

	return AE_OK;
}
292 
293 /*
294  * Running in interpreter thread context, safe to sleep
295  */
296 
297 void
acpi_os_sleep(u32 sec,u32 ms)298 acpi_os_sleep(u32 sec, u32 ms)
299 {
300 	current->state = TASK_INTERRUPTIBLE;
301 	schedule_timeout(HZ * sec + (ms * HZ) / 1000);
302 }
303 
304 void
acpi_os_stall(u32 us)305 acpi_os_stall(u32 us)
306 {
307 	while (us) {
308 		u32 delay = 1000;
309 
310 		if (delay > us)
311 			delay = us;
312 		udelay(delay);
313 		touch_nmi_watchdog();
314 		us -= delay;
315 	}
316 }
317 
318 acpi_status
acpi_os_read_port(acpi_io_address port,u32 * value,u32 width)319 acpi_os_read_port(
320 	acpi_io_address	port,
321 	u32		*value,
322 	u32		width)
323 {
324 	u32 dummy;
325 
326 	if (!value)
327 		value = &dummy;
328 
329 	switch (width)
330 	{
331 	case 8:
332 		*(u8*)  value = inb(port);
333 		break;
334 	case 16:
335 		*(u16*) value = inw(port);
336 		break;
337 	case 32:
338 		*(u32*) value = inl(port);
339 		break;
340 	default:
341 		BUG();
342 	}
343 
344 	return AE_OK;
345 }
346 
347 acpi_status
acpi_os_write_port(acpi_io_address port,u32 value,u32 width)348 acpi_os_write_port(
349 	acpi_io_address	port,
350 	u32		value,
351 	u32		width)
352 {
353 	switch (width)
354 	{
355 	case 8:
356 		outb(value, port);
357 		break;
358 	case 16:
359 		outw(value, port);
360 		break;
361 	case 32:
362 		outl(value, port);
363 		break;
364 	default:
365 		BUG();
366 	}
367 
368 	return AE_OK;
369 }
370 
/*
 * Read an 8/16/32-bit value from physical memory into *value.  On EFI
 * systems, memory without write-back attributes is outside the kernel
 * direct map and must be temporarily ioremap()'d; elsewhere the direct
 * mapping (phys_to_virt) is used.  A NULL 'value' performs the read but
 * discards the data.
 */
acpi_status
acpi_os_read_memory(
	acpi_physical_address	phys_addr,
	u32			*value,
	u32			width)
{
	u32			dummy;
	void			*virt_addr;

#ifdef CONFIG_ACPI_EFI
	int			iomem = 0;	/* did we ioremap()? */

	if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
		virt_addr = phys_to_virt(phys_addr);
	} else {
		iomem = 1;
		virt_addr = ioremap(phys_addr, width);
	}
#else
	virt_addr = phys_to_virt(phys_addr);
#endif
	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8*) value = *(u8*) virt_addr;
		break;
	case 16:
		*(u16*) value = *(u16*) virt_addr;
		break;
	case 32:
		*(u32*) value = *(u32*) virt_addr;
		break;
	default:
		BUG();	/* unsupported access width */
	}

#ifdef CONFIG_ACPI_EFI
	/* Drop the temporary mapping, if we created one. */
	if (iomem)
		iounmap(virt_addr);
#endif

	return AE_OK;
}
416 
/*
 * Write the low 'width' bits of 'value' to physical memory.  On EFI
 * systems, memory without write-back attributes is outside the kernel
 * direct map and must be temporarily ioremap()'d; elsewhere the direct
 * mapping (phys_to_virt) is used.
 */
acpi_status
acpi_os_write_memory(
	acpi_physical_address	phys_addr,
	u32			value,
	u32			width)
{
	void			*virt_addr;

#ifdef CONFIG_ACPI_EFI
	int			iomem = 0;	/* did we ioremap()? */

	if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
		virt_addr = phys_to_virt(phys_addr);
	} else {
		iomem = 1;
		virt_addr = ioremap(phys_addr, width);
	}
#else
	virt_addr = phys_to_virt(phys_addr);
#endif

	switch (width) {
	case 8:
		*(u8*) virt_addr = value;
		break;
	case 16:
		*(u16*) virt_addr = value;
		break;
	case 32:
		*(u32*) virt_addr = value;
		break;
	default:
		BUG();	/* unsupported access width */
	}

#ifdef CONFIG_ACPI_EFI
	/* Drop the temporary mapping, if we created one. */
	if (iomem)
		iounmap(virt_addr);
#endif

	return AE_OK;
}
459 
460 #ifdef CONFIG_ACPI_PCI
461 
462 acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id,u32 reg,void * value,u32 width)463 acpi_os_read_pci_configuration (
464 	struct acpi_pci_id	*pci_id,
465 	u32			reg,
466 	void			*value,
467 	u32			width)
468 {
469 	int			result = 0;
470 	if (!value)
471 		return AE_BAD_PARAMETER;
472 
473 	switch (width)
474 	{
475 	case 8:
476 		result = pci_config_read(pci_id->segment, pci_id->bus,
477 			pci_id->device, pci_id->function, reg, 1, value);
478 		break;
479 	case 16:
480 		result = pci_config_read(pci_id->segment, pci_id->bus,
481 			pci_id->device, pci_id->function, reg, 2, value);
482 		break;
483 	case 32:
484 		result = pci_config_read(pci_id->segment, pci_id->bus,
485 			pci_id->device, pci_id->function, reg, 4, value);
486 		break;
487 	default:
488 		BUG();
489 	}
490 
491 	return (result ? AE_ERROR : AE_OK);
492 }
493 
494 acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id,u32 reg,acpi_integer value,u32 width)495 acpi_os_write_pci_configuration (
496 	struct acpi_pci_id	*pci_id,
497 	u32			reg,
498 	acpi_integer		value,
499 	u32			width)
500 {
501 	int			result = 0;
502 
503 	switch (width)
504 	{
505 	case 8:
506 		result = pci_config_write(pci_id->segment, pci_id->bus,
507 			pci_id->device, pci_id->function, reg, 1, value);
508 		break;
509 	case 16:
510 		result = pci_config_write(pci_id->segment, pci_id->bus,
511 			pci_id->device, pci_id->function, reg, 2, value);
512 		break;
513 	case 32:
514 		result = pci_config_write(pci_id->segment, pci_id->bus,
515 			pci_id->device, pci_id->function, reg, 4, value);
516 		break;
517 	default:
518 		BUG();
519 	}
520 
521 	return (result ? AE_ERROR : AE_OK);
522 }
523 
/*
 * Recursive helper for acpi_os_derive_pci_id().
 *
 * Walks up the namespace from 'chandle' to (but not including)
 * 'rhandle', processing ancestors top-down (recurse first, work after).
 * Each device node with an _ADR fills in the device/function fields of
 * *id; when the previously-processed ancestor was a PCI-to-PCI (or
 * CardBus) bridge, the bus field is taken from the bridge's secondary
 * bus number cached in *bus_number.  *is_bridge carries the
 * "previous level was a bridge" flag between levels.
 */
static void
acpi_os_derive_pci_id_2 (
	acpi_handle		rhandle,        /* upper bound  */
	acpi_handle		chandle,        /* current node */
	struct acpi_pci_id	**id,
	int			*is_bridge,
	u8			*bus_number)
{
	acpi_handle		handle;
	struct acpi_pci_id	*pci_id = *id;
	acpi_status		status;
	unsigned long		temp;
	acpi_object_type	type;
	u8			tu8;

	acpi_get_parent(chandle, &handle);
	if (handle != rhandle) {
		/* Process ancestors first so bus numbers propagate downward. */
		acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge, bus_number);

		status = acpi_get_type(handle, &type);
		if ( (ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE) )
			return;

		status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &temp);
		if (ACPI_SUCCESS(status)) {
			/* _ADR: device in the high word, function in the low word. */
			pci_id->device  = ACPI_HIWORD (ACPI_LODWORD (temp));
			pci_id->function = ACPI_LOWORD (ACPI_LODWORD (temp));

			if (*is_bridge)
				pci_id->bus = *bus_number;

			/* any nicer way to get bus number of bridge ? */
			/* Config reg 0x0e is the header type: 1 = PCI-PCI
			 * bridge, 2 = CardBus bridge. */
			status = acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8, 8);
			if (ACPI_SUCCESS(status) &&
			    ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
				/* 0x18 = primary bus number register */
				status = acpi_os_read_pci_configuration(pci_id, 0x18, &tu8, 8);
				if (!ACPI_SUCCESS(status)) {
					/* Certainly broken...  FIX ME */
					return;
				}
				*is_bridge = 1;
				pci_id->bus = tu8;
				/* 0x19 = secondary bus number register */
				status = acpi_os_read_pci_configuration(pci_id, 0x19, &tu8, 8);
				if (ACPI_SUCCESS(status)) {
					*bus_number = tu8;
				}
			} else
				*is_bridge = 0;
		}
	}
}
575 
/*
 * Derive the full PCI id (bus/device/function) of the namespace node
 * 'chandle' under the PCI root bridge 'rhandle', updating *id in place.
 * (*id)->bus is expected to be preset to the root bridge's bus number;
 * is_bridge starts at 1 so the first level picks that bus up.
 */
void
acpi_os_derive_pci_id (
	acpi_handle		rhandle,        /* upper bound  */
	acpi_handle		chandle,        /* current node */
	struct acpi_pci_id	**id)
{
	int is_bridge = 1;
	u8 bus_number = (*id)->bus;

	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
587 
588 #else /*!CONFIG_ACPI_PCI*/
589 
/* PCI support not configured: config-space writes are unsupported. */
acpi_status
acpi_os_write_pci_configuration (
	struct acpi_pci_id	*pci_id,
	u32			reg,
	acpi_integer		value,
	u32			width)
{
	return (AE_SUPPORT);
}
599 
/* PCI support not configured: config-space reads are unsupported. */
acpi_status
acpi_os_read_pci_configuration (
	struct acpi_pci_id	*pci_id,
	u32			reg,
	void			*value,
	u32			width)
{
	return (AE_SUPPORT);
}
609 
/* PCI support not configured: *id is left untouched. */
void
acpi_os_derive_pci_id (
	acpi_handle		rhandle,        /* upper bound  */
	acpi_handle		chandle,        /* current node */
	struct acpi_pci_id	**id)
{
}
617 
618 #endif /*CONFIG_ACPI_PCI*/
619 
620 static void
acpi_os_execute_deferred(void * context)621 acpi_os_execute_deferred (
622 	void *context)
623 {
624 	struct acpi_os_dpc	*dpc = NULL;
625 
626 	ACPI_FUNCTION_TRACE ("os_execute_deferred");
627 
628 	dpc = (struct acpi_os_dpc *) context;
629 	if (!dpc) {
630 		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
631 		return_VOID;
632 	}
633 
634 	dpc->function(dpc->context);
635 
636 	kfree(dpc);
637 
638 	return_VOID;
639 }
640 
/*
 * Queue function(context) for deferred execution in process context via
 * the kernel task queue.  'priority' is accepted but not used.  The DPC
 * descriptor (freed by acpi_os_execute_deferred()) is allocated
 * GFP_ATOMIC -- presumably because this can be called from non-process
 * context; confirm against callers.
 */
acpi_status
acpi_os_queue_for_execution(
	u32			priority,
	OSD_EXECUTION_CALLBACK	function,
	void			*context)
{
	acpi_status 		status = AE_OK;
	struct acpi_os_dpc	*dpc = NULL;
	struct tq_struct	*task;

	ACPI_FUNCTION_TRACE ("os_queue_for_execution");

	ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context));

	if (!function)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the tq_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static tq_struct.
	 * We can save time and code by allocating the DPC and tq_structs
	 * from the same memory.
	 */
	dpc = kmalloc(sizeof(struct acpi_os_dpc)+sizeof(struct tq_struct), GFP_ATOMIC);
	if (!dpc)
		return_ACPI_STATUS (AE_NO_MEMORY);

	dpc->function = function;
	dpc->context = context;

	/* The tq_struct lives immediately after the DPC in the same chunk. */
	task = (void *)(dpc+1);
	INIT_TQUEUE(task, acpi_os_execute_deferred, (void*)dpc);

	if (!schedule_task(task)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to schedule_task() failed.\n"));
		/* Not queued: the callee will never run, so free here. */
		kfree(dpc);
		status = AE_ERROR;
	}

	return_ACPI_STATUS (status);
}
686 
687 /*
688  * Allocate the memory for a spinlock and initialize it.
689  */
690 acpi_status
acpi_os_create_lock(acpi_handle * out_handle)691 acpi_os_create_lock (
692 	acpi_handle	*out_handle)
693 {
694 	spinlock_t *lock_ptr;
695 
696 	ACPI_FUNCTION_TRACE ("os_create_lock");
697 
698 	lock_ptr = acpi_os_allocate(sizeof(spinlock_t));
699 
700 	spin_lock_init(lock_ptr);
701 
702 	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr));
703 
704 	*out_handle = lock_ptr;
705 
706 	return_ACPI_STATUS (AE_OK);
707 }
708 
709 
710 /*
711  * Deallocate the memory for a spinlock.
712  */
713 void
acpi_os_delete_lock(acpi_handle handle)714 acpi_os_delete_lock (
715 	acpi_handle	handle)
716 {
717 	ACPI_FUNCTION_TRACE ("os_create_lock");
718 
719 	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle));
720 
721 	acpi_os_free(handle);
722 
723 	return_VOID;
724 }
725 
726 /*
727  * Acquire a spinlock.
728  *
729  * handle is a pointer to the spinlock_t.
730  * flags is *not* the result of save_flags - it is an ACPI-specific flag variable
731  *   that indicates whether we are at interrupt level.
732  */
/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 * flags is *not* the result of save_flags - it is an ACPI-specific flag variable
 *   that indicates whether we are at interrupt level.
 */
void
acpi_os_acquire_lock (
	acpi_handle	handle,
	u32		flags)
{
	ACPI_FUNCTION_TRACE ("os_acquire_lock");

	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquiring spinlock[%p] from %s level\n", handle,
		((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));

	/* Presumably: when not already in the ISR, disable interrupts
	 * before locking so the SCI handler cannot preempt us and spin on
	 * the same lock on this CPU -- confirm against the SCI paths. */
	if (flags & ACPI_NOT_ISR)
		ACPI_DISABLE_IRQS();

	spin_lock((spinlock_t *)handle);

	return_VOID;
}
750 
751 
752 /*
753  * Release a spinlock. See above.
754  */
/*
 * Release a spinlock taken with acpi_os_acquire_lock(); 'flags' must
 * match the value passed at acquire time so interrupt state is restored
 * symmetrically (unlock first, then re-enable).
 */
void
acpi_os_release_lock (
	acpi_handle	handle,
	u32		flags)
{
	ACPI_FUNCTION_TRACE ("os_release_lock");

	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Releasing spinlock[%p] from %s level\n", handle,
		((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));

	spin_unlock((spinlock_t *)handle);

	if (flags & ACPI_NOT_ISR)
		ACPI_ENABLE_IRQS();

	return_VOID;
}
772 
773 
774 acpi_status
acpi_os_create_semaphore(u32 max_units,u32 initial_units,acpi_handle * handle)775 acpi_os_create_semaphore(
776 	u32		max_units,
777 	u32		initial_units,
778 	acpi_handle	*handle)
779 {
780 	struct semaphore	*sem = NULL;
781 
782 	ACPI_FUNCTION_TRACE ("os_create_semaphore");
783 
784 	sem = acpi_os_allocate(sizeof(struct semaphore));
785 	if (!sem)
786 		return_ACPI_STATUS (AE_NO_MEMORY);
787 	memset(sem, 0, sizeof(struct semaphore));
788 
789 	sema_init(sem, initial_units);
790 
791 	*handle = (acpi_handle*)sem;
792 
793 	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", *handle, initial_units));
794 
795 	return_ACPI_STATUS (AE_OK);
796 }
797 
798 
799 /*
800  * TODO: A better way to delete semaphores?  Linux doesn't have a
801  * 'delete_semaphore()' function -- may result in an invalid
802  * pointer dereference for non-synchronized consumers.	Should
803  * we at least check for blocked threads and signal/cancel them?
804  */
805 
806 acpi_status
acpi_os_delete_semaphore(acpi_handle handle)807 acpi_os_delete_semaphore(
808 	acpi_handle	handle)
809 {
810 	struct semaphore *sem = (struct semaphore*) handle;
811 
812 	ACPI_FUNCTION_TRACE ("os_delete_semaphore");
813 
814 	if (!sem)
815 		return_ACPI_STATUS (AE_BAD_PARAMETER);
816 
817 	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
818 
819 	acpi_os_free(sem); sem =  NULL;
820 
821 	return_ACPI_STATUS (AE_OK);
822 }
823 
824 
825 /*
826  * TODO: The kernel doesn't have a 'down_timeout' function -- had to
827  * improvise.  The process is to sleep for one scheduler quantum
828  * until the semaphore becomes available.  Downside is that this
829  * may result in starvation for timeout-based waits when there's
830  * lots of semaphore activity.
831  *
832  * TODO: Support for units > 1?
833  */
834 acpi_status
acpi_os_wait_semaphore(acpi_handle handle,u32 units,u16 timeout)835 acpi_os_wait_semaphore(
836 	acpi_handle		handle,
837 	u32			units,
838 	u16			timeout)
839 {
840 	acpi_status		status = AE_OK;
841 	struct semaphore	*sem = (struct semaphore*)handle;
842 	int			ret = 0;
843 
844 	ACPI_FUNCTION_TRACE ("os_wait_semaphore");
845 
846 	if (!sem || (units < 1))
847 		return_ACPI_STATUS (AE_BAD_PARAMETER);
848 
849 	if (units > 1)
850 		return_ACPI_STATUS (AE_SUPPORT);
851 
852 	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout));
853 
854 	if (in_interrupt())
855 		timeout = 0;
856 
857 	switch (timeout)
858 	{
859 		/*
860 		 * No Wait:
861 		 * --------
862 		 * A zero timeout value indicates that we shouldn't wait - just
863 		 * acquire the semaphore if available otherwise return AE_TIME
864 		 * (a.k.a. 'would block').
865 		 */
866 		case 0:
867 		if(down_trylock(sem))
868 			status = AE_TIME;
869 		break;
870 
871 		/*
872 		 * Wait Indefinitely:
873 		 * ------------------
874 		 */
875 		case ACPI_WAIT_FOREVER:
876 		down(sem);
877 		break;
878 
879 		/*
880 		 * Wait w/ Timeout:
881 		 * ----------------
882 		 */
883 		default:
884 		// TODO: A better timeout algorithm?
885 		{
886 			int i = 0;
887 			static const int quantum_ms = 1000/HZ;
888 
889 			ret = down_trylock(sem);
890 			for (i = timeout; (i > 0 && ret < 0); i -= quantum_ms) {
891 				current->state = TASK_INTERRUPTIBLE;
892 				schedule_timeout(1);
893 				ret = down_trylock(sem);
894 			}
895 
896 			if (ret != 0)
897 				status = AE_TIME;
898 		}
899 		break;
900 	}
901 
902 	if (ACPI_FAILURE(status)) {
903 		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Failed to acquire semaphore[%p|%d|%d], %s\n",
904 			handle, units, timeout, acpi_format_exception(status)));
905 	}
906 	else {
907 		ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquired semaphore[%p|%d|%d]\n", handle, units, timeout));
908 	}
909 
910 	return_ACPI_STATUS (status);
911 }
912 
913 
914 /*
915  * TODO: Support for units > 1?
916  */
917 acpi_status
acpi_os_signal_semaphore(acpi_handle handle,u32 units)918 acpi_os_signal_semaphore(
919     acpi_handle 	    handle,
920     u32 		    units)
921 {
922 	struct semaphore *sem = (struct semaphore *) handle;
923 
924 	ACPI_FUNCTION_TRACE ("os_signal_semaphore");
925 
926 	if (!sem || (units < 1))
927 		return_ACPI_STATUS (AE_BAD_PARAMETER);
928 
929 	if (units > 1)
930 		return_ACPI_STATUS (AE_SUPPORT);
931 
932 	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, units));
933 
934 	up(sem);
935 
936 	return_ACPI_STATUS (AE_OK);
937 }
938 
939 u32
acpi_os_get_line(char * buffer)940 acpi_os_get_line(char *buffer)
941 {
942 
943 #ifdef ENABLE_DEBUGGER
944 	if (acpi_in_debugger) {
945 		u32 chars;
946 
947 		kdb_read(buffer, sizeof(line_buf));
948 
949 		/* remove the CR kdb includes */
950 		chars = strlen(buffer) - 1;
951 		buffer[chars] = '\0';
952 	}
953 #endif
954 
955 	return 0;
956 }
957 
958 /*
959  * We just have to assume we're dealing with valid memory
960  */
961 
/* The interpreter is trusted to hand us valid memory (see comment
 * above); always report readable. */
u8
acpi_os_readable(void *ptr, acpi_size len)
{
	return 1;
}
967 
/* Same assumption as acpi_os_readable(): always report writable. */
u8
acpi_os_writable(void *ptr, acpi_size len)
{
	return 1;
}
973 
974 u32
acpi_os_get_thread_id(void)975 acpi_os_get_thread_id (void)
976 {
977 	if (!in_interrupt())
978 		return current->pid;
979 
980 	return 0;
981 }
982 
983 acpi_status
acpi_os_signal(u32 function,void * info)984 acpi_os_signal (
985     u32		function,
986     void	*info)
987 {
988 	switch (function)
989 	{
990 	case ACPI_SIGNAL_FATAL:
991 		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
992 		break;
993 	case ACPI_SIGNAL_BREAKPOINT:
994 		{
995 			char *bp_info = (char*) info;
996 
997 			printk(KERN_ERR "ACPI breakpoint: %s\n", bp_info);
998 		}
999 	default:
1000 		break;
1001 	}
1002 
1003 	return AE_OK;
1004 }
1005 
1006 int __init
acpi_os_name_setup(char * str)1007 acpi_os_name_setup(char *str)
1008 {
1009 	char *p = acpi_os_name;
1010 	int count = ACPI_MAX_OVERRIDE_LEN-1;
1011 
1012 	if (!str || !*str)
1013 		return 0;
1014 
1015 	for (; count-- && str && *str; str++) {
1016 		if (isalnum(*str) || *str == ' ' || *str == ':')
1017 			*p++ = *str;
1018 		else if (*str == '\'' || *str == '"')
1019 			continue;
1020 		else
1021 			break;
1022 	}
1023 	*p = 0;
1024 
1025 	return 1;
1026 
1027 }
1028 
1029 __setup("acpi_os_name=", acpi_os_name_setup);
1030 
1031 /*
1032  * _OSI control
1033  * empty string disables _OSI
1034  * TBD additional string adds to _OSI
1035  */
1036 int __init
acpi_osi_setup(char * str)1037 acpi_osi_setup(char *str)
1038 {
1039 	if (str == NULL || *str == '\0') {
1040 		printk(KERN_INFO PREFIX "_OSI method disabled\n");
1041 		acpi_gbl_create_osi_method = FALSE;
1042 	} else
1043 	{
1044 		/* TBD */
1045 		printk(KERN_ERR PREFIX "_OSI additional string ignored -- %s\n", str);
1046 	}
1047 
1048 	return 1;
1049 }
1050 
1051 __setup("acpi_osi=", acpi_osi_setup);
1052 
1053 /* enable serialization to combat AE_ALREADY_EXISTS errors */
/*
 * "acpi_serialize" boot option: force all AML control methods to run
 * serialized, as a workaround for AE_ALREADY_EXISTS errors (see comment
 * above).
 */
int __init
acpi_serialize_setup(char *str)
{
	printk(KERN_INFO PREFIX "serialize enabled\n");

	acpi_gbl_all_methods_serialized = TRUE;

	return 1;
}
1063 
1064 __setup("acpi_serialize", acpi_serialize_setup);
1065 
1066 /*
1067  * Wake and Run-Time GPES are expected to be separate.
1068  * We disable wake-GPEs at run-time to prevent spurious
1069  * interrupts.
1070  *
1071  * However, if a system exists that shares Wake and
1072  * Run-time events on the same GPE this flag is available
1073  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1074  */
/*
 * "acpi_wake_gpes_always_on" boot option handler (rationale in the
 * comment above).  Note the double negative in the flag name: setting
 * it to FALSE means wake GPEs are left enabled at run-time.
 */
int __init
acpi_wake_gpes_always_on_setup(char *str)
{
	printk(KERN_INFO PREFIX "wake GPEs not disabled\n");

	acpi_gbl_leave_wake_gpes_disabled = FALSE;

	return 1;
}
1084 
1085 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1086 
1087