2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
4 * Copyright (C) 2000 Andrew Henroid
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (c) 2008 Intel Corporation
8 * Author: Matthew Wilcox <willy@linux.intel.com>
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/kmod.h>
34 #include <linux/delay.h>
35 #include <linux/workqueue.h>
36 #include <linux/nmi.h>
37 #include <linux/acpi.h>
38 #include <linux/efi.h>
39 #include <linux/ioport.h>
40 #include <linux/list.h>
41 #include <linux/jiffies.h>
42 #include <linux/semaphore.h>
45 #include <asm/uaccess.h>
46 #include <linux/io-64-nonatomic-lo-hi.h>
50 #define _COMPONENT ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
54 acpi_osd_exec_callback function;
56 struct work_struct work;
59 #ifdef ENABLE_DEBUGGER
60 #include <linux/kdb.h>
62 /* stuff for debugger support */
64 EXPORT_SYMBOL(acpi_in_debugger);
65 #endif /*ENABLE_DEBUGGER */
67 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
69 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
72 static acpi_osd_handler acpi_irq_handler;
73 static void *acpi_irq_context;
74 static struct workqueue_struct *kacpid_wq;
75 static struct workqueue_struct *kacpi_notify_wq;
76 static struct workqueue_struct *kacpi_hotplug_wq;
77 static bool acpi_os_initialized;
78 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
81 * This list of permanent mappings is for memory that may be accessed from
82 * interrupt context, where we can't do the ioremap().
85 struct list_head list;
87 acpi_physical_address phys;
89 unsigned long refcount;
92 static LIST_HEAD(acpi_ioremaps);
93 static DEFINE_MUTEX(acpi_ioremap_lock);
95 static void __init acpi_osi_setup_late(void);
98 * The story of _OSI(Linux)
100 * From pre-history through Linux-2.6.22,
101 * Linux responded TRUE upon a BIOS OSI(Linux) query.
103 * Unfortunately, reference BIOS writers got wind of this
104 * and put OSI(Linux) in their example code, quickly exposing
105 * this string as ill-conceived and opening the door to
106 * an un-bounded number of BIOS incompatibilities.
108 * For example, OSI(Linux) was used on resume to re-POST a
109 * video card on one system, because Linux at that time
110 * could not do a speedy restore in its native driver.
111 * But then upon gaining quick native restore capability,
112 * Linux has no way to tell the BIOS to skip the time-consuming
113 * POST -- putting Linux at a permanent performance disadvantage.
114 * On another system, the BIOS writer used OSI(Linux)
115 * to infer native OS support for IPMI! On other systems,
116 * OSI(Linux) simply got in the way of Linux claiming to
117 * be compatible with other operating systems, exposing
118 * BIOS issues such as skipped device initialization.
120 * So "Linux" turned out to be a really poor choice of
121 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
123 * BIOS writers should NOT query _OSI(Linux) on future systems.
124 * Linux will complain on the console when it sees it, and return FALSE.
125 * To get Linux to return TRUE for your system will require
126 * a kernel source update to add a DMI entry,
127 * or boot with "acpi_osi=Linux"
/*
 * Tracks how _OSI(Linux) should be answered and where that decision came
 * from (cmdline vs DMI).  NOTE(review): this listing is an excerpt with
 * lines elided; the "dmi:1" bitfield (original line 132) is missing here,
 * which is why four initializers follow only three visible members.
 */
130 static struct osi_linux {
131 unsigned int enable:1;
133 unsigned int cmdline:1;
134 unsigned int default_disabling:1;
135 } osi_linux = {0, 0, 0, 0};
/*
 * ACPICA _OSI callback: warn (once) when firmware probes _OSI(Linux),
 * and answer "Darwin" positively while disabling every other vendor
 * string, since Apple firmware misbehaves if more than one OS string
 * is acknowledged.
 */
137 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
139 if (!strcmp("Linux", interface)) {
141 printk_once(KERN_NOTICE FW_BUG PREFIX
142 "BIOS _OSI(Linux) query %s%s\n",
143 osi_linux.enable ? "honored" : "ignored",
144 osi_linux.cmdline ? " via cmdline" :
145 osi_linux.dmi ? " via DMI" : "");
148 if (!strcmp("Darwin", interface)) {
150 * Apple firmware will behave poorly if it receives positive
151 * answers to "Darwin" and any other OS. Respond positively
152 * to Darwin and then disable all other vendor strings.
154 acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
155 supported = ACPI_UINT32_MAX;
/*
 * Reserve the I/O-port or memory range described by a FADT generic
 * address structure so native drivers cannot claim it.  The address is
 * copied out with memcpy because the GAS may be unaligned.
 * NOTE(review): the local "addr" declaration and early-return body are
 * elided from this excerpt (original lines 163-170).
 */
161 static void __init acpi_request_region (struct acpi_generic_address *gas,
162 unsigned int length, char *desc)
166 /* Handle possible alignment issues */
167 memcpy(&addr, &gas->address, sizeof(addr));
/* Skip zero-address or zero-length regions — nothing to reserve. */
168 if (!addr || !length)
171 /* Resources are never freed */
172 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
173 request_region(addr, length, desc);
174 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
175 request_mem_region(addr, length, desc);
/*
 * Reserve all fixed-hardware register blocks declared in the FADT
 * (PM1a/PM1b event and control, PM timer, PM2 control, GPE0/GPE1) so
 * they show up in /proc/ioports|/proc/iomem and conflict checks.
 */
178 static int __init acpi_reserve_resources(void)
180 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
181 "ACPI PM1a_EVT_BLK");
183 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
184 "ACPI PM1b_EVT_BLK");
186 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
187 "ACPI PM1a_CNT_BLK");
189 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
190 "ACPI PM1b_CNT_BLK");
/* The PM timer block is only valid when its declared length is 4 bytes. */
192 if (acpi_gbl_FADT.pm_timer_length == 4)
193 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
195 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
198 /* Length of GPE blocks must be a non-negative multiple of 2 */
200 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
201 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
202 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
204 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
205 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
206 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
/* Run after regular fs_initcalls so arch resource setup has completed. */
210 fs_initcall_sync(acpi_reserve_resources);
/* Varargs front end for ACPICA console output; forwards to the vprintf
 * form below.  (va_start/va_end lines are elided from this excerpt.) */
212 void acpi_os_printf(const char *fmt, ...)
216 acpi_os_vprintf(fmt, args);
219 EXPORT_SYMBOL(acpi_os_printf);
/*
 * Format ACPICA output and route it to KDB, the in-kernel ACPI debugger
 * log, or printk.
 * NOTE(review): vsprintf() into a fixed 512-byte static buffer is
 * unbounded — oversized messages would overflow it, and the static
 * buffer is not protected against concurrent callers.  Worth confirming
 * against the full file/upstream whether callers guarantee both.
 */
221 void acpi_os_vprintf(const char *fmt, va_list args)
223 static char buffer[512];
225 vsprintf(buffer, fmt, args);
227 #ifdef ENABLE_DEBUGGER
228 if (acpi_in_debugger) {
229 kdb_printf("%s", buffer);
231 printk(KERN_CONT "%s", buffer);
/* Fall back to printk when the debugger log write fails. */
234 if (acpi_debugger_write_log(buffer) < 0)
235 printk(KERN_CONT "%s", buffer);
/* Optional RSDP physical address forced via the "acpi_rsdp=" boot
 * parameter (parsed as hex); 0 means "not supplied". */
240 static unsigned long acpi_rsdp;
241 static int __init setup_acpi_rsdp(char *arg)
243 if (kstrtoul(arg, 16, &acpi_rsdp))
247 early_param("acpi_rsdp", setup_acpi_rsdp);
/*
 * Locate the ACPI root pointer: prefer the EFI config table entries
 * (ACPI 2.0 first, then 1.0), otherwise fall back to the legacy
 * low-memory scan via acpi_find_root_pointer().  The early return of
 * the cmdline-forced acpi_rsdp value is elided from this excerpt.
 */
250 acpi_physical_address __init acpi_os_get_root_pointer(void)
257 if (efi_enabled(EFI_CONFIG_TABLES)) {
258 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
260 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
263 printk(KERN_ERR PREFIX
264 "System description tables not found\n");
267 } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
268 acpi_physical_address pa = 0;
270 acpi_find_root_pointer(&pa);
277 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
/* Find an existing mapping that fully covers [phys, phys+size). */
278 static struct acpi_ioremap *
279 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
281 struct acpi_ioremap *map;
283 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
284 if (map->phys <= phys &&
285 phys + size <= map->phys + map->size)
291 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
/* Same lookup, but translate to the virtual address for phys. */
292 static void __iomem *
293 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
295 struct acpi_ioremap *map;
297 map = acpi_map_lookup(phys, size);
299 return map->virt + (phys - map->phys);
/* Public lookup: takes the mutex itself, returns NULL if not mapped. */
304 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
306 struct acpi_ioremap *map;
307 void __iomem *virt = NULL;
309 mutex_lock(&acpi_ioremap_lock);
310 map = acpi_map_lookup(phys, size);
312 virt = map->virt + (phys - map->phys);
315 mutex_unlock(&acpi_ioremap_lock);
318 EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
320 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
/* Reverse lookup: find the mapping covering a virtual range. */
321 static struct acpi_ioremap *
322 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
324 struct acpi_ioremap *map;
326 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
327 if (map->virt <= virt &&
328 virt + size <= map->virt + map->size)
334 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
335 /* ioremap will take care of cache attributes */
336 #define should_use_kmap(pfn) 0
338 #define should_use_kmap(pfn) page_is_ram(pfn)
/*
 * Map a page-aligned physical range: RAM pages go through kmap() (one
 * page max), everything else through acpi_os_ioremap().
 */
341 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
345 pfn = pg_off >> PAGE_SHIFT;
346 if (should_use_kmap(pfn)) {
/* kmap() can only map a single page; larger requests fail (elided). */
347 if (pg_sz > PAGE_SIZE)
349 return (void __iomem __force *)kmap(pfn_to_page(pfn));
351 return acpi_os_ioremap(pg_off, pg_sz);
/* Undo acpi_map(): kunmap for RAM pages, iounmap otherwise (elided). */
354 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
358 pfn = pg_off >> PAGE_SHIFT;
359 if (should_use_kmap(pfn))
360 kunmap(pfn_to_page(pfn));
366 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
367 * @phys: Start of the physical address range to map.
368 * @size: Size of the physical address range to map.
370 * Look up the given physical address range in the list of existing ACPI memory
371 * mappings. If found, get a reference to it and return a pointer to it (its
372 * virtual address). If not found, map it, add it to that list and return a
375 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
376 * routine simply calls __acpi_map_table() to get the job done.
378 void __iomem *__init_refok
379 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
381 struct acpi_ioremap *map;
383 acpi_physical_address pg_off;
/* Physical addresses above ULONG_MAX cannot be represented here. */
386 if (phys > ULONG_MAX) {
387 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
391 if (!acpi_gbl_permanent_mmap)
392 return __acpi_map_table((unsigned long)phys, size);
394 mutex_lock(&acpi_ioremap_lock);
395 /* Check if there's a suitable mapping already. */
396 map = acpi_map_lookup(phys, size);
/* (Refcount bump + early return on a hit is elided from this excerpt.) */
402 map = kzalloc(sizeof(*map), GFP_KERNEL);
404 mutex_unlock(&acpi_ioremap_lock);
/* Round the request out to whole pages before mapping. */
408 pg_off = round_down(phys, PAGE_SIZE);
409 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
410 virt = acpi_map(pg_off, pg_sz);
412 mutex_unlock(&acpi_ioremap_lock);
417 INIT_LIST_HEAD(&map->list);
/* Publish under RCU so interrupt-context lookups can see it locklessly. */
423 list_add_tail_rcu(&map->list, &acpi_ioremaps);
426 mutex_unlock(&acpi_ioremap_lock);
427 return map->virt + (phys - map->phys);
429 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
/* Non-__iomem convenience wrapper around acpi_os_map_iomem(). */
432 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
434 return (void *)acpi_os_map_iomem(phys, size);
436 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
/* Drop one reference; unlink from the RCU list on the last one.
 * Caller holds acpi_ioremap_lock. */
438 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
440 if (!--map->refcount)
441 list_del_rcu(&map->list);
/*
 * Final teardown for a mapping whose refcount hit zero: wait for RCU
 * readers to finish, then unmap.  Must be called WITHOUT the lock held
 * (synchronize_rcu_expedited() may sleep).
 */
444 static void acpi_os_map_cleanup(struct acpi_ioremap *map)
446 if (!map->refcount) {
447 synchronize_rcu_expedited();
448 acpi_unmap(map->phys, map->virt);
454 * acpi_os_unmap_iomem - Drop a memory mapping reference.
455 * @virt: Start of the address range to drop a reference to.
456 * @size: Size of the address range to drop a reference to.
458 * Look up the given virtual address range in the list of existing ACPI memory
459 * mappings, drop a reference to it and unmap it if there are no more active
462 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
463 * routine simply calls __acpi_unmap_table() to get the job done. Since
464 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
467 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
469 struct acpi_ioremap *map;
471 if (!acpi_gbl_permanent_mmap) {
472 __acpi_unmap_table(virt, size);
476 mutex_lock(&acpi_ioremap_lock);
477 map = acpi_map_lookup_virt(virt, size);
/* Unbalanced unmap: warn loudly instead of corrupting the list. */
479 mutex_unlock(&acpi_ioremap_lock);
480 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
483 acpi_os_drop_map_ref(map);
484 mutex_unlock(&acpi_ioremap_lock);
/* Cleanup runs after dropping the lock — see acpi_os_map_cleanup(). */
486 acpi_os_map_cleanup(map);
488 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
/* Non-__iomem convenience wrapper around acpi_os_unmap_iomem(). */
490 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
492 return acpi_os_unmap_iomem((void __iomem *)virt, size);
494 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
/* Early-boot unmap: only meaningful before permanent mmap is enabled. */
496 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
498 if (!acpi_gbl_permanent_mmap)
499 __acpi_unmap_table(virt, size);
/*
 * Map the system-memory register described by a generic address
 * structure (bit_width bits wide).  Non-memory spaces are ignored.
 * The GAS address is memcpy'd out because it may be unaligned.
 */
502 int acpi_os_map_generic_address(struct acpi_generic_address *gas)
507 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
510 /* Handle possible alignment issues */
511 memcpy(&addr, &gas->address, sizeof(addr));
512 if (!addr || !gas->bit_width)
/* bit_width is in bits; the mapping size is bit_width / 8 bytes. */
515 virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
521 EXPORT_SYMBOL(acpi_os_map_generic_address);
/* Counterpart to the above: drop the reference taken for a GAS mapping. */
523 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
526 struct acpi_ioremap *map;
528 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
531 /* Handle possible alignment issues */
532 memcpy(&addr, &gas->address, sizeof(addr));
533 if (!addr || !gas->bit_width)
536 mutex_lock(&acpi_ioremap_lock);
537 map = acpi_map_lookup(addr, gas->bit_width / 8);
539 mutex_unlock(&acpi_ioremap_lock);
542 acpi_os_drop_map_ref(map);
543 mutex_unlock(&acpi_ioremap_lock);
/* Actual unmap happens outside the lock (may sleep in RCU sync). */
545 acpi_os_map_cleanup(map);
547 EXPORT_SYMBOL(acpi_os_unmap_generic_address);
549 #ifdef ACPI_FUTURE_USAGE
/* Translate a kernel virtual address to physical for ACPICA. */
551 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
554 return AE_BAD_PARAMETER;
556 *phys = virt_to_phys(virt);
562 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/* "acpi_rev_override" on the command line forces _REV to report 5. */
563 static bool acpi_rev_override;
565 int __init acpi_rev_override_setup(char *str)
567 acpi_rev_override = true;
570 __setup("acpi_rev_override", acpi_rev_override_setup);
572 #define acpi_rev_override false
575 #define ACPI_MAX_OVERRIDE_LEN 100
/* Replacement _OS_ string set via "acpi_os_name=" (see setup below). */
577 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
/*
 * ACPICA predefined-object override hook: substitutes the user-supplied
 * _OS_ string and, when requested, forces _REV to 5.
 */
580 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
583 if (!init_val || !new_val)
584 return AE_BAD_PARAMETER;
587 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
588 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
590 *new_val = acpi_os_name;
593 if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
594 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
/* Integer 5 smuggled through the char* out-parameter — presumably
 * ACPICA interprets non-string overrides as integers; confirm against
 * the ACPICA override API before touching this. */
595 *new_val = (char *)5;
/* SCI interrupt thunk: forwards into ACPICA's handler and counts
 * spurious (unhandled) SCIs. */
601 static irqreturn_t acpi_irq(int irq, void *dev_id)
605 handled = (*acpi_irq_handler) (acpi_irq_context);
611 acpi_irq_not_handled++;
/*
 * ACPICA OSL hook: install the SCI handler.  Only the FADT's SCI GSI is
 * accepted, and only one handler may be registered at a time.
 */
617 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
622 acpi_irq_stats_init();
625 * ACPI interrupts different from the SCI in our copy of the FADT are
628 if (gsi != acpi_gbl_FADT.sci_interrupt)
629 return AE_BAD_PARAMETER;
631 if (acpi_irq_handler)
632 return AE_ALREADY_ACQUIRED;
634 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
635 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
/* Publish handler/context before request_irq so acpi_irq() can run
 * as soon as the IRQ is live. */
640 acpi_irq_handler = handler;
641 acpi_irq_context = context;
642 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
643 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
644 acpi_irq_handler = NULL;
645 return AE_NOT_ACQUIRED;
/* Counterpart: tear down the SCI handler and invalidate acpi_sci_irq. */
652 acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
654 if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
655 return AE_BAD_PARAMETER;
657 free_irq(acpi_sci_irq, acpi_irq);
658 acpi_irq_handler = NULL;
659 acpi_sci_irq = INVALID_ACPI_IRQ;
665 * Running in interpreter thread context, safe to sleep
/* Sleep for @ms milliseconds (AML Sleep operand). */
668 void acpi_os_sleep(u64 ms)
/* Busy-wait for @us microseconds (AML Stall operand — must not sleep);
 * the watchdog touch below keeps long stalls from tripping the NMI
 * watchdog. */
673 void acpi_os_stall(u32 us)
681 touch_nmi_watchdog();
687 * Support ACPI 3.0 AML Timer operand
688 * Returns 64-bit free-running, monotonically increasing timer
689 * with 100ns granularity
691 u64 acpi_os_get_timer(void)
/* do_div() divides in place: converts ns from ktime_get() to 100ns units. */
693 u64 time_ns = ktime_to_ns(ktime_get());
694 do_div(time_ns, 100);
/* Port I/O for ACPICA: width is in bits; <=8/16/32 select inb/inw/inl. */
698 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
707 *(u8 *) value = inb(port);
708 } else if (width <= 16) {
709 *(u16 *) value = inw(port);
710 } else if (width <= 32) {
711 *(u32 *) value = inl(port);
719 EXPORT_SYMBOL(acpi_os_read_port);
/* Mirror of the read path using outb/outw/outl (calls elided here). */
721 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
725 } else if (width <= 16) {
727 } else if (width <= 32) {
736 EXPORT_SYMBOL(acpi_os_write_port);
/*
 * Read @width bits from physical memory for ACPICA.  Tries the
 * permanent-mapping list first (usable from interrupt context); falls
 * back to a transient ioremap when the range is not pre-mapped.
 */
739 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
741 void __iomem *virt_addr;
742 unsigned int size = width / 8;
747 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
750 virt_addr = acpi_os_ioremap(phys_addr, size);
752 return AE_BAD_ADDRESS;
761 *(u8 *) value = readb(virt_addr);
764 *(u16 *) value = readw(virt_addr);
767 *(u32 *) value = readl(virt_addr);
/* readq here relies on io-64-nonatomic-lo-hi.h on 32-bit builds. */
770 *(u64 *) value = readq(virt_addr);
/* Write counterpart: same lookup-then-ioremap strategy. */
785 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
787 void __iomem *virt_addr;
788 unsigned int size = width / 8;
792 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
795 virt_addr = acpi_os_ioremap(phys_addr, size);
797 return AE_BAD_ADDRESS;
803 writeb(value, virt_addr);
806 writew(value, virt_addr);
809 writel(value, virt_addr);
812 writeq(value, virt_addr);
/* PCI config-space read for ACPICA, routed through raw_pci_read(). */
827 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
828 u64 *value, u32 width)
834 return AE_BAD_PARAMETER;
850 result = raw_pci_read(pci_id->segment, pci_id->bus,
851 PCI_DEVFN(pci_id->device, pci_id->function),
852 reg, size, &value32);
855 return (result ? AE_ERROR : AE_OK);
/* PCI config-space write counterpart via raw_pci_write(). */
859 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
860 u64 value, u32 width)
878 result = raw_pci_write(pci_id->segment, pci_id->bus,
879 PCI_DEVFN(pci_id->device, pci_id->function),
882 return (result ? AE_ERROR : AE_OK);
/* Workqueue trampoline: run the deferred ACPICA callback; the dpc
 * allocated in acpi_os_execute() is freed after the call (kfree elided). */
885 static void acpi_os_execute_deferred(struct work_struct *work)
887 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
889 dpc->function(dpc->context);
893 #ifdef CONFIG_ACPI_DEBUGGER
/* Singleton registration point for an out-of-tree/in-kernel AML
 * debugger backend; .ops/.owner are protected by .lock. */
894 static struct acpi_debugger acpi_debugger;
895 static bool acpi_debugger_initialized;
/* Register a debugger backend; fails if one is already registered
 * (error path elided in this excerpt). */
897 int acpi_register_debugger(struct module *owner,
898 const struct acpi_debugger_ops *ops)
902 mutex_lock(&acpi_debugger.lock);
903 if (acpi_debugger.ops) {
908 acpi_debugger.owner = owner;
909 acpi_debugger.ops = ops;
912 mutex_unlock(&acpi_debugger.lock);
915 EXPORT_SYMBOL(acpi_register_debugger);
/* Unregister, but only if @ops matches the currently registered backend. */
917 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
919 mutex_lock(&acpi_debugger.lock);
920 if (ops == acpi_debugger.ops) {
921 acpi_debugger.ops = NULL;
922 acpi_debugger.owner = NULL;
924 mutex_unlock(&acpi_debugger.lock);
926 EXPORT_SYMBOL(acpi_unregister_debugger);
/*
 * The five dispatchers below share one pattern: under the lock, snapshot
 * the op and pin the backend module with try_module_get(); call the op
 * with the lock dropped; re-take the lock to module_put() (put calls are
 * elided in this excerpt).  Dropping the lock around the call keeps a
 * slow backend from blocking register/unregister.
 */
928 int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
931 int (*func)(acpi_osd_exec_callback, void *);
932 struct module *owner;
934 if (!acpi_debugger_initialized)
936 mutex_lock(&acpi_debugger.lock);
937 if (!acpi_debugger.ops) {
941 if (!try_module_get(acpi_debugger.owner)) {
945 func = acpi_debugger.ops->create_thread;
946 owner = acpi_debugger.owner;
947 mutex_unlock(&acpi_debugger.lock);
949 ret = func(function, context);
951 mutex_lock(&acpi_debugger.lock);
954 mutex_unlock(&acpi_debugger.lock);
/* Forward a log line to the backend's write_log op (same pattern). */
958 ssize_t acpi_debugger_write_log(const char *msg)
961 ssize_t (*func)(const char *);
962 struct module *owner;
964 if (!acpi_debugger_initialized)
966 mutex_lock(&acpi_debugger.lock);
967 if (!acpi_debugger.ops) {
971 if (!try_module_get(acpi_debugger.owner)) {
975 func = acpi_debugger.ops->write_log;
976 owner = acpi_debugger.owner;
977 mutex_unlock(&acpi_debugger.lock);
981 mutex_lock(&acpi_debugger.lock);
984 mutex_unlock(&acpi_debugger.lock);
/* Read a debugger command line into @buffer (same pattern). */
988 ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
991 ssize_t (*func)(char *, size_t);
992 struct module *owner;
994 if (!acpi_debugger_initialized)
996 mutex_lock(&acpi_debugger.lock);
997 if (!acpi_debugger.ops) {
1001 if (!try_module_get(acpi_debugger.owner)) {
1005 func = acpi_debugger.ops->read_cmd;
1006 owner = acpi_debugger.owner;
1007 mutex_unlock(&acpi_debugger.lock);
1009 ret = func(buffer, buffer_length);
1011 mutex_lock(&acpi_debugger.lock);
1014 mutex_unlock(&acpi_debugger.lock);
/* Block until the backend has a command ready; passes ACPICA's global
 * line buffer and method-executing state through to the op. */
1018 int acpi_debugger_wait_command_ready(void)
1021 int (*func)(bool, char *, size_t);
1022 struct module *owner;
1024 if (!acpi_debugger_initialized)
1026 mutex_lock(&acpi_debugger.lock);
1027 if (!acpi_debugger.ops) {
1031 if (!try_module_get(acpi_debugger.owner)) {
1035 func = acpi_debugger.ops->wait_command_ready;
1036 owner = acpi_debugger.owner;
1037 mutex_unlock(&acpi_debugger.lock);
1039 ret = func(acpi_gbl_method_executing,
1040 acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);
1042 mutex_lock(&acpi_debugger.lock);
1045 mutex_unlock(&acpi_debugger.lock);
/* Tell the backend the previous command finished executing. */
1049 int acpi_debugger_notify_command_complete(void)
1053 struct module *owner;
1055 if (!acpi_debugger_initialized)
1057 mutex_lock(&acpi_debugger.lock);
1058 if (!acpi_debugger.ops) {
1062 if (!try_module_get(acpi_debugger.owner)) {
1066 func = acpi_debugger.ops->notify_command_complete;
1067 owner = acpi_debugger.owner;
1068 mutex_unlock(&acpi_debugger.lock);
1072 mutex_lock(&acpi_debugger.lock);
1075 mutex_unlock(&acpi_debugger.lock);
/* One-time init: the dispatchers above are no-ops until this runs. */
1079 int __init acpi_debugger_init(void)
1081 mutex_init(&acpi_debugger.lock);
1082 acpi_debugger_initialized = true;
1087 /*******************************************************************************
1089 * FUNCTION: acpi_os_execute
1091 * PARAMETERS: Type - Type of the callback
1092 * Function - Function to be executed
1093 * Context - Function parameters
1097 * DESCRIPTION: Depending on type, either queues function for deferred execution or
1098 * immediately executes function on a separate thread.
1100 ******************************************************************************/
1102 acpi_status acpi_os_execute(acpi_execute_type type,
1103 acpi_osd_exec_callback function, void *context)
1105 acpi_status status = AE_OK;
1106 struct acpi_os_dpc *dpc;
1107 struct workqueue_struct *queue;
1109 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1110 "Scheduling function [%p(%p)] for deferred execution.\n",
1111 function, context));
/* Debugger main-thread requests bypass the workqueues entirely. */
1113 if (type == OSL_DEBUGGER_MAIN_THREAD) {
1114 ret = acpi_debugger_create_thread(function, context);
1116 pr_err("Call to kthread_create() failed.\n");
1123 * Allocate/initialize DPC structure. Note that this memory will be
1124 * freed by the callee. The kernel handles the work_struct list in a
1125 * way that allows us to also free its memory inside the callee.
1126 * Because we may want to schedule several tasks with different
1127 * parameters we can't use the approach some kernel code uses of
1128 * having a static work_struct.
/* GFP_ATOMIC: this can be called from contexts that must not sleep. */
1131 dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
1133 return AE_NO_MEMORY;
1135 dpc->function = function;
1136 dpc->context = context;
1139 * To prevent lockdep from complaining unnecessarily, make sure that
1140 * there is a different static lockdep key for each workqueue by using
1141 * INIT_WORK() for each of them separately.
1143 if (type == OSL_NOTIFY_HANDLER) {
1144 queue = kacpi_notify_wq;
1145 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1146 } else if (type == OSL_GPE_HANDLER) {
1148 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1150 pr_err("Unsupported os_execute type %d.\n", type);
1154 if (ACPI_FAILURE(status))
1158 * On some machines, a software-initiated SMI causes corruption unless
1159 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
1160 * typically it's done in GPE-related methods that are run via
1161 * workqueues, so we can avoid the known corruption cases by always
1162 * queueing on CPU 0.
1164 ret = queue_work_on(0, queue, &dpc->work);
1166 printk(KERN_ERR PREFIX
1167 "Call to queue_work() failed.\n");
/* Error path frees dpc (kfree elided from this excerpt). */
1171 if (ACPI_FAILURE(status))
1176 EXPORT_SYMBOL(acpi_os_execute);
/* Drain in-flight SCI handling and all queued GPE/notify work so a
 * handler being removed cannot still be running on another CPU. */
1178 void acpi_os_wait_events_complete(void)
1181 * Make sure the GPE handler or the fixed event handler is not used
1182 * on another CPU after removal.
1184 if (acpi_sci_irq_valid())
1185 synchronize_hardirq(acpi_sci_irq)
1186 flush_workqueue(kacpid_wq);
1187 flush_workqueue(kacpi_notify_wq);
/* Deferred hotplug request: device + notify source, freed by the work fn. */
1190 struct acpi_hp_work {
1191 struct work_struct work;
1192 struct acpi_device *adev;
/* Hotplug work function: wait for pending events, then run the actual
 * hotplug; frees hpw afterwards (kfree elided in this excerpt). */
1196 static void acpi_hotplug_work_fn(struct work_struct *work)
1198 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
1200 acpi_os_wait_events_complete();
1201 acpi_device_hotplug(hpw->adev, hpw->src);
/* Queue a hotplug event on the dedicated hotplug workqueue. */
1205 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
1207 struct acpi_hp_work *hpw;
1209 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1210 "Scheduling hotplug event (%p, %u) for deferred execution.\n",
1213 hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
1215 return AE_NO_MEMORY;
1217 INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
1221 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
1222 * the hotplug code may call driver .remove() functions, which may
1223 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
1226 if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
/* Expose the hotplug workqueue to other ACPI code. */
1233 bool acpi_queue_hotplug_work(struct work_struct *work)
1235 return queue_work(kacpi_hotplug_wq, work);
/*
 * ACPICA semaphore OSL, backed by struct semaphore.  Note that
 * max_units is accepted but not enforced by this implementation.
 */
1239 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1241 struct semaphore *sem = NULL;
1243 sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1245 return AE_NO_MEMORY;
1247 sema_init(sem, initial_units);
/* The opaque acpi_handle is just the semaphore pointer. */
1249 *handle = (acpi_handle *) sem;
1251 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1252 *handle, initial_units));
1258 * TODO: A better way to delete semaphores? Linux doesn't have a
1259 * 'delete_semaphore()' function -- may result in an invalid
1260 * pointer dereference for non-synchronized consumers. Should
1261 * we at least check for blocked threads and signal/cancel them?
1264 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1266 struct semaphore *sem = (struct semaphore *)handle;
1269 return AE_BAD_PARAMETER;
1271 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
/* Deleting while waiters exist is a caller bug — catch it hard. */
1273 BUG_ON(!list_empty(&sem->wait_list));
1281 * TODO: Support for units > 1?
/*
 * Acquire one unit, with @timeout in ms; ACPI_WAIT_FOREVER maps to an
 * unbounded down_timeout().  units > 1 is unsupported (see TODO above).
 */
1283 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1285 acpi_status status = AE_OK;
1286 struct semaphore *sem = (struct semaphore *)handle;
1290 if (!acpi_os_initialized)
1293 if (!sem || (units < 1))
1294 return AE_BAD_PARAMETER;
1299 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1300 handle, units, timeout));
1302 if (timeout == ACPI_WAIT_FOREVER)
1303 jiffies = MAX_SCHEDULE_TIMEOUT;
1305 jiffies = msecs_to_jiffies(timeout);
1307 ret = down_timeout(sem, jiffies);
1311 if (ACPI_FAILURE(status)) {
1312 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1313 "Failed to acquire semaphore[%p|%d|%d], %s",
1314 handle, units, timeout,
1315 acpi_format_exception(status)));
1317 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1318 "Acquired semaphore[%p|%d|%d]", handle,
1326 * TODO: Support for units > 1?
/* Release one unit (the up() call itself is elided from this excerpt). */
1328 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1330 struct semaphore *sem = (struct semaphore *)handle;
1332 if (!acpi_os_initialized)
1335 if (!sem || (units < 1))
1336 return AE_BAD_PARAMETER;
1341 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
/*
 * Fetch a debugger input line: from KDB when active (stripping the
 * trailing CR kdb appends), otherwise from the registered ACPI
 * debugger backend.
 */
1349 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
1351 #ifdef ENABLE_DEBUGGER
1352 if (acpi_in_debugger) {
1355 kdb_read(buffer, buffer_length);
1357 /* remove the CR kdb includes */
1358 chars = strlen(buffer) - 1;
1359 buffer[chars] = '\0';
1364 ret = acpi_debugger_read_cmd(buffer, buffer_length);
1373 EXPORT_SYMBOL(acpi_os_get_line);
/* Thin ACPICA wrappers over the debugger dispatchers above. */
1375 acpi_status acpi_os_wait_command_ready(void)
1379 ret = acpi_debugger_wait_command_ready();
1385 acpi_status acpi_os_notify_command_complete(void)
1389 ret = acpi_debugger_notify_command_complete();
/* Handle ACPICA signals: log fatal opcodes; breakpoints are a NOP. */
1395 acpi_status acpi_os_signal(u32 function, void *info)
1398 case ACPI_SIGNAL_FATAL:
1399 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1401 case ACPI_SIGNAL_BREAKPOINT:
1404 * ACPI spec. says to treat it as a NOP unless
1405 * you are debugging. So if/when we integrate
1406 * AML debugger into the kernel debugger its
1407 * hook will go here. But until then it is
1408 * not useful to print anything on breakpoints.
/*
 * Parse "acpi_os_name=": copy up to ACPI_MAX_OVERRIDE_LEN-1 chars,
 * keeping only alphanumerics, spaces and ':', skipping quote characters
 * (remaining characters stop the copy — see the elided else branch).
 */
1418 static int __init acpi_os_name_setup(char *str)
1420 char *p = acpi_os_name;
1421 int count = ACPI_MAX_OVERRIDE_LEN - 1;
1426 for (; count-- && *str; str++) {
1427 if (isalnum(*str) || *str == ' ' || *str == ':')
1429 else if (*str == '\'' || *str == '"')
1440 __setup("acpi_os_name=", acpi_os_name_setup);
1442 #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
1443 #define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */
/* One cmdline/DMI-configured _OSI string and whether to add or remove it. */
1445 struct osi_setup_entry {
1446 char string[OSI_STRING_LENGTH_MAX];
/* Feature-group strings Linux advertises by default. */
1450 static struct osi_setup_entry
1451 osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
1452 {"Module Device", true},
1453 {"Processor Device", true},
1454 {"3.0 _SCP Extensions", true},
1455 {"Processor Aggregator Device", true},
/*
 * Record an _OSI tweak for acpi_osi_setup_late(): empty string disables
 * _OSI entirely, "!*" disables all strings, "*" re-enables the table,
 * "!foo" removes foo, "foo" adds foo.
 */
1458 void __init acpi_osi_setup(char *str)
1460 struct osi_setup_entry *osi;
1464 if (!acpi_gbl_create_osi_method)
1467 if (str == NULL || *str == '\0') {
1468 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1469 acpi_gbl_create_osi_method = FALSE;
1476 osi_linux.default_disabling = 1;
1478 } else if (*str == '*') {
1479 acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
1480 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1481 osi = &osi_setup_entries[i];
1482 osi->enable = false;
/* Update an existing entry, or claim the first empty slot. */
1489 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1490 osi = &osi_setup_entries[i];
1491 if (!strcmp(osi->string, str)) {
1492 osi->enable = enable;
1494 } else if (osi->string[0] == '\0') {
1495 osi->enable = enable;
1496 strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
/* Flip the _OSI(Linux) answer and queue "Linux"/"!Linux" accordingly. */
1502 static void __init set_osi_linux(unsigned int enable)
1504 if (osi_linux.enable != enable)
1505 osi_linux.enable = enable;
1507 if (osi_linux.enable)
1508 acpi_osi_setup("Linux");
1510 acpi_osi_setup("!Linux");
/* Command-line origin: overrides any DMI-based decision. */
1515 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1517 osi_linux.cmdline = 1; /* cmdline set the default and override DMI */
1519 set_osi_linux(enable);
/* DMI-quirk entry point: a matched board sets _OSI(Linux) behavior. */
1524 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1526 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1531 osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
1532 set_osi_linux(enable);
1538 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1540 * empty string disables _OSI
1541 * string starting with '!' disables that string
1542 * otherwise string is added to list, augmenting built-in strings
/* Apply all queued _OSI tweaks to ACPICA once it is up. */
1544 static void __init acpi_osi_setup_late(void)
1546 struct osi_setup_entry *osi;
1551 if (osi_linux.default_disabling) {
1552 status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
1554 if (ACPI_SUCCESS(status))
1555 printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
1558 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1559 osi = &osi_setup_entries[i];
/* osi->enable selects install vs. remove for each recorded string. */
1565 status = acpi_install_interface(str);
1567 if (ACPI_SUCCESS(status))
1568 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1570 status = acpi_remove_interface(str);
1572 if (ACPI_SUCCESS(status))
1573 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
/* "acpi_osi=" parser: "Linux"/"!Linux" are special-cased, everything
 * else is queued via acpi_osi_setup(). */
1578 static int __init osi_setup(char *str)
1580 if (str && !strcmp("Linux", str))
1581 acpi_cmdline_osi_linux(1);
1582 else if (str && !strcmp("!Linux", str))
1583 acpi_cmdline_osi_linux(0);
1585 acpi_osi_setup(str);
1590 __setup("acpi_osi=", osi_setup);
1593 * Disable the auto-serialization of named objects creation methods.
1595 * This feature is enabled by default. It marks the AML control methods
1596 * that contain the opcodes to create named objects as "Serialized".
1598 static int __init acpi_no_auto_serialize_setup(char *str)
1600 acpi_gbl_auto_serialize_methods = FALSE;
1601 pr_info("ACPI: auto-serialization disabled\n");
1606 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1608 /* Check of resource interference between native drivers and ACPI
1609 * OperationRegions (SystemIO and System Memory only).
1610 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1611 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 * - strict (default) (2)
 *   -> further driver trying to access the resources will not load
 * - lax (1)
 *   -> further driver trying to access the resources will load, but you
 *   get a system message that something might go wrong...
 *
 * - no (0)
 *   -> ACPI Operation Region resources will not be registered
 *
 */
/* Enforcement levels for acpi_enforce_resources= (see comment above). */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX 1
#define ENFORCE_RESOURCES_NO 0

/* Current policy; "strict" unless overridden on the command line. */
static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1630 static int __init acpi_enforce_resources_setup(char *str)
1632 if (str == NULL || *str == '\0')
1635 if (!strcmp("strict", str))
1636 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1637 else if (!strcmp("lax", str))
1638 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1639 else if (!strcmp("no", str))
1640 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1645 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
1649 int acpi_check_resource_conflict(const struct resource *res)
1651 acpi_adr_space_type space_id;
1656 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1658 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1661 if (res->flags & IORESOURCE_IO)
1662 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1664 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1666 length = resource_size(res);
1667 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1669 clash = acpi_check_address_range(space_id, res->start, length, warn);
1672 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1673 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1674 printk(KERN_NOTICE "ACPI: This conflict may"
1675 " cause random problems and system"
1677 printk(KERN_INFO "ACPI: If an ACPI driver is available"
1678 " for this device, you should use it instead of"
1679 " the native driver\n");
1681 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1686 EXPORT_SYMBOL(acpi_check_resource_conflict);
1688 int acpi_check_region(resource_size_t start, resource_size_t n,
1691 struct resource res = {
1693 .end = start + n - 1,
1695 .flags = IORESOURCE_IO,
1698 return acpi_check_resource_conflict(&res);
1700 EXPORT_SYMBOL(acpi_check_region);
1703 * Let drivers know whether the resource checks are effective
1705 int acpi_resources_are_enforced(void)
1707 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1709 EXPORT_SYMBOL(acpi_resources_are_enforced);
1711 bool acpi_osi_is_win8(void)
1713 return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
1715 EXPORT_SYMBOL(acpi_osi_is_win8);
1718 * Deallocate the memory for a spinlock.
1720 void acpi_os_delete_lock(acpi_spinlock handle)
1726 * Acquire a spinlock.
1728 * handle is a pointer to the spinlock_t.
1731 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1733 acpi_cpu_flags flags;
1734 spin_lock_irqsave(lockp, flags);
1739 * Release a spinlock. See above.
1742 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1744 spin_unlock_irqrestore(lockp, flags);
1747 #ifndef ACPI_USE_LOCAL_CACHE
1749 /*******************************************************************************
1751 * FUNCTION: acpi_os_create_cache
1753 * PARAMETERS: name - Ascii name for the cache
1754 * size - Size of each cached object
1755 * depth - Maximum depth of the cache (in objects) <ignored>
1756 * cache - Where the new cache object is returned
1760 * DESCRIPTION: Create a cache object
1762 ******************************************************************************/
1765 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1767 *cache = kmem_cache_create(name, size, 0, 0, NULL);
1774 /*******************************************************************************
1776 * FUNCTION: acpi_os_purge_cache
1778 * PARAMETERS: Cache - Handle to cache object
1782 * DESCRIPTION: Free all objects within the requested cache.
1784 ******************************************************************************/
1786 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1788 kmem_cache_shrink(cache);
1792 /*******************************************************************************
1794 * FUNCTION: acpi_os_delete_cache
1796 * PARAMETERS: Cache - Handle to cache object
1800 * DESCRIPTION: Free all objects within the requested cache and delete the
1803 ******************************************************************************/
1805 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1807 kmem_cache_destroy(cache);
1811 /*******************************************************************************
1813 * FUNCTION: acpi_os_release_object
1815 * PARAMETERS: Cache - Handle to cache object
1816 * Object - The object to be released
1820 * DESCRIPTION: Release an object to the specified cache. If cache is full,
1821 * the object is deleted.
1823 ******************************************************************************/
1825 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1827 kmem_cache_free(cache, object);
1832 static int __init acpi_no_static_ssdt_setup(char *s)
1834 acpi_gbl_disable_ssdt_table_install = TRUE;
1835 pr_info("ACPI: static SSDT installation disabled\n");
1840 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
1842 static int __init acpi_disable_return_repair(char *s)
1844 printk(KERN_NOTICE PREFIX
1845 "ACPI: Predefined validation mechanism disabled\n");
1846 acpi_gbl_disable_auto_repair = TRUE;
1851 __setup("acpica_no_return_repair", acpi_disable_return_repair);
1853 acpi_status __init acpi_os_initialize(void)
1855 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1856 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1857 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
1858 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
1859 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
1861 * Use acpi_os_map_generic_address to pre-map the reset
1862 * register if it's in system memory.
1866 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
1867 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
1869 acpi_os_initialized = true;
1874 acpi_status __init acpi_os_initialize1(void)
1876 kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1877 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1878 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
1880 BUG_ON(!kacpi_notify_wq);
1881 BUG_ON(!kacpi_hotplug_wq);
1882 acpi_install_interface_handler(acpi_osi_handler);
1883 acpi_osi_setup_late();
1887 acpi_status acpi_os_terminate(void)
1889 if (acpi_irq_handler) {
1890 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
1894 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1895 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
1896 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1897 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1898 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
1899 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
1901 destroy_workqueue(kacpid_wq);
1902 destroy_workqueue(kacpi_notify_wq);
1903 destroy_workqueue(kacpi_hotplug_wq);
1908 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
1912 if (__acpi_os_prepare_sleep)
1913 rc = __acpi_os_prepare_sleep(sleep_state,
1914 pm1a_control, pm1b_control);
1918 return AE_CTRL_SKIP;
1923 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
1924 u32 pm1a_ctrl, u32 pm1b_ctrl))
1926 __acpi_os_prepare_sleep = func;
1929 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1933 if (__acpi_os_prepare_extended_sleep)
1934 rc = __acpi_os_prepare_extended_sleep(sleep_state,
1939 return AE_CTRL_SKIP;
1944 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
1945 u32 val_a, u32 val_b))
1947 __acpi_os_prepare_extended_sleep = func;