ACPI / osi: Cleanup coding style issues before creating a separate OSI source file
[linux-2.6-block.git] / drivers / acpi / osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23  *
24  */
25
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/highmem.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/kmod.h>
34 #include <linux/delay.h>
35 #include <linux/workqueue.h>
36 #include <linux/nmi.h>
37 #include <linux/acpi.h>
38 #include <linux/efi.h>
39 #include <linux/ioport.h>
40 #include <linux/list.h>
41 #include <linux/jiffies.h>
42 #include <linux/semaphore.h>
43
44 #include <asm/io.h>
45 #include <asm/uaccess.h>
46 #include <linux/io-64-nonatomic-lo-hi.h>
47
48 #include "internal.h"
49
50 #define _COMPONENT              ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
52
/*
 * Deferred procedure call descriptor: carries an ACPICA callback and its
 * argument into a workqueue item so it can run in process context.
 */
struct acpi_os_dpc {
	acpi_osd_exec_callback function;	/* ACPICA callback to invoke */
	void *context;				/* opaque argument for @function */
	struct work_struct work;		/* embeds this into a workqueue */
};
58
59 #ifdef CONFIG_ACPI_CUSTOM_DSDT
60 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
61 #endif
62
63 #ifdef ENABLE_DEBUGGER
64 #include <linux/kdb.h>
65
66 /* stuff for debugger support */
67 int acpi_in_debugger;
68 EXPORT_SYMBOL(acpi_in_debugger);
69 #endif                          /*ENABLE_DEBUGGER */
70
71 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
72                                       u32 pm1b_ctrl);
73 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
74                                       u32 val_b);
75
76 static acpi_osd_handler acpi_irq_handler;
77 static void *acpi_irq_context;
78 static struct workqueue_struct *kacpid_wq;
79 static struct workqueue_struct *kacpi_notify_wq;
80 static struct workqueue_struct *kacpi_hotplug_wq;
81 static bool acpi_os_initialized;
82 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
83
/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;		/* link in acpi_ioremaps (RCU list) */
	void __iomem *virt;		/* kernel virtual address of mapping */
	acpi_physical_address phys;	/* page-aligned physical base */
	acpi_size size;			/* page-rounded length of mapping */
	unsigned long refcount;		/* users; mapping freed at zero */
};
95
96 static LIST_HEAD(acpi_ioremaps);
97 static DEFINE_MUTEX(acpi_ioremap_lock);
98
99 static void __init acpi_osi_setup_late(void);
100
101 /*
102  * The story of _OSI(Linux)
103  *
104  * From pre-history through Linux-2.6.22,
105  * Linux responded TRUE upon a BIOS OSI(Linux) query.
106  *
107  * Unfortunately, reference BIOS writers got wind of this
108  * and put OSI(Linux) in their example code, quickly exposing
109  * this string as ill-conceived and opening the door to
110  * an un-bounded number of BIOS incompatibilities.
111  *
112  * For example, OSI(Linux) was used on resume to re-POST a
113  * video card on one system, because Linux at that time
114  * could not do a speedy restore in its native driver.
115  * But then upon gaining quick native restore capability,
116  * Linux has no way to tell the BIOS to skip the time-consuming
117  * POST -- putting Linux at a permanent performance disadvantage.
118  * On another system, the BIOS writer used OSI(Linux)
119  * to infer native OS support for IPMI!  On other systems,
120  * OSI(Linux) simply got in the way of Linux claiming to
121  * be compatible with other operating systems, exposing
122  * BIOS issues such as skipped device initialization.
123  *
124  * So "Linux" turned out to be a really poor choice of
125  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
126  *
127  * BIOS writers should NOT query _OSI(Linux) on future systems.
128  * Linux will complain on the console when it sees it, and return FALSE.
129  * To get Linux to return TRUE for your system will require
130  * a kernel source update to add a DMI entry,
131  * or boot with "acpi_osi=Linux"
132  */
133
/*
 * Tracks whether _OSI("Linux") / _OSI("Darwin") answers are enabled and
 * where the setting came from (command line vs. DMI quirk), so the
 * handler can log an accurate message.
 */
static struct acpi_osi_config {
	unsigned int	linux_enable:1;		/* answer TRUE to _OSI(Linux) */
	unsigned int	linux_dmi:1;		/* set via DMI quirk table */
	unsigned int	linux_cmdline:1;	/* set via acpi_osi= cmdline */
	unsigned int	darwin_enable:1;	/* answer TRUE to _OSI(Darwin) */
	unsigned int	darwin_dmi:1;		/* set via DMI quirk table */
	unsigned int	darwin_cmdline:1;	/* set via acpi_osi= cmdline */
	u8		default_disabling;
} osi_config;
143
144 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
145 {
146         if (!strcmp("Linux", interface)) {
147
148                 pr_notice_once(FW_BUG PREFIX
149                         "BIOS _OSI(Linux) query %s%s\n",
150                         osi_config.linux_enable ? "honored" : "ignored",
151                         osi_config.linux_cmdline ? " via cmdline" :
152                         osi_config.linux_dmi ? " via DMI" : "");
153         }
154
155         if (!strcmp("Darwin", interface)) {
156
157                 pr_notice_once(PREFIX
158                         "BIOS _OSI(Darwin) query %s%s\n",
159                         osi_config.darwin_enable ? "honored" : "ignored",
160                         osi_config.darwin_cmdline ? " via cmdline" :
161                         osi_config.darwin_dmi ? " via DMI" : "");
162         }
163
164         return supported;
165 }
166
167 static void __init acpi_request_region (struct acpi_generic_address *gas,
168         unsigned int length, char *desc)
169 {
170         u64 addr;
171
172         /* Handle possible alignment issues */
173         memcpy(&addr, &gas->address, sizeof(addr));
174         if (!addr || !length)
175                 return;
176
177         /* Resources are never freed */
178         if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
179                 request_region(addr, length, desc);
180         else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
181                 request_mem_region(addr, length, desc);
182 }
183
/*
 * Reserve the fixed-hardware register blocks described by the FADT
 * (PM1a/PM1b event and control, PM timer, PM2 control, GPE0/GPE1) so
 * other drivers cannot claim them.  acpi_request_region() skips blocks
 * whose address or length is zero.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* The PM timer is only reserved when it has the standard 4-byte size */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);
217
/* printf()-style entry point used by ACPICA; forwards to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);
226
227 void acpi_os_vprintf(const char *fmt, va_list args)
228 {
229         static char buffer[512];
230
231         vsprintf(buffer, fmt, args);
232
233 #ifdef ENABLE_DEBUGGER
234         if (acpi_in_debugger) {
235                 kdb_printf("%s", buffer);
236         } else {
237                 printk(KERN_CONT "%s", buffer);
238         }
239 #else
240         if (acpi_debugger_write_log(buffer) < 0)
241                 printk(KERN_CONT "%s", buffer);
242 #endif
243 }
244
245 #ifdef CONFIG_KEXEC
246 static unsigned long acpi_rsdp;
247 static int __init setup_acpi_rsdp(char *arg)
248 {
249         if (kstrtoul(arg, 16, &acpi_rsdp))
250                 return -EINVAL;
251         return 0;
252 }
253 early_param("acpi_rsdp", setup_acpi_rsdp);
254 #endif
255
256 acpi_physical_address __init acpi_os_get_root_pointer(void)
257 {
258 #ifdef CONFIG_KEXEC
259         if (acpi_rsdp)
260                 return acpi_rsdp;
261 #endif
262
263         if (efi_enabled(EFI_CONFIG_TABLES)) {
264                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
265                         return efi.acpi20;
266                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
267                         return efi.acpi;
268                 else {
269                         printk(KERN_ERR PREFIX
270                                "System description tables not found\n");
271                         return 0;
272                 }
273         } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
274                 acpi_physical_address pa = 0;
275
276                 acpi_find_root_pointer(&pa);
277                 return pa;
278         }
279
280         return 0;
281 }
282
283 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
284 static struct acpi_ioremap *
285 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
286 {
287         struct acpi_ioremap *map;
288
289         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
290                 if (map->phys <= phys &&
291                     phys + size <= map->phys + map->size)
292                         return map;
293
294         return NULL;
295 }
296
297 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
298 static void __iomem *
299 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
300 {
301         struct acpi_ioremap *map;
302
303         map = acpi_map_lookup(phys, size);
304         if (map)
305                 return map->virt + (phys - map->phys);
306
307         return NULL;
308 }
309
/*
 * Look up an existing permanent mapping covering [phys, phys + size),
 * take a reference on it and return the corresponding virtual address.
 * Returns NULL (and takes no reference) if the range is not mapped.
 */
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
325
/*
 * Virtual-address counterpart of acpi_map_lookup(): find the mapping
 * whose virtual range fully covers [virt, virt + size), or NULL.
 * Must be called with 'acpi_ioremap_lock' or RCU read lock held.
 */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}
339
340 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
341 /* ioremap will take care of cache attributes */
342 #define should_use_kmap(pfn)   0
343 #else
344 #define should_use_kmap(pfn)   page_is_ram(pfn)
345 #endif
346
347 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
348 {
349         unsigned long pfn;
350
351         pfn = pg_off >> PAGE_SHIFT;
352         if (should_use_kmap(pfn)) {
353                 if (pg_sz > PAGE_SIZE)
354                         return NULL;
355                 return (void __iomem __force *)kmap(pfn_to_page(pfn));
356         } else
357                 return acpi_os_ioremap(pg_off, pg_sz);
358 }
359
/*
 * Undo acpi_map(): kunmap() for RAM pages, iounmap() otherwise.  The
 * decision must mirror acpi_map()'s should_use_kmap() test.
 */
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}
370
/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	/* Map whole pages: round the range out to page boundaries. */
	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	/* RCU insertion: lockless readers walk acpi_ioremaps via _rcu. */
	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	/* Offset back from the page-aligned base to the requested address. */
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
436
/* Non-__iomem wrapper around acpi_os_map_iomem() for ACPICA callers. */
void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
443
/*
 * Drop one reference; unlink the mapping from the RCU list when it hits
 * zero.  Caller must hold acpi_ioremap_lock (all call sites do).
 */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}
449
/*
 * Free an unreferenced mapping.  Called after acpi_os_drop_map_ref(),
 * outside acpi_ioremap_lock; the RCU grace period guarantees lockless
 * readers of acpi_ioremaps are done with @map before it is unmapped.
 */
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}
458
/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* Actual unmap/free happens outside the lock, after an RCU grace. */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
495
496 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
497 {
498         return acpi_os_unmap_iomem((void __iomem *)virt, size);
499 }
500 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
501
/*
 * Early-boot unmap: only meaningful before permanent mappings exist;
 * once acpi_gbl_permanent_mmap is set this becomes a no-op.
 */
void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}
507
/*
 * Create a permanent mapping for a system-memory generic address
 * structure (register width gas->bit_width bits, i.e. bit_width/8
 * bytes).  Non-memory spaces are accepted and ignored (return 0).
 * Returns -EINVAL for a zero address or width, -EIO on map failure.
 */
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
528
/*
 * Counterpart of acpi_os_map_generic_address(): drop the reference taken
 * for a system-memory generic address structure and clean up the mapping
 * when the last reference goes away.  Silently returns for non-memory
 * spaces, zero address/width, or an unknown range.
 */
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* Unmap/free outside the lock, after an RCU grace period. */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
554
#ifdef ACPI_FUTURE_USAGE
/*
 * Translate a kernel virtual address to its physical address for ACPICA.
 * Returns AE_BAD_PARAMETER when either pointer is NULL.
 *
 * Fix: kernel coding style puts the '*' adjacent to the parameter name
 * ("acpi_physical_address *phys", not "acpi_physical_address * phys").
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif
567
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/* When set, _REV is overridden in acpi_os_predefined_override() below. */
static bool acpi_rev_override;

/* "acpi_rev_override" on the command line enables the _REV override. */
int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif
580
581 #define ACPI_MAX_OVERRIDE_LEN 100
582
583 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
584
/*
 * ACPICA hook for overriding predefined namespace objects.  Replaces the
 * _OS_ string when "acpi_os_name" was set, and the _REV value when the
 * acpi_rev_override flag is enabled.  *new_val left NULL means "no
 * override".
 */
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    char **new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		/*
		 * NOTE(review): the integer 5 is smuggled through the char *
		 * out-parameter — presumably ACPICA interprets the override
		 * value for integer objects like _REV as a number, not a
		 * string pointer; confirm against the ACPICA interface.
		 */
		*new_val = (char *)5;
	}

	return AE_OK;
}
606
/*
 * Warn and taint the kernel whenever a firmware-provided ACPI table is
 * replaced by a user-supplied one (custom DSDT or initrd override).
 */
static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
614
615 #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
616 #include <linux/earlycpio.h>
617 #include <linux/memblock.h>
618
619 static u64 acpi_tables_addr;
620 static int all_tables_size;
621
622 /* Copied from acpica/tbutils.c:acpi_tb_checksum() */
623 static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
624 {
625         u8 sum = 0;
626         u8 *end = buffer + length;
627
628         while (buffer < end)
629                 sum = (u8) (sum + *(buffer++));
630         return sum;
631 }
632
633 /* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
634 static const char * const table_sigs[] = {
635         ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
636         ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
637         ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
638         ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
639         ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
640         ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
641         ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
642         ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
643         ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
644
645 #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
646
647 #define ACPI_OVERRIDE_TABLES 64
648 static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
649 static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
650
651 #define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
652
/*
 * Scan the initrd cpio archive for ACPI table images under
 * "kernel/firmware/acpi/", validate them (minimum size, known signature,
 * length match, checksum), then copy the accepted tables into a
 * memblock-reserved area so they survive until the override machinery
 * consumes them.
 */
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	/* Pass 1: locate and validate up to ACPI_OVERRIDE_TABLES files. */
	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		/* Resume the next search just past the file we found. */
		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		/* Reject signatures not in the table_sigs whitelist. */
		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	/* Find low memory to hold all accepted tables back to back. */
	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap only can remap 256k one time. If we map all
	 * tables one time, we will hit the limit. Need to map chunks
	 * one by one during copying the same as that in relocate_initrd().
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		/* Copy in MAP_CHUNK_SIZE pieces, honoring page alignment. */
		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
						 clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
760
/*
 * ACPICA callback: offer a physical replacement for @existing_table from
 * the initrd override area.  Walks the packed tables at acpi_tables_addr;
 * on a signature + OEM-table-id match that has not been consumed yet,
 * returns its physical address/length, taints the kernel, and marks the
 * slot in acpi_initrd_installed.  Returns AE_OK with *address == 0 when
 * there is no override.
 */
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address, u32 *length)
{
	int table_offset = 0;
	int table_index = 0;
	struct acpi_table_header *table;
	u32 table_length;

	*length = 0;
	*address = 0;
	if (!acpi_tables_addr)
		return AE_OK;

	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
		/*
		 * NOTE(review): acpi_os_map_memory() can return NULL but the
		 * result is dereferenced unchecked below — presumably this
		 * reserved range is always mappable; confirm.
		 */
		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);
		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_length = table->length;

		/* Only override tables matched */
		if (test_bit(table_index, acpi_initrd_installed) ||
		    memcmp(existing_table->signature, table->signature, 4) ||
		    memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			goto next_table;
		}

		*length = table_length;
		*address = acpi_tables_addr + table_offset;
		acpi_table_taint(existing_table);
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		set_bit(table_index, acpi_initrd_installed);
		break;

next_table:
		table_offset += table_length;
		table_index++;
	}
	return AE_OK;
}
808
/*
 * Install any initrd-supplied tables that were not consumed as overrides
 * (skipping RSDT/XSDT, which are only legal as overrides), tainting the
 * kernel for each one and marking its slot in acpi_initrd_installed.
 */
void __init acpi_initrd_initialize_tables(void)
{
	int table_offset = 0;
	int table_index = 0;
	u32 table_length;
	struct acpi_table_header *table;

	if (!acpi_tables_addr)
		return;

	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);
		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return;
		}

		table_length = table->length;

		/* Skip RSDT/XSDT which should only be used for override */
		if (test_bit(table_index, acpi_initrd_installed) ||
		    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
		    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			goto next_table;
		}

		acpi_table_taint(table);
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		/* Hand the table to ACPICA by physical address. */
		acpi_install_table(acpi_tables_addr + table_offset, TRUE);
		set_bit(table_index, acpi_initrd_installed);
next_table:
		table_offset += table_length;
		table_index++;
	}
}
847 #else
/* Stubs when CONFIG_ACPI_INITRD_TABLE_OVERRIDE is off: never override. */
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
	*table_length = 0;
	*address = 0;
	return AE_OK;
}

void __init acpi_initrd_initialize_tables(void)
{
}
861 #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
862
/*
 * ACPICA callback: offer a logical (in-kernel) replacement table.  Only
 * the DSDT can be replaced here, and only when a custom one was compiled
 * in (CONFIG_ACPI_CUSTOM_DSDT); the kernel is tainted when that happens.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
		       struct acpi_table_header **new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}
880
881 static irqreturn_t acpi_irq(int irq, void *dev_id)
882 {
883         u32 handled;
884
885         handled = (*acpi_irq_handler) (acpi_irq_context);
886
887         if (handled) {
888                 acpi_irq_handled++;
889                 return IRQ_HANDLED;
890         } else {
891                 acpi_irq_not_handled++;
892                 return IRQ_NONE;
893         }
894 }
895
/*
 * Install @handler as the handler for the SCI (System Control Interrupt).
 * Only the GSI from our copy of the FADT is accepted, and only one
 * handler can be installed at a time.  A GSI that cannot be mapped to a
 * Linux IRQ is logged but still reported as AE_OK (no handler is
 * installed in that case).
 */
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	/*
	 * Publish handler and context before request_irq() so acpi_irq()
	 * can use them as soon as the line is live; undo on failure.
	 */
	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}
931
/*
 * Tear down the SCI handler installed by
 * acpi_os_install_interrupt_handler().  Rejects any GSI other than the
 * FADT SCI, or a call when no valid SCI IRQ is registered.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}
943
944 /*
945  * Running in interpreter thread context, safe to sleep
946  */
947
/* Sleep for @ms milliseconds; runs in interpreter thread context (see
 * above), so blocking is safe here. */
void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}
952
953 void acpi_os_stall(u32 us)
954 {
955         while (us) {
956                 u32 delay = 1000;
957
958                 if (delay > us)
959                         delay = us;
960                 udelay(delay);
961                 touch_nmi_watchdog();
962                 us -= delay;
963         }
964 }
965
966 /*
967  * Support ACPI 3.0 AML Timer operand
968  * Returns 64-bit free-running, monotonically increasing timer
969  * with 100ns granularity
970  */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	/* Convert ns to 100 ns units; do_div() because a plain 64-bit "/"
	 * is not available on all 32-bit architectures. */
	do_div(time_ns, 100);
	return time_ns;
}
977
978 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
979 {
980         u32 dummy;
981
982         if (!value)
983                 value = &dummy;
984
985         *value = 0;
986         if (width <= 8) {
987                 *(u8 *) value = inb(port);
988         } else if (width <= 16) {
989                 *(u16 *) value = inw(port);
990         } else if (width <= 32) {
991                 *(u32 *) value = inl(port);
992         } else {
993                 BUG();
994         }
995
996         return AE_OK;
997 }
998
999 EXPORT_SYMBOL(acpi_os_read_port);
1000
1001 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
1002 {
1003         if (width <= 8) {
1004                 outb(value, port);
1005         } else if (width <= 16) {
1006                 outw(value, port);
1007         } else if (width <= 32) {
1008                 outl(value, port);
1009         } else {
1010                 BUG();
1011         }
1012
1013         return AE_OK;
1014 }
1015
1016 EXPORT_SYMBOL(acpi_os_write_port);
1017
/*
 * acpi_os_read_memory - read an 8/16/32/64-bit value from physical memory.
 *
 * A pre-existing OSL mapping covering [phys_addr, phys_addr + width/8)
 * is looked up under RCU first; only if none exists is a transient
 * ioremap() created and torn down around the access.  A NULL @value is
 * tolerated: the read still happens but the result goes to a local
 * dummy.
 */
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	/* Release whichever protection we took: the transient mapping, or
	 * the RCU read lock guarding the tracked mapping. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
1063
/*
 * acpi_os_write_memory - write an 8/16/32/64-bit @value to physical memory.
 *
 * Same mapping strategy as acpi_os_read_memory(): reuse a tracked
 * mapping found under RCU, otherwise create (and afterwards drop) a
 * transient ioremap().
 */
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
1105
1106 acpi_status
1107 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1108                                u64 *value, u32 width)
1109 {
1110         int result, size;
1111         u32 value32;
1112
1113         if (!value)
1114                 return AE_BAD_PARAMETER;
1115
1116         switch (width) {
1117         case 8:
1118                 size = 1;
1119                 break;
1120         case 16:
1121                 size = 2;
1122                 break;
1123         case 32:
1124                 size = 4;
1125                 break;
1126         default:
1127                 return AE_ERROR;
1128         }
1129
1130         result = raw_pci_read(pci_id->segment, pci_id->bus,
1131                                 PCI_DEVFN(pci_id->device, pci_id->function),
1132                                 reg, size, &value32);
1133         *value = value32;
1134
1135         return (result ? AE_ERROR : AE_OK);
1136 }
1137
1138 acpi_status
1139 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1140                                 u64 value, u32 width)
1141 {
1142         int result, size;
1143
1144         switch (width) {
1145         case 8:
1146                 size = 1;
1147                 break;
1148         case 16:
1149                 size = 2;
1150                 break;
1151         case 32:
1152                 size = 4;
1153                 break;
1154         default:
1155                 return AE_ERROR;
1156         }
1157
1158         result = raw_pci_write(pci_id->segment, pci_id->bus,
1159                                 PCI_DEVFN(pci_id->device, pci_id->function),
1160                                 reg, size, value);
1161
1162         return (result ? AE_ERROR : AE_OK);
1163 }
1164
/* Work handler: run a deferred OSL callback, then free its descriptor
 * (allocated by acpi_os_execute()). */
static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}
1172
1173 #ifdef CONFIG_ACPI_DEBUGGER
1174 static struct acpi_debugger acpi_debugger;
1175 static bool acpi_debugger_initialized;
1176
1177 int acpi_register_debugger(struct module *owner,
1178                            const struct acpi_debugger_ops *ops)
1179 {
1180         int ret = 0;
1181
1182         mutex_lock(&acpi_debugger.lock);
1183         if (acpi_debugger.ops) {
1184                 ret = -EBUSY;
1185                 goto err_lock;
1186         }
1187
1188         acpi_debugger.owner = owner;
1189         acpi_debugger.ops = ops;
1190
1191 err_lock:
1192         mutex_unlock(&acpi_debugger.lock);
1193         return ret;
1194 }
1195 EXPORT_SYMBOL(acpi_register_debugger);
1196
/*
 * Unregister a debugger backend.  Only clears the registration when
 * @ops matches the currently registered ops, so a stale caller cannot
 * remove somebody else's backend.
 */
void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);
1207
/*
 * Ask the registered debugger backend to run @function(@context) on its
 * own thread.  Returns -ENODEV when no backend is registered.  A module
 * reference is taken under the lock and held across the (unlocked)
 * callback so the backend cannot be unloaded while it runs.
 */
int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	/* Snapshot callback and owner, then drop the lock for the call. */
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1237
/*
 * Forward a log message to the registered debugger backend's write_log
 * hook.  Same locking/module-reference pattern as
 * acpi_debugger_create_thread(); returns -ENODEV when no backend is
 * registered.
 */
ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1267
/*
 * Fetch the next debugger command line into @buffer (at most
 * @buffer_length bytes) via the backend's read_cmd hook.  Same
 * locking/module-reference pattern as acpi_debugger_create_thread();
 * returns -ENODEV when no backend is registered.
 */
ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1297
/*
 * Block until the backend reports a command is ready, passing ACPICA's
 * execution state and line buffer to its wait_command_ready hook.  Same
 * locking/module-reference pattern as acpi_debugger_create_thread();
 * returns -ENODEV when no backend is registered.
 */
int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1328
/*
 * Tell the backend that the current debugger command has finished, via
 * its notify_command_complete hook.  Same locking/module-reference
 * pattern as acpi_debugger_create_thread(); returns -ENODEV when no
 * backend is registered.
 */
int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1358
/* One-time boot init; until this sets acpi_debugger_initialized, every
 * debugger hook above returns -ENODEV. */
int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
1365 #endif
1366
1367 /*******************************************************************************
1368  *
1369  * FUNCTION:    acpi_os_execute
1370  *
1371  * PARAMETERS:  Type               - Type of the callback
1372  *              Function           - Function to be executed
1373  *              Context            - Function parameters
1374  *
1375  * RETURN:      Status
1376  *
1377  * DESCRIPTION: Depending on type, either queues function for deferred execution or
1378  *              immediately executes function on a separate thread.
1379  *
1380  ******************************************************************************/
1381
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/* Debugger commands run on a dedicated thread supplied by the
	 * debugger backend, not on a workqueue. */
	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	/* NOTE(review): GFP_ATOMIC — presumably this can be called from
	 * atomic context; confirm against the ACPICA OSL contract. */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	/* On any failure the deferred handler will never run, so the DPC
	 * must be freed here instead. */
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
1457
/*
 * Drain all pending SCI-originated work: wait for an in-flight SCI
 * hard-IRQ handler, then flush the GPE and notify workqueues.
 */
void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
1469
/* Descriptor for one deferred hotplug event (see acpi_hotplug_schedule()). */
struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;	/* device the event targets */
	u32 src;			/* notification source value */
};
1475
/* Work handler: drain pending ACPI events, run the hotplug operation,
 * then free the descriptor allocated by acpi_hotplug_schedule(). */
static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}
1484
/*
 * Queue a hotplug event (@adev, @src) for deferred execution on the
 * dedicated hotplug workqueue.  Returns AE_NO_MEMORY if the descriptor
 * cannot be allocated, AE_ERROR if queueing fails, AE_OK otherwise.
 */
acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}
1512
/* Queue caller-owned @work on the dedicated hotplug workqueue. */
bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}
1517
1518 acpi_status
1519 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1520 {
1521         struct semaphore *sem = NULL;
1522
1523         sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1524         if (!sem)
1525                 return AE_NO_MEMORY;
1526
1527         sema_init(sem, initial_units);
1528
1529         *handle = (acpi_handle *) sem;
1530
1531         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1532                           *handle, initial_units));
1533
1534         return AE_OK;
1535 }
1536
1537 /*
1538  * TODO: A better way to delete semaphores?  Linux doesn't have a
1539  * 'delete_semaphore()' function -- may result in an invalid
1540  * pointer dereference for non-synchronized consumers.  Should
1541  * we at least check for blocked threads and signal/cancel them?
1542  */
1543
1544 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1545 {
1546         struct semaphore *sem = (struct semaphore *)handle;
1547
1548         if (!sem)
1549                 return AE_BAD_PARAMETER;
1550
1551         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
1552
1553         BUG_ON(!list_empty(&sem->wait_list));
1554         kfree(sem);
1555         sem = NULL;
1556
1557         return AE_OK;
1558 }
1559
1560 /*
1561  * TODO: Support for units > 1?
1562  */
/*
 * Acquire @units (only 1 supported) from the semaphore, waiting at most
 * @timeout ms; ACPI_WAIT_FOREVER blocks indefinitely.  Returns AE_TIME
 * on timeout.  Before acpi_os_initialized is set, calls trivially
 * succeed so early boot is not blocked.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}
1604
1605 /*
1606  * TODO: Support for units > 1?
1607  */
1608 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1609 {
1610         struct semaphore *sem = (struct semaphore *)handle;
1611
1612         if (!acpi_os_initialized)
1613                 return AE_OK;
1614
1615         if (!sem || (units < 1))
1616                 return AE_BAD_PARAMETER;
1617
1618         if (units > 1)
1619                 return AE_SUPPORT;
1620
1621         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1622                           units));
1623
1624         up(sem);
1625
1626         return AE_OK;
1627 }
1628
/*
 * Read a debugger command line into @buffer.  With the in-kernel kdb
 * debugger the line comes from kdb_read() and its trailing CR is
 * stripped; NOTE(review): that path never sets *bytes_read -- confirm
 * callers rely on the buffer contents only.  Otherwise the line is
 * fetched from the registered debugger backend.
 */
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);
1654
1655 acpi_status acpi_os_wait_command_ready(void)
1656 {
1657         int ret;
1658
1659         ret = acpi_debugger_wait_command_ready();
1660         if (ret < 0)
1661                 return AE_ERROR;
1662         return AE_OK;
1663 }
1664
1665 acpi_status acpi_os_notify_command_complete(void)
1666 {
1667         int ret;
1668
1669         ret = acpi_debugger_notify_command_complete();
1670         if (ret < 0)
1671                 return AE_ERROR;
1672         return AE_OK;
1673 }
1674
/*
 * ACPICA callback for AML Fatal and Breakpoint opcodes: log the fatal
 * case, silently ignore breakpoints and unknown signals.
 */
acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}
1697
1698 static int __init acpi_os_name_setup(char *str)
1699 {
1700         char *p = acpi_os_name;
1701         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1702
1703         if (!str || !*str)
1704                 return 0;
1705
1706         for (; count-- && *str; str++) {
1707                 if (isalnum(*str) || *str == ' ' || *str == ':')
1708                         *p++ = *str;
1709                 else if (*str == '\'' || *str == '"')
1710                         continue;
1711                 else
1712                         break;
1713         }
1714         *p = 0;
1715
1716         return 1;
1717
1718 }
1719
1720 __setup("acpi_os_name=", acpi_os_name_setup);
1721
1722 #define OSI_STRING_LENGTH_MAX 64
1723 #define OSI_STRING_ENTRIES_MAX 16
1724
/* One _OSI string override, from built-in defaults, the command line,
 * or a DMI quirk. */
struct acpi_osi_entry {
	char string[OSI_STRING_LENGTH_MAX];	/* interface name passed to _OSI */
	bool enable;				/* install (true) or remove (false) */
};
1729
/* _OSI strings enabled by default; acpi_osi= can disable these or add
 * more (up to OSI_STRING_ENTRIES_MAX total). */
static struct acpi_osi_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};
1737
1738 void __init acpi_osi_setup(char *str)
1739 {
1740         struct acpi_osi_entry *osi;
1741         bool enable = true;
1742         int i;
1743
1744         if (!acpi_gbl_create_osi_method)
1745                 return;
1746
1747         if (str == NULL || *str == '\0') {
1748                 pr_info(PREFIX "_OSI method disabled\n");
1749                 acpi_gbl_create_osi_method = FALSE;
1750                 return;
1751         }
1752
1753         if (*str == '!') {
1754                 str++;
1755                 if (*str == '\0') {
1756                         /* Do not override acpi_osi=!* */
1757                         if (!osi_config.default_disabling)
1758                                 osi_config.default_disabling =
1759                                         ACPI_DISABLE_ALL_VENDOR_STRINGS;
1760                         return;
1761                 } else if (*str == '*') {
1762                         osi_config.default_disabling = ACPI_DISABLE_ALL_STRINGS;
1763                         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1764                                 osi = &osi_setup_entries[i];
1765                                 osi->enable = false;
1766                         }
1767                         return;
1768                 } else if (*str == '!') {
1769                         osi_config.default_disabling = 0;
1770                         return;
1771                 }
1772                 enable = false;
1773         }
1774
1775         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1776                 osi = &osi_setup_entries[i];
1777                 if (!strcmp(osi->string, str)) {
1778                         osi->enable = enable;
1779                         break;
1780                 } else if (osi->string[0] == '\0') {
1781                         osi->enable = enable;
1782                         strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
1783                         break;
1784                 }
1785         }
1786 }
1787
/*
 * Enable or disable _OSI("Darwin").  Enabling first queues "!"
 * (default-disable all vendor strings) and then adds "Darwin";
 * disabling queues "!!" (clear the default-disable) and removes
 * "Darwin".
 */
static void __init __acpi_osi_setup_darwin(bool enable)
{
	osi_config.darwin_enable = !!enable;
	if (enable) {
		acpi_osi_setup("!");
		acpi_osi_setup("Darwin");
	} else {
		acpi_osi_setup("!!");
		acpi_osi_setup("!Darwin");
	}
}
1799
/* Command-line origin for the Darwin setting; clears the DMI flag so
 * the user's choice wins over quirks. */
static void __init acpi_osi_setup_darwin(bool enable)
{
	osi_config.darwin_cmdline = 1;
	osi_config.darwin_dmi = 0;
	__acpi_osi_setup_darwin(enable);
}
1806
/* DMI quirk origin for the Darwin setting: log the matching entry and
 * record that DMI made the decision. */
void __init acpi_osi_dmi_darwin(bool enable, const struct dmi_system_id *d)
{
	pr_notice(PREFIX "DMI detected to setup _OSI(\"Darwin\"): %s\n",
		  d->ident);
	osi_config.darwin_dmi = 1;
	__acpi_osi_setup_darwin(enable);
}
1814
1815 static void __init __acpi_osi_setup_linux(bool enable)
1816 {
1817         osi_config.linux_enable = !!enable;
1818         if (enable)
1819                 acpi_osi_setup("Linux");
1820         else
1821                 acpi_osi_setup("!Linux");
1822 }
1823
/* Command-line origin for the Linux setting; clears the DMI flag so
 * the user's choice wins over quirks. */
static void __init acpi_osi_setup_linux(bool enable)
{
	osi_config.linux_cmdline = 1;
	osi_config.linux_dmi = 0;
	__acpi_osi_setup_linux(enable);
}
1830
/* DMI quirk origin for the Linux setting: log the matching entry and
 * record that DMI made the decision. */
void __init acpi_osi_dmi_linux(bool enable, const struct dmi_system_id *d)
{
	pr_notice(PREFIX "DMI detected to setup _OSI(\"Linux\"): %s\n",
		  d->ident);
	osi_config.linux_dmi = 1;
	__acpi_osi_setup_linux(enable);
}
1838
1839 /*
1840  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1841  *
1842  * empty string disables _OSI
1843  * string starting with '!' disables that string
1844  * otherwise string is added to list, augmenting built-in strings
1845  */
/*
 * Apply the accumulated _OSI configuration to ACPICA once it is up:
 * honor a requested default-disable first, then install or remove each
 * recorded string.  Entries are packed from the front, so the first
 * empty string ends the scan.
 */
static void __init acpi_osi_setup_late(void)
{
	struct acpi_osi_entry *osi;
	char *str;
	int i;
	acpi_status status;

	if (osi_config.default_disabling) {
		status = acpi_update_interfaces(osi_config.default_disabling);

		if (ACPI_SUCCESS(status))
			pr_info(PREFIX "Disabled all _OSI OS vendors%s\n",
				osi_config.default_disabling ==
				ACPI_DISABLE_ALL_STRINGS ?
				" and feature groups" : "");
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		str = osi->string;

		if (*str == '\0')
			break;
		if (osi->enable) {
			status = acpi_install_interface(str);

			if (ACPI_SUCCESS(status))
				pr_info(PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);

			if (ACPI_SUCCESS(status))
				pr_info(PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}
1882
1883 static int __init osi_setup(char *str)
1884 {
1885         if (str && !strcmp("Linux", str))
1886                 acpi_osi_setup_linux(true);
1887         else if (str && !strcmp("!Linux", str))
1888                 acpi_osi_setup_linux(false);
1889         else if (str && !strcmp("Darwin", str))
1890                 acpi_osi_setup_darwin(true);
1891         else if (str && !strcmp("!Darwin", str))
1892                 acpi_osi_setup_darwin(false);
1893         else
1894                 acpi_osi_setup(str);
1895
1896         return 1;
1897 }
1898
1899 __setup("acpi_osi=", osi_setup);
1900
1901 /*
1902  * Disable the auto-serialization of named objects creation methods.
1903  *
1904  * This feature is enabled by default.  It marks the AML control methods
1905  * that contain the opcodes to create named objects as "Serialized".
1906  */
1907 static int __init acpi_no_auto_serialize_setup(char *str)
1908 {
1909         acpi_gbl_auto_serialize_methods = FALSE;
1910         pr_info("ACPI: auto-serialization disabled\n");
1911
1912         return 1;
1913 }
1914
1915 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1916
/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

/* Current enforcement policy; overridable via acpi_enforce_resources= */
static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1938
1939 static int __init acpi_enforce_resources_setup(char *str)
1940 {
1941         if (str == NULL || *str == '\0')
1942                 return 0;
1943
1944         if (!strcmp("strict", str))
1945                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1946         else if (!strcmp("lax", str))
1947                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1948         else if (!strcmp("no", str))
1949                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1950
1951         return 1;
1952 }
1953
1954 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1955
1956 /* Check for resource conflicts between ACPI OperationRegions and native
1957  * drivers */
1958 int acpi_check_resource_conflict(const struct resource *res)
1959 {
1960         acpi_adr_space_type space_id;
1961         acpi_size length;
1962         u8 warn = 0;
1963         int clash = 0;
1964
1965         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1966                 return 0;
1967         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1968                 return 0;
1969
1970         if (res->flags & IORESOURCE_IO)
1971                 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1972         else
1973                 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1974
1975         length = resource_size(res);
1976         if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1977                 warn = 1;
1978         clash = acpi_check_address_range(space_id, res->start, length, warn);
1979
1980         if (clash) {
1981                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1982                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1983                                 printk(KERN_NOTICE "ACPI: This conflict may"
1984                                        " cause random problems and system"
1985                                        " instability\n");
1986                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1987                                " for this device, you should use it instead of"
1988                                " the native driver\n");
1989                 }
1990                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1991                         return -EBUSY;
1992         }
1993         return 0;
1994 }
1995 EXPORT_SYMBOL(acpi_check_resource_conflict);
1996
1997 int acpi_check_region(resource_size_t start, resource_size_t n,
1998                       const char *name)
1999 {
2000         struct resource res = {
2001                 .start = start,
2002                 .end   = start + n - 1,
2003                 .name  = name,
2004                 .flags = IORESOURCE_IO,
2005         };
2006
2007         return acpi_check_resource_conflict(&res);
2008 }
2009 EXPORT_SYMBOL(acpi_check_region);
2010
2011 /*
2012  * Let drivers know whether the resource checks are effective
2013  */
2014 int acpi_resources_are_enforced(void)
2015 {
2016         return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
2017 }
2018 EXPORT_SYMBOL(acpi_resources_are_enforced);
2019
/* True when the firmware was answered _OSI for Windows 8 or newer. */
bool acpi_osi_is_win8(void)
{
	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);
2025
2026 /*
2027  * Deallocate the memory for a spinlock.
2028  */
2029 void acpi_os_delete_lock(acpi_spinlock handle)
2030 {
2031         ACPI_FREE(handle);
2032 }
2033
2034 /*
2035  * Acquire a spinlock.
2036  *
2037  * handle is a pointer to the spinlock_t.
2038  */
2039
2040 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
2041 {
2042         acpi_cpu_flags flags;
2043         spin_lock_irqsave(lockp, flags);
2044         return flags;
2045 }
2046
2047 /*
2048  * Release a spinlock. See above.
2049  */
2050
2051 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
2052 {
2053         spin_unlock_irqrestore(lockp, flags);
2054 }
2055
2056 #ifndef ACPI_USE_LOCAL_CACHE
2057
2058 /*******************************************************************************
2059  *
2060  * FUNCTION:    acpi_os_create_cache
2061  *
2062  * PARAMETERS:  name      - Ascii name for the cache
2063  *              size      - Size of each cached object
2064  *              depth     - Maximum depth of the cache (in objects) <ignored>
2065  *              cache     - Where the new cache object is returned
2066  *
2067  * RETURN:      status
2068  *
2069  * DESCRIPTION: Create a cache object
2070  *
2071  ******************************************************************************/
2072
2073 acpi_status
2074 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
2075 {
2076         *cache = kmem_cache_create(name, size, 0, 0, NULL);
2077         if (*cache == NULL)
2078                 return AE_ERROR;
2079         else
2080                 return AE_OK;
2081 }
2082
2083 /*******************************************************************************
2084  *
2085  * FUNCTION:    acpi_os_purge_cache
2086  *
2087  * PARAMETERS:  Cache           - Handle to cache object
2088  *
2089  * RETURN:      Status
2090  *
2091  * DESCRIPTION: Free all objects within the requested cache.
2092  *
2093  ******************************************************************************/
2094
2095 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
2096 {
2097         kmem_cache_shrink(cache);
2098         return (AE_OK);
2099 }
2100
2101 /*******************************************************************************
2102  *
2103  * FUNCTION:    acpi_os_delete_cache
2104  *
2105  * PARAMETERS:  Cache           - Handle to cache object
2106  *
2107  * RETURN:      Status
2108  *
2109  * DESCRIPTION: Free all objects within the requested cache and delete the
2110  *              cache object.
2111  *
2112  ******************************************************************************/
2113
2114 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
2115 {
2116         kmem_cache_destroy(cache);
2117         return (AE_OK);
2118 }
2119
2120 /*******************************************************************************
2121  *
2122  * FUNCTION:    acpi_os_release_object
2123  *
2124  * PARAMETERS:  Cache       - Handle to cache object
2125  *              Object      - The object to be released
2126  *
2127  * RETURN:      None
2128  *
2129  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
2130  *              the object is deleted.
2131  *
2132  ******************************************************************************/
2133
2134 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
2135 {
2136         kmem_cache_free(cache, object);
2137         return (AE_OK);
2138 }
2139 #endif
2140
2141 static int __init acpi_no_static_ssdt_setup(char *s)
2142 {
2143         acpi_gbl_disable_ssdt_table_install = TRUE;
2144         pr_info("ACPI: static SSDT installation disabled\n");
2145
2146         return 0;
2147 }
2148
2149 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
2150
2151 static int __init acpi_disable_return_repair(char *s)
2152 {
2153         printk(KERN_NOTICE PREFIX
2154                "ACPI: Predefined validation mechanism disabled\n");
2155         acpi_gbl_disable_auto_repair = TRUE;
2156
2157         return 1;
2158 }
2159
2160 __setup("acpica_no_return_repair", acpi_disable_return_repair);
2161
/*
 * Early OSL initialization: pre-map the FADT fixed-hardware register
 * blocks so they can later be accessed from atomic context.
 * Counterpart of the unmapping done in acpi_os_terminate().
 */
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	/* From here on the OSL helpers may assume the mappings exist. */
	acpi_os_initialized = true;

	return AE_OK;
}
2182
/*
 * Second-stage OSL initialization: create the ACPI workqueues and
 * register the _OSI handler, then flush the queued OSI string changes.
 */
acpi_status __init acpi_os_initialize1(void)
{
	/* max_active=1: ACPICA expects its deferred work to be serialized */
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	/* ACPI cannot operate without these queues; failure is fatal */
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}
2195
/*
 * Tear down the OSL: release the SCI handler, undo the FADT register
 * mappings made in acpi_os_initialize() (in reverse order), and destroy
 * the workqueues created in acpi_os_initialize1().
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
2216
2217 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
2218                                   u32 pm1b_control)
2219 {
2220         int rc = 0;
2221         if (__acpi_os_prepare_sleep)
2222                 rc = __acpi_os_prepare_sleep(sleep_state,
2223                                              pm1a_control, pm1b_control);
2224         if (rc < 0)
2225                 return AE_ERROR;
2226         else if (rc > 0)
2227                 return AE_CTRL_SKIP;
2228
2229         return AE_OK;
2230 }
2231
/*
 * Install the platform callback invoked by acpi_os_prepare_sleep().
 * Pass NULL to remove the hook.
 */
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
2237
2238 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
2239                                   u32 val_b)
2240 {
2241         int rc = 0;
2242         if (__acpi_os_prepare_extended_sleep)
2243                 rc = __acpi_os_prepare_extended_sleep(sleep_state,
2244                                              val_a, val_b);
2245         if (rc < 0)
2246                 return AE_ERROR;
2247         else if (rc > 0)
2248                 return AE_CTRL_SKIP;
2249
2250         return AE_OK;
2251 }
2252
/*
 * Install the platform callback invoked by
 * acpi_os_prepare_extended_sleep().  Pass NULL to remove the hook.
 */
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
			       u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}