ACPI / sleep: Introduce acpi_os_prepare_extended_sleep() for extended sleep path
[firefly-linux-kernel-4.4.55.git] / drivers / acpi / osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/highmem.h>
35 #include <linux/pci.h>
36 #include <linux/interrupt.h>
37 #include <linux/kmod.h>
38 #include <linux/delay.h>
39 #include <linux/workqueue.h>
40 #include <linux/nmi.h>
41 #include <linux/acpi.h>
42 #include <linux/acpi_io.h>
43 #include <linux/efi.h>
44 #include <linux/ioport.h>
45 #include <linux/list.h>
46 #include <linux/jiffies.h>
47 #include <linux/semaphore.h>
48
49 #include <asm/io.h>
50 #include <asm/uaccess.h>
51
52 #include <acpi/acpi.h>
53 #include <acpi/acpi_bus.h>
54 #include <acpi/processor.h>
55
56 #define _COMPONENT              ACPI_OS_SERVICES
57 ACPI_MODULE_NAME("osl");
58 #define PREFIX          "ACPI: "
/*
 * Deferred-execution descriptor: an ACPICA callback plus its opaque
 * argument, carried through a workqueue item.
 * NOTE(review): 'wait' presumably selects synchronous completion of the
 * deferred call — confirm against the (not shown) acpi_os_execute path.
 */
struct acpi_os_dpc {
	acpi_osd_exec_callback function;	/* callback to invoke */
	void *context;				/* opaque argument for 'function' */
	struct work_struct work;		/* workqueue linkage */
	int wait;				/* see NOTE above */
};
65
66 #ifdef CONFIG_ACPI_CUSTOM_DSDT
67 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
68 #endif
69
70 #ifdef ENABLE_DEBUGGER
71 #include <linux/kdb.h>
72
73 /* stuff for debugger support */
74 int acpi_in_debugger;
75 EXPORT_SYMBOL(acpi_in_debugger);
76
77 extern char line_buf[80];
78 #endif                          /*ENABLE_DEBUGGER */
79
/*
 * Optional platform hooks called on the suspend path before entering a
 * sleep state (legacy PM1x form and ACPI 5.0 extended form).
 * NOTE(review): presumably installed via setter functions outside this
 * chunk — confirm.
 */
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

/* SCI handler registered by ACPICA and the context passed back to it */
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
/* Workqueues for deferred OSL work: general, notify, and hotplug */
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
90
91 /*
92  * This list of permanent mappings is for memory that may be accessed from
93  * interrupt context, where we can't do the ioremap().
94  */
struct acpi_ioremap {
	struct list_head list;		/* entry in acpi_ioremaps, walked under RCU */
	void __iomem *virt;		/* kernel virtual address of the mapping */
	acpi_physical_address phys;	/* page-aligned physical base (see acpi_os_map_memory) */
	acpi_size size;			/* mapped length, rounded to page granularity */
	unsigned long refcount;		/* users; mapping is torn down at zero */
};
102
103 static LIST_HEAD(acpi_ioremaps);
104 static DEFINE_MUTEX(acpi_ioremap_lock);
105
106 static void __init acpi_osi_setup_late(void);
107
108 /*
109  * The story of _OSI(Linux)
110  *
111  * From pre-history through Linux-2.6.22,
112  * Linux responded TRUE upon a BIOS OSI(Linux) query.
113  *
114  * Unfortunately, reference BIOS writers got wind of this
115  * and put OSI(Linux) in their example code, quickly exposing
116  * this string as ill-conceived and opening the door to
117  * an un-bounded number of BIOS incompatibilities.
118  *
119  * For example, OSI(Linux) was used on resume to re-POST a
120  * video card on one system, because Linux at that time
121  * could not do a speedy restore in its native driver.
122  * But then upon gaining quick native restore capability,
123  * Linux has no way to tell the BIOS to skip the time-consuming
124  * POST -- putting Linux at a permanent performance disadvantage.
125  * On another system, the BIOS writer used OSI(Linux)
126  * to infer native OS support for IPMI!  On other systems,
127  * OSI(Linux) simply got in the way of Linux claiming to
128  * be compatible with other operating systems, exposing
129  * BIOS issues such as skipped device initialization.
130  *
131  * So "Linux" turned out to be a really poor chose of
132  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
133  *
134  * BIOS writers should NOT query _OSI(Linux) on future systems.
135  * Linux will complain on the console when it sees it, and return FALSE.
136  * To get Linux to return TRUE for your system  will require
137  * a kernel source update to add a DMI entry,
138  * or boot with "acpi_osi=Linux"
139  */
140
/* How _OSI(Linux) handling was configured: honored?, and set via DMI or cmdline? */
static struct osi_linux {
	unsigned int	enable:1;	/* respond TRUE to _OSI(Linux) */
	unsigned int	dmi:1;		/* setting came from a DMI quirk */
	unsigned int	cmdline:1;	/* setting came from the command line */
} osi_linux = {0, 0, 0};
146
147 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
148 {
149         if (!strcmp("Linux", interface)) {
150
151                 printk_once(KERN_NOTICE FW_BUG PREFIX
152                         "BIOS _OSI(Linux) query %s%s\n",
153                         osi_linux.enable ? "honored" : "ignored",
154                         osi_linux.cmdline ? " via cmdline" :
155                         osi_linux.dmi ? " via DMI" : "");
156         }
157
158         return supported;
159 }
160
161 static void __init acpi_request_region (struct acpi_generic_address *gas,
162         unsigned int length, char *desc)
163 {
164         u64 addr;
165
166         /* Handle possible alignment issues */
167         memcpy(&addr, &gas->address, sizeof(addr));
168         if (!addr || !length)
169                 return;
170
171         /* Resources are never freed */
172         if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
173                 request_region(addr, length, desc);
174         else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
175                 request_mem_region(addr, length, desc);
176 }
177
/*
 * Reserve the fixed-hardware register blocks described by the FADT
 * (PM1a/b event and control, PM timer, PM2 control, GPE0/1) so other
 * drivers cannot claim them.  Runs as a device initcall.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* Only reserve the PM timer when it has the standard 4-byte length */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
device_initcall(acpi_reserve_resources);
211
/* printf-style front end for ACPICA; forwards to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
219
220 void acpi_os_vprintf(const char *fmt, va_list args)
221 {
222         static char buffer[512];
223
224         vsprintf(buffer, fmt, args);
225
226 #ifdef ENABLE_DEBUGGER
227         if (acpi_in_debugger) {
228                 kdb_printf("%s", buffer);
229         } else {
230                 printk(KERN_CONT "%s", buffer);
231         }
232 #else
233         printk(KERN_CONT "%s", buffer);
234 #endif
235 }
236
237 #ifdef CONFIG_KEXEC
238 static unsigned long acpi_rsdp;
239 static int __init setup_acpi_rsdp(char *arg)
240 {
241         acpi_rsdp = simple_strtoul(arg, NULL, 16);
242         return 0;
243 }
244 early_param("acpi_rsdp", setup_acpi_rsdp);
245 #endif
246
247 acpi_physical_address __init acpi_os_get_root_pointer(void)
248 {
249 #ifdef CONFIG_KEXEC
250         if (acpi_rsdp)
251                 return acpi_rsdp;
252 #endif
253
254         if (efi_enabled(EFI_CONFIG_TABLES)) {
255                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
256                         return efi.acpi20;
257                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
258                         return efi.acpi;
259                 else {
260                         printk(KERN_ERR PREFIX
261                                "System description tables not found\n");
262                         return 0;
263                 }
264         } else {
265                 acpi_physical_address pa = 0;
266
267                 acpi_find_root_pointer(&pa);
268                 return pa;
269         }
270 }
271
/*
 * Find the tracked permanent mapping that fully covers
 * [phys, phys + size), or NULL when none does.
 * Must be called with 'acpi_ioremap_lock' or RCU read lock held.
 */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}
285
286 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
287 static void __iomem *
288 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
289 {
290         struct acpi_ioremap *map;
291
292         map = acpi_map_lookup(phys, size);
293         if (map)
294                 return map->virt + (phys - map->phys);
295
296         return NULL;
297 }
298
299 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
300 {
301         struct acpi_ioremap *map;
302         void __iomem *virt = NULL;
303
304         mutex_lock(&acpi_ioremap_lock);
305         map = acpi_map_lookup(phys, size);
306         if (map) {
307                 virt = map->virt + (phys - map->phys);
308                 map->refcount++;
309         }
310         mutex_unlock(&acpi_ioremap_lock);
311         return virt;
312 }
313 EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
314
/*
 * Reverse lookup: find the tracked mapping whose virtual range fully
 * covers [virt, virt + size), or NULL when none does.
 * Must be called with 'acpi_ioremap_lock' or RCU read lock held.
 */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}
328
#ifndef CONFIG_IA64
/* RAM pages already have a kernel mapping: reuse it via kmap(). */
#define should_use_kmap(pfn)   page_is_ram(pfn)
#else
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#endif
335
336 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
337 {
338         unsigned long pfn;
339
340         pfn = pg_off >> PAGE_SHIFT;
341         if (should_use_kmap(pfn)) {
342                 if (pg_sz > PAGE_SIZE)
343                         return NULL;
344                 return (void __iomem __force *)kmap(pfn_to_page(pfn));
345         } else
346                 return acpi_os_ioremap(pg_off, pg_sz);
347 }
348
349 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
350 {
351         unsigned long pfn;
352
353         pfn = pg_off >> PAGE_SHIFT;
354         if (should_use_kmap(pfn))
355                 kunmap(pfn_to_page(pfn));
356         else
357                 iounmap(vaddr);
358 }
359
/*
 * Map [phys, phys + size) for ACPI, with caching: an existing covering
 * mapping is reused with its refcount bumped, otherwise a new
 * page-aligned mapping is created and added to the RCU-protected
 * 'acpi_ioremaps' list.  Before permanent mappings are enabled this
 * falls back to the early __acpi_map_table() path.
 * Returns the virtual address for 'phys', or NULL on failure.
 */
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	/* ioremap cookies are unsigned long; higher addresses can't be mapped */
	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	/* Round out to whole pages; the descriptor stores the rounded range */
	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

 out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
412
413 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
414 {
415         if (!--map->refcount)
416                 list_del_rcu(&map->list);
417 }
418
/*
 * Free a mapping previously unlinked by acpi_os_drop_map_ref().  Must
 * run WITHOUT acpi_ioremap_lock held: synchronize_rcu() waits for
 * lockless readers to finish before the mapping is torn down.  A
 * non-zero refcount means the mapping is still listed and in use.
 */
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}
427
/*
 * Counterpart of acpi_os_map_memory(): drop one reference to the
 * mapping covering [virt, virt + size) and destroy it when unused.
 * Early (pre-permanent-mmap) mappings go back through
 * __acpi_unmap_table().  Warns on an address that is not tracked.
 */
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* must run outside the lock: may synchronize_rcu() and free */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
450
/*
 * Undo an early table mapping made before acpi_gbl_permanent_mmap was
 * enabled; a no-op afterwards (permanent mappings are refcounted and
 * released through acpi_os_unmap_memory()).
 */
void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}
456
/*
 * Create (or take a reference on) a permanent mapping for a generic
 * address structure in system-memory space, so the register can later
 * be reached through the cached-mapping path of
 * acpi_os_read/write_memory().  Returns 0 on success and for
 * non-memory spaces, -EINVAL for a null address or zero bit_width,
 * -EIO when the mapping fails.
 */
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	/* the reference taken here is dropped in acpi_os_unmap_generic_address() */
	virt = acpi_os_map_memory(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
477
/*
 * Release the reference taken by acpi_os_map_generic_address().
 * Silently ignores non-memory spaces, null/zero-width registers, and
 * addresses that are not currently mapped.
 */
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* must run outside the lock: may synchronize_rcu() and free */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
503
#ifdef ACPI_FUTURE_USAGE
/*
 * Translate a kernel virtual address to its physical address for
 * ACPICA (valid only for directly-mapped kernel addresses).
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!virt || !phys)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);
	return AE_OK;
}
#endif
516
#define ACPI_MAX_OVERRIDE_LEN 100

/* Replacement _OS_ string; empty means "no override" (see
 * acpi_os_predefined_override()).  NOTE(review): presumably filled in
 * from a boot parameter elsewhere in this file — confirm. */
static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
520
521 acpi_status
522 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
523                             acpi_string * new_val)
524 {
525         if (!init_val || !new_val)
526                 return AE_BAD_PARAMETER;
527
528         *new_val = NULL;
529         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
530                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
531                        acpi_os_name);
532                 *new_val = acpi_os_name;
533         }
534
535         return AE_OK;
536 }
537
538 #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
539 #include <linux/earlycpio.h>
540 #include <linux/memblock.h>
541
/* Physical base and total size of the reserved area that holds the
 * tables copied out of the initrd (see acpi_initrd_override()). */
static u64 acpi_tables_addr;
static int all_tables_size;
544
545 /* Copied from acpica/tbutils.c:acpi_tb_checksum() */
546 u8 __init acpi_table_checksum(u8 *buffer, u32 length)
547 {
548         u8 sum = 0;
549         u8 *end = buffer + length;
550
551         while (buffer < end)
552                 sum = (u8) (sum + *(buffer++));
553         return sum;
554 }
555
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
/* Signatures accepted for initrd table override; NULL-terminated. */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
567
/* Non-fatal errors: Affected tables/files are ignored */
/* NB: expands to 'continue' — only usable inside the scan loop in
 * acpi_initrd_override() below. */
#define INVALID_TABLE(x, path, name)                                    \
	{ pr_err("ACPI OVERRIDE: " x " [%s%s]\n", path, name); continue; }

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

/* Must not increase 10 or needs code modification below */
#define ACPI_OVERRIDE_TABLES 10
576
/*
 * Scan an initrd image for ACPI table files under
 * kernel/firmware/acpi/, validate each one (header size, known
 * signature, length matches the file, checksum), and copy the valid
 * tables into a reserved memblock area from which
 * acpi_os_physical_table_override() later serves them.  At most
 * ACPI_OVERRIDE_TABLES files are considered.
 */
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;
	struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
	char *p;

	if (data == NULL || size == 0)
		return;

	/* Pass 1: locate and validate candidate tables in the cpio */
	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		/* INVALID_TABLE() logs and 'continue's to the next file */
		if (file.size < sizeof(struct acpi_table_header))
			INVALID_TABLE("Table smaller than ACPI header",
				      cpio_path, file.name);

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig])
			INVALID_TABLE("Unknown signature",
				      cpio_path, file.name);
		if (file.size != table->length)
			INVALID_TABLE("File length does not match table length",
				      cpio_path, file.name);
		if (acpi_table_checksum(file.data, table->length))
			INVALID_TABLE("Bad table checksum",
				      cpio_path, file.name);

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		early_initrd_files[table_nr].data = file.data;
		early_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	/* Find room below the already-mapped low memory for the copies */
	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/* Pass 2: copy the validated tables into the reserved area */
	p = early_ioremap(acpi_tables_addr, all_tables_size);

	for (no = 0; no < table_nr; no++) {
		memcpy(p + total_offset, early_initrd_files[no].data,
		       early_initrd_files[no].size);
		total_offset += early_initrd_files[no].size;
	}
	early_iounmap(p, all_tables_size);
}
658 #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
659
/* Warn and taint the kernel whenever a firmware table is overridden. */
static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
667
668
/*
 * Offer a logical (in-kernel) replacement for a table.  Only the DSDT
 * can be replaced here, and only when a custom one was compiled in via
 * CONFIG_ACPI_CUSTOM_DSDT.  *new_table stays NULL when there is no
 * override; a used override taints the kernel.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}
686
/*
 * Offer a physical replacement for 'existing_table' from the tables
 * copied out of the initrd (if any).  On a signature plus OEM table id
 * match, *address and *table_length describe the replacement;
 * otherwise both stay 0.  Always returns AE_OK — "no override" is not
 * an error.  A used override taints the kernel.
 */
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
	*table_length = 0;
	*address = 0;
	return AE_OK;
#else
	int table_offset = 0;
	struct acpi_table_header *table;

	*table_length = 0;
	*address = 0;

	if (!acpi_tables_addr)
		return AE_OK;

	do {
		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
			WARN_ON(1);
			return AE_OK;
		}

		/* Map just the header to inspect signature and length */
		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);

		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_offset += table->length;

		if (memcmp(existing_table->signature, table->signature, 4)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}

		/* Only override tables with matching oem id */
		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}

		/* Match: rewind to this table's start and report it */
		table_offset -= table->length;
		*table_length = table->length;
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		*address = acpi_tables_addr + table_offset;
		break;
	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

	if (*address != 0)
		acpi_table_taint(existing_table);
	return AE_OK;
#endif
}
749
750 static irqreturn_t acpi_irq(int irq, void *dev_id)
751 {
752         u32 handled;
753
754         handled = (*acpi_irq_handler) (acpi_irq_context);
755
756         if (handled) {
757                 acpi_irq_handled++;
758                 return IRQ_HANDLED;
759         } else {
760                 acpi_irq_not_handled++;
761                 return IRQ_NONE;
762         }
763 }
764
/*
 * Install ACPICA's SCI handler.  Only the FADT's SCI interrupt is
 * supported, and only one handler at a time.  A GSI that cannot be
 * resolved to an IRQ returns AE_OK (boot continues without an SCI);
 * an already-installed handler or request_irq() failure are reported
 * as errors.
 */
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	/* publish handler/context before the IRQ can fire */
	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}

	return AE_OK;
}
799
/*
 * Remove the SCI handler installed above.  'irq' must be the FADT SCI
 * interrupt; 'handler' is part of the ACPICA prototype but is not
 * checked against the installed one.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	free_irq(irq, acpi_irq);
	acpi_irq_handler = NULL;

	return AE_OK;
}
810
811 /*
812  * Running in interpreter thread context, safe to sleep
813  */
814
/*
 * Sleep for 'ms' milliseconds on behalf of the AML interpreter.
 * NOTE(review): schedule_timeout_interruptible() returns early when a
 * signal is pending, so the sleep may be shorter than requested —
 * confirm whether callers can tolerate that before relying on the
 * full duration.
 */
void acpi_os_sleep(u64 ms)
{
	schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
819
820 void acpi_os_stall(u32 us)
821 {
822         while (us) {
823                 u32 delay = 1000;
824
825                 if (delay > us)
826                         delay = us;
827                 udelay(delay);
828                 touch_nmi_watchdog();
829                 us -= delay;
830         }
831 }
832
833 /*
834  * Support ACPI 3.0 AML Timer operand
835  * Returns 64-bit free-running, monotonically increasing timer
836  * with 100ns granularity
837  */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	/* convert ns to 100 ns ticks; do_div() divides time_ns in place */
	do_div(time_ns, 100);
	return time_ns;
}
844
/*
 * Read up to 32 bits from an I/O port.  'width' is in bits and is
 * bucketed (<=8, <=16, <=32); a NULL 'value' turns the access into a
 * read-and-discard.  Widths above 32 are a programming error (BUG).
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);
867
/*
 * Write up to 32 bits to an I/O port.  'width' is in bits and is
 * bucketed (<=8, <=16, <=32); widths above 32 are a programming error
 * (BUG).
 */
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
884
#ifdef readq
/* 64-bit MMIO read; a single access when the architecture has readq(). */
static inline u64 read64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#else
/* Fallback: two 32-bit reads, low word first — not a single access. */
static inline u64 read64(const volatile void __iomem *addr)
{
	u64 l, h;
	l = readl(addr);
	h = readl(addr+4);
	return l | (h << 32);
}
#endif
899
/*
 * Read 'width' bits (8/16/32/64) from physical memory.  Uses a
 * pre-registered permanent mapping when one covers the address
 * (lockless RCU lookup, usable from atomic context); otherwise falls
 * back to a transient ioremap.  A NULL 'value' discards the data.
 */
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = read64(virt_addr);
		break;
	default:
		BUG();
	}

	/* release whichever protection the lookup path took */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
945
#ifdef writeq
/* 64-bit MMIO write; a single access when the architecture has writeq(). */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
#else
/* Fallback: two 32-bit writes, low word first — not a single access. */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val>>32, addr+4);
}
#endif
958
/*
 * Write the low 'width' bits (8/16/32/64) of 'value' to physical address
 * 'phys_addr'.  Mirrors acpi_os_read_memory(): reuse a cached ACPI mapping
 * under RCU when possible, otherwise use a temporary ioremap.
 */
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	/* RCU protects the lookup against concurrent unmap of the list. */
	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		write64(value, virt_addr);
		break;
	default:
		BUG();	/* ACPICA should never pass another width */
	}

	/* Drop whichever protection we took above: temp map or RCU. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
1000
1001 acpi_status
1002 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1003                                u64 *value, u32 width)
1004 {
1005         int result, size;
1006         u32 value32;
1007
1008         if (!value)
1009                 return AE_BAD_PARAMETER;
1010
1011         switch (width) {
1012         case 8:
1013                 size = 1;
1014                 break;
1015         case 16:
1016                 size = 2;
1017                 break;
1018         case 32:
1019                 size = 4;
1020                 break;
1021         default:
1022                 return AE_ERROR;
1023         }
1024
1025         result = raw_pci_read(pci_id->segment, pci_id->bus,
1026                                 PCI_DEVFN(pci_id->device, pci_id->function),
1027                                 reg, size, &value32);
1028         *value = value32;
1029
1030         return (result ? AE_ERROR : AE_OK);
1031 }
1032
1033 acpi_status
1034 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1035                                 u64 value, u32 width)
1036 {
1037         int result, size;
1038
1039         switch (width) {
1040         case 8:
1041                 size = 1;
1042                 break;
1043         case 16:
1044                 size = 2;
1045                 break;
1046         case 32:
1047                 size = 4;
1048                 break;
1049         default:
1050                 return AE_ERROR;
1051         }
1052
1053         result = raw_pci_write(pci_id->segment, pci_id->bus,
1054                                 PCI_DEVFN(pci_id->device, pci_id->function),
1055                                 reg, size, value);
1056
1057         return (result ? AE_ERROR : AE_OK);
1058 }
1059
/*
 * Workqueue trampoline: recover the acpi_os_dpc from the embedded
 * work_struct, optionally wait for pending ACPI events (hotplug path),
 * run the deferred callback, and free the dpc allocated by
 * __acpi_os_execute().
 */
static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	/* Set only for hotplug work; drains kacpid/kacpi_notify first. */
	if (dpc->wait)
		acpi_os_wait_events_complete();

	dpc->function(dpc->context);
	kfree(dpc);
}
1070
1071 /*******************************************************************************
1072  *
1073  * FUNCTION:    acpi_os_execute
1074  *
1075  * PARAMETERS:  Type               - Type of the callback
1076  *              Function           - Function to be executed
1077  *              Context            - Function parameters
1078  *
1079  * RETURN:      Status
1080  *
1081  * DESCRIPTION: Depending on type, either queues function for deferred execution or
1082  *              immediately executes function on a separate thread.
1083  *
1084  ******************************************************************************/
1085
/*
 * Common backend for acpi_os_execute()/acpi_os_hotplug_execute(): wrap
 * 'function(context)' in an acpi_os_dpc and queue it on the workqueue
 * selected by 'type'/'hp'.  Returns AE_NO_MEMORY on allocation failure or
 * AE_ERROR if the work could not be queued.
 */
static acpi_status __acpi_os_execute(acpi_execute_type type,
	acpi_osd_exec_callback function, void *context, int hp)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	/* GFP_ATOMIC: this can be called from interrupt context. */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
	 * because the hotplug code may call driver .remove() functions,
	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
	 * to flush these workqueues.
	 *
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (hp) {
		queue = kacpi_hotplug_wq;
		dpc->wait = 1;	/* drain the other queues before running */
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);

	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);	/* never queued, so we still own it */
	}
	return status;
}
1152
/*
 * Queue 'function(context)' for deferred execution on the workqueue
 * selected by 'type'.  Thin wrapper; see __acpi_os_execute().
 */
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	return __acpi_os_execute(type, function, context, 0);
}
1158 EXPORT_SYMBOL(acpi_os_execute);
1159
/*
 * Queue 'function(context)' on the dedicated hotplug workqueue; the work
 * will drain the other ACPI workqueues before running (dpc->wait).
 */
acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
	void *context)
{
	return __acpi_os_execute(0, function, context, 1);
}
1165 EXPORT_SYMBOL(acpi_os_hotplug_execute);
1166
/*
 * Block until all work queued on the GPE and notify workqueues has run.
 * Deliberately does NOT flush kacpi_hotplug_wq: hotplug work itself calls
 * this function, so flushing it here would deadlock.
 */
void acpi_os_wait_events_complete(void)
{
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
1172
1173 EXPORT_SYMBOL(acpi_os_wait_events_complete);
1174
1175 acpi_status
1176 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1177 {
1178         struct semaphore *sem = NULL;
1179
1180         sem = acpi_os_allocate(sizeof(struct semaphore));
1181         if (!sem)
1182                 return AE_NO_MEMORY;
1183         memset(sem, 0, sizeof(struct semaphore));
1184
1185         sema_init(sem, initial_units);
1186
1187         *handle = (acpi_handle *) sem;
1188
1189         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1190                           *handle, initial_units));
1191
1192         return AE_OK;
1193 }
1194
1195 /*
1196  * TODO: A better way to delete semaphores?  Linux doesn't have a
1197  * 'delete_semaphore()' function -- may result in an invalid
1198  * pointer dereference for non-synchronized consumers.  Should
1199  * we at least check for blocked threads and signal/cancel them?
1200  */
1201
1202 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1203 {
1204         struct semaphore *sem = (struct semaphore *)handle;
1205
1206         if (!sem)
1207                 return AE_BAD_PARAMETER;
1208
1209         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
1210
1211         BUG_ON(!list_empty(&sem->wait_list));
1212         kfree(sem);
1213         sem = NULL;
1214
1215         return AE_OK;
1216 }
1217
1218 /*
1219  * TODO: Support for units > 1?
1220  */
1221 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1222 {
1223         acpi_status status = AE_OK;
1224         struct semaphore *sem = (struct semaphore *)handle;
1225         long jiffies;
1226         int ret = 0;
1227
1228         if (!sem || (units < 1))
1229                 return AE_BAD_PARAMETER;
1230
1231         if (units > 1)
1232                 return AE_SUPPORT;
1233
1234         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1235                           handle, units, timeout));
1236
1237         if (timeout == ACPI_WAIT_FOREVER)
1238                 jiffies = MAX_SCHEDULE_TIMEOUT;
1239         else
1240                 jiffies = msecs_to_jiffies(timeout);
1241         
1242         ret = down_timeout(sem, jiffies);
1243         if (ret)
1244                 status = AE_TIME;
1245
1246         if (ACPI_FAILURE(status)) {
1247                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1248                                   "Failed to acquire semaphore[%p|%d|%d], %s",
1249                                   handle, units, timeout,
1250                                   acpi_format_exception(status)));
1251         } else {
1252                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1253                                   "Acquired semaphore[%p|%d|%d]", handle,
1254                                   units, timeout));
1255         }
1256
1257         return status;
1258 }
1259
1260 /*
1261  * TODO: Support for units > 1?
1262  */
1263 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1264 {
1265         struct semaphore *sem = (struct semaphore *)handle;
1266
1267         if (!sem || (units < 1))
1268                 return AE_BAD_PARAMETER;
1269
1270         if (units > 1)
1271                 return AE_SUPPORT;
1272
1273         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1274                           units));
1275
1276         up(sem);
1277
1278         return AE_OK;
1279 }
1280
#ifdef ACPI_FUTURE_USAGE
/*
 * Read a line of debugger input into 'buffer' when running under kdb.
 * Always returns 0 (no character count is reported to ACPICA).
 * NOTE(review): kdb_read() is bounded by sizeof(line_buf), not by the
 * caller's buffer size -- assumes 'buffer' is at least that large; verify
 * against the ACPICA debugger caller.
 */
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, sizeof(line_buf));

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return 0;
}
#endif                          /*  ACPI_FUTURE_USAGE  */
1300
1301 acpi_status acpi_os_signal(u32 function, void *info)
1302 {
1303         switch (function) {
1304         case ACPI_SIGNAL_FATAL:
1305                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1306                 break;
1307         case ACPI_SIGNAL_BREAKPOINT:
1308                 /*
1309                  * AML Breakpoint
1310                  * ACPI spec. says to treat it as a NOP unless
1311                  * you are debugging.  So if/when we integrate
1312                  * AML debugger into the kernel debugger its
1313                  * hook will go here.  But until then it is
1314                  * not useful to print anything on breakpoints.
1315                  */
1316                 break;
1317         default:
1318                 break;
1319         }
1320
1321         return AE_OK;
1322 }
1323
1324 static int __init acpi_os_name_setup(char *str)
1325 {
1326         char *p = acpi_os_name;
1327         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1328
1329         if (!str || !*str)
1330                 return 0;
1331
1332         for (; count-- && str && *str; str++) {
1333                 if (isalnum(*str) || *str == ' ' || *str == ':')
1334                         *p++ = *str;
1335                 else if (*str == '\'' || *str == '"')
1336                         continue;
1337                 else
1338                         break;
1339         }
1340         *p = 0;
1341
1342         return 1;
1343
1344 }
1345
1346 __setup("acpi_os_name=", acpi_os_name_setup);
1347
#define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
#define OSI_STRING_ENTRIES_MAX 16       /* arbitrary */

/*
 * One "acpi_osi=" override: an _OSI interface string plus whether to
 * install (true) or remove (false) it at late setup.
 */
struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;
};

/*
 * Override table, pre-seeded with interfaces enabled by default; unused
 * slots are zero-filled (empty string marks the end of valid entries).
 */
static struct osi_setup_entry __initdata
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};
1363
1364 void __init acpi_osi_setup(char *str)
1365 {
1366         struct osi_setup_entry *osi;
1367         bool enable = true;
1368         int i;
1369
1370         if (!acpi_gbl_create_osi_method)
1371                 return;
1372
1373         if (str == NULL || *str == '\0') {
1374                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1375                 acpi_gbl_create_osi_method = FALSE;
1376                 return;
1377         }
1378
1379         if (*str == '!') {
1380                 str++;
1381                 enable = false;
1382         }
1383
1384         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1385                 osi = &osi_setup_entries[i];
1386                 if (!strcmp(osi->string, str)) {
1387                         osi->enable = enable;
1388                         break;
1389                 } else if (osi->string[0] == '\0') {
1390                         osi->enable = enable;
1391                         strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
1392                         break;
1393                 }
1394         }
1395 }
1396
1397 static void __init set_osi_linux(unsigned int enable)
1398 {
1399         if (osi_linux.enable != enable)
1400                 osi_linux.enable = enable;
1401
1402         if (osi_linux.enable)
1403                 acpi_osi_setup("Linux");
1404         else
1405                 acpi_osi_setup("!Linux");
1406
1407         return;
1408 }
1409
1410 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1411 {
1412         osi_linux.cmdline = 1;  /* cmdline set the default and override DMI */
1413         osi_linux.dmi = 0;
1414         set_osi_linux(enable);
1415
1416         return;
1417 }
1418
1419 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1420 {
1421         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1422
1423         if (enable == -1)
1424                 return;
1425
1426         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1427         set_osi_linux(enable);
1428
1429         return;
1430 }
1431
1432 /*
1433  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1434  *
1435  * empty string disables _OSI
1436  * string starting with '!' disables that string
1437  * otherwise string is added to list, augmenting built-in strings
1438  */
1439 static void __init acpi_osi_setup_late(void)
1440 {
1441         struct osi_setup_entry *osi;
1442         char *str;
1443         int i;
1444         acpi_status status;
1445
1446         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1447                 osi = &osi_setup_entries[i];
1448                 str = osi->string;
1449
1450                 if (*str == '\0')
1451                         break;
1452                 if (osi->enable) {
1453                         status = acpi_install_interface(str);
1454
1455                         if (ACPI_SUCCESS(status))
1456                                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1457                 } else {
1458                         status = acpi_remove_interface(str);
1459
1460                         if (ACPI_SUCCESS(status))
1461                                 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1462                 }
1463         }
1464 }
1465
1466 static int __init osi_setup(char *str)
1467 {
1468         if (str && !strcmp("Linux", str))
1469                 acpi_cmdline_osi_linux(1);
1470         else if (str && !strcmp("!Linux", str))
1471                 acpi_cmdline_osi_linux(0);
1472         else
1473                 acpi_osi_setup(str);
1474
1475         return 1;
1476 }
1477
1478 __setup("acpi_osi=", osi_setup);
1479
1480 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1481 static int __init acpi_serialize_setup(char *str)
1482 {
1483         printk(KERN_INFO PREFIX "serialize enabled\n");
1484
1485         acpi_gbl_all_methods_serialized = TRUE;
1486
1487         return 1;
1488 }
1489
1490 __setup("acpi_serialize", acpi_serialize_setup);
1491
1492 /* Check of resource interference between native drivers and ACPI
1493  * OperationRegions (SystemIO and System Memory only).
1494  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1495  * in arbitrary AML code and can interfere with legacy drivers.
1496  * acpi_enforce_resources= can be set to:
1497  *
1498  *   - strict (default) (2)
1499  *     -> further driver trying to access the resources will not load
1500  *   - lax              (1)
1501  *     -> further driver trying to access the resources will load, but you
1502  *     get a system message that something might go wrong...
1503  *
1504  *   - no               (0)
1505  *     -> ACPI Operation Region resources will not be registered
1506  *
1507  */
1508 #define ENFORCE_RESOURCES_STRICT 2
1509 #define ENFORCE_RESOURCES_LAX    1
1510 #define ENFORCE_RESOURCES_NO     0
1511
1512 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1513
1514 static int __init acpi_enforce_resources_setup(char *str)
1515 {
1516         if (str == NULL || *str == '\0')
1517                 return 0;
1518
1519         if (!strcmp("strict", str))
1520                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1521         else if (!strcmp("lax", str))
1522                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1523         else if (!strcmp("no", str))
1524                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1525
1526         return 1;
1527 }
1528
1529 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1530
1531 /* Check for resource conflicts between ACPI OperationRegions and native
1532  * drivers */
1533 int acpi_check_resource_conflict(const struct resource *res)
1534 {
1535         acpi_adr_space_type space_id;
1536         acpi_size length;
1537         u8 warn = 0;
1538         int clash = 0;
1539
1540         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1541                 return 0;
1542         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1543                 return 0;
1544
1545         if (res->flags & IORESOURCE_IO)
1546                 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1547         else
1548                 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1549
1550         length = resource_size(res);
1551         if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1552                 warn = 1;
1553         clash = acpi_check_address_range(space_id, res->start, length, warn);
1554
1555         if (clash) {
1556                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1557                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1558                                 printk(KERN_NOTICE "ACPI: This conflict may"
1559                                        " cause random problems and system"
1560                                        " instability\n");
1561                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1562                                " for this device, you should use it instead of"
1563                                " the native driver\n");
1564                 }
1565                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1566                         return -EBUSY;
1567         }
1568         return 0;
1569 }
1570 EXPORT_SYMBOL(acpi_check_resource_conflict);
1571
1572 int acpi_check_region(resource_size_t start, resource_size_t n,
1573                       const char *name)
1574 {
1575         struct resource res = {
1576                 .start = start,
1577                 .end   = start + n - 1,
1578                 .name  = name,
1579                 .flags = IORESOURCE_IO,
1580         };
1581
1582         return acpi_check_resource_conflict(&res);
1583 }
1584 EXPORT_SYMBOL(acpi_check_region);
1585
1586 /*
1587  * Let drivers know whether the resource checks are effective
1588  */
/* Nonzero when strict enforcement will actually reject conflicting drivers. */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
1594
1595 /*
1596  * Deallocate the memory for a spinlock.
1597  */
/* Free a spinlock allocated for ACPICA (handle is the spinlock pointer). */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}
1602
1603 /*
1604  * Acquire a spinlock.
1605  *
1606  * handle is a pointer to the spinlock_t.
1607  */
1608
/*
 * Acquire the spinlock with interrupts disabled; the saved IRQ flags are
 * returned to the caller for the matching acpi_os_release_lock().
 */
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}
1615
1616 /*
1617  * Release a spinlock. See above.
1618  */
1619
/* Release a spinlock taken by acpi_os_acquire_lock(), restoring IRQ flags. */
void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}
1624
1625 #ifndef ACPI_USE_LOCAL_CACHE
1626
1627 /*******************************************************************************
1628  *
1629  * FUNCTION:    acpi_os_create_cache
1630  *
1631  * PARAMETERS:  name      - Ascii name for the cache
1632  *              size      - Size of each cached object
1633  *              depth     - Maximum depth of the cache (in objects) <ignored>
1634  *              cache     - Where the new cache object is returned
1635  *
1636  * RETURN:      status
1637  *
1638  * DESCRIPTION: Create a cache object
1639  *
1640  ******************************************************************************/
1641
1642 acpi_status
1643 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1644 {
1645         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1646         if (*cache == NULL)
1647                 return AE_ERROR;
1648         else
1649                 return AE_OK;
1650 }
1651
1652 /*******************************************************************************
1653  *
1654  * FUNCTION:    acpi_os_purge_cache
1655  *
1656  * PARAMETERS:  Cache           - Handle to cache object
1657  *
1658  * RETURN:      Status
1659  *
1660  * DESCRIPTION: Free all objects within the requested cache.
1661  *
1662  ******************************************************************************/
1663
/*
 * Release unused objects held by the cache back to the system.  The
 * shrink result is deliberately ignored; this call is best-effort.
 */
acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}
1669
1670 /*******************************************************************************
1671  *
1672  * FUNCTION:    acpi_os_delete_cache
1673  *
1674  * PARAMETERS:  Cache           - Handle to cache object
1675  *
1676  * RETURN:      Status
1677  *
1678  * DESCRIPTION: Free all objects within the requested cache and delete the
1679  *              cache object.
1680  *
1681  ******************************************************************************/
1682
/* Destroy the slab cache; all objects must have been released already. */
acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}
1688
1689 /*******************************************************************************
1690  *
1691  * FUNCTION:    acpi_os_release_object
1692  *
1693  * PARAMETERS:  Cache       - Handle to cache object
1694  *              Object      - The object to be released
1695  *
1696  * RETURN:      None
1697  *
1698  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1699  *              the object is deleted.
1700  *
1701  ******************************************************************************/
1702
/* Return 'object' to its slab cache (the slab layer handles reuse). */
acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
1708 #endif
1709
/*
 * Handle the "acpi_no_auto_ssdt" boot parameter: prevent ACPICA from
 * automatically loading SSDT tables referenced by the firmware.
 */
static int __init acpi_no_auto_ssdt_setup(char *s)
{
	printk(KERN_NOTICE PREFIX "SSDT auto-load disabled\n");

	acpi_gbl_disable_ssdt_table_load = TRUE;

	return 1;
}
1718
1719 __setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
1720
/*
 * Early OSL init: pre-map the fixed-event and GPE register blocks from the
 * FADT so later register accesses can use cached mappings.
 */
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	return AE_OK;
}
1730
/*
 * Second-stage OSL init: create the three ACPI workqueues (GPE, notify,
 * hotplug) and register the _OSI handler.  Workqueue creation failure is
 * fatal at boot (BUG_ON) -- ACPI cannot operate without them.
 */
acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}
1743
/*
 * OSL teardown: release the SCI handler, unmap the FADT register blocks
 * (reverse order of acpi_os_initialize()), and destroy the workqueues
 * created by acpi_os_initialize1().
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
1762
1763 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
1764                                   u32 pm1b_control)
1765 {
1766         int rc = 0;
1767         if (__acpi_os_prepare_sleep)
1768                 rc = __acpi_os_prepare_sleep(sleep_state,
1769                                              pm1a_control, pm1b_control);
1770         if (rc < 0)
1771                 return AE_ERROR;
1772         else if (rc > 0)
1773                 return AE_CTRL_SKIP;
1774
1775         return AE_OK;
1776 }
1777
/* Register the callback invoked by acpi_os_prepare_sleep() (NULL clears it). */
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
1783
1784 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1785                                   u32 val_b)
1786 {
1787         int rc = 0;
1788         if (__acpi_os_prepare_extended_sleep)
1789                 rc = __acpi_os_prepare_extended_sleep(sleep_state,
1790                                              val_a, val_b);
1791         if (rc < 0)
1792                 return AE_ERROR;
1793         else if (rc > 0)
1794                 return AE_CTRL_SKIP;
1795
1796         return AE_OK;
1797 }
1798
/* Register the callback for acpi_os_prepare_extended_sleep() (NULL clears). */
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
			       u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}
1804
1805
/*
 * Allocate an acpi_hp_work item and queue 'func' on the hotplug workqueue.
 * If queueing fails (work already pending) the allocation is freed here;
 * otherwise ownership passes to the work function -- presumably 'func'
 * frees it when done (verify against callers).  Allocation failure is
 * silently dropped, losing the hotplug event.
 */
void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
			void (*func)(struct work_struct *work))
{
	struct acpi_hp_work *hp_work;
	int ret;

	hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
	if (!hp_work)
		return;

	hp_work->handle = handle;
	hp_work->type = type;
	hp_work->context = context;

	INIT_WORK(&hp_work->work, func);
	ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
	if (!ret)
		kfree(hp_work);	/* never queued; we still own it */
}
1825 EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);