/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * Not all EFI Runtime Services are implemented yet, as EFI only
 * supports physical mode addressing on SoftSDV.  This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>

#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

extern efi_status_t efi_call_phys (void *, ...);

static efi_runtime_services_t *runtime;
static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;

#define efi_call_virt(f, args...)	(*(f))(args)
#define STUB_GET_TIME(prefix, adjust_arg) \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
	ia64_load_scratch_fpregs(fr); \

#define STUB_SET_TIME(prefix, adjust_arg) \
prefix##_set_time (efi_time_t *tm) \
	struct ia64_fpreg fr[6]; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
	struct ia64_fpreg fr[6]; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
				adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
	ia64_load_scratch_fpregs(fr); \

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
	struct ia64_fpreg fr[6]; \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
				adjust_arg(name), adjust_arg(vendor), aattr, \
				adjust_arg(data_size), adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
	struct ia64_fpreg fr[6]; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
				adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr); \

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
		       unsigned long data_size, void *data) \
	struct ia64_fpreg fr[6]; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
				adjust_arg(name), adjust_arg(vendor), attr, data_size, \
	ia64_load_scratch_fpregs(fr); \

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
	struct ia64_fpreg fr[6]; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
	ia64_load_scratch_fpregs(fr); \

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
		adata = adjust_arg(data); \
	ia64_save_scratch_fpregs(fr); \
	efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
			  reset_type, status, data_size, adata); \
	/* should not return, but just in case... */ \
	ia64_load_scratch_fpregs(fr); \

#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)
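/*
 * For orientation only: a rough, hand-expanded sketch of what
 * STUB_GET_TIME(phys, phys_ptr) generates (the real macro also declares
 * and returns an efi_status_t; this rendering is illustrative, not verbatim):
 *
 *	static efi_status_t
 *	phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 *	{
 *		struct ia64_fpreg fr[6];
 *		efi_time_cap_t *atc = NULL;
 *		efi_status_t ret;
 *
 *		if (tc)
 *			atc = (__typeof__(tc)) ia64_tpa(tc);	// phys_ptr(): virt -> phys
 *		ia64_save_scratch_fpregs(fr);
 *		ret = efi_call_phys((efi_get_time_t *) __va(runtime->get_time),
 *				    (__typeof__(tm)) ia64_tpa(tm), atc);
 *		ia64_load_scratch_fpregs(fr);
 *		return ret;
 *	}
 *
 * The "virt" variants are generated the same way, but pass pointers through
 * unchanged (the id() adjuster) and call the runtime service directly via
 * efi_call_virt() instead of dropping to physical mode with efi_call_phys().
 */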
efi_gettimeofday (struct timespec *ts)
	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
		memset(ts, 0, sizeof(*ts));
	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;

is_memory_available (efi_memory_desc_t *md)
	if (!(md->attribute & EFI_MEMORY_WB))
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:

typedef struct kern_memdesc {

static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)

kmd_end(kern_memdesc_t *kmd)
	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));

efi_md_end(efi_memory_desc_t *md)
	return (md->phys_addr + efi_md_size(md));

efi_wb(efi_memory_desc_t *md)
	return (md->attribute & EFI_MEMORY_WB);

efi_uc(efi_memory_desc_t *md)
	return (md->attribute & EFI_MEMORY_UC);

walk (efi_freemem_callback_t callback, void *arg, u64 attr)
	u64 start, end, voff;

	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
	for (k = kern_memmap; k->start != ~0UL; k++) {
		if (k->attribute != attr)
		start = PAGE_ALIGN(k->start);
		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
			if ((*callback)(start + voff, end + voff, arg) < 0)

/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for OS use.
 */
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
	walk(callback, arg, EFI_MEMORY_WB);

/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for uncached allocator.
 */
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
	walk(callback, arg, EFI_MEMORY_UC);
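/*
 * Illustration only (not part of the original file): a minimal sketch of an
 * efi_memmap_walk() callback.  The names below are hypothetical; real
 * callbacks of this shape live in the ia64 mm init code.
 */
#if 0
static int __init
count_available_pages (u64 start, u64 end, void *arg)
{
	unsigned long *npages = arg;		/* caller-supplied accumulator */

	*npages += (end - start) >> PAGE_SHIFT;
	return 0;				/* non-negative: keep walking */
}

static void __init
example_count_memory (void)
{
	unsigned long npages = 0;

	/* visits every granule-trimmed WB range recorded in kern_memmap */
	efi_memmap_walk(count_available_pages, &npages);
	printk(KERN_INFO "roughly %lu pages available\n", npages);
}
#endif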
/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG.
 */
efi_get_pal_addr (void)
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	int pal_code_count = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (md->type != EFI_PAL_CODE)

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI PAL code memory ranges, dropped @ %lx\n",
		/*
		 * The only ITLB entry in region 7 that is used is the one installed by
		 * __start().  That entry covers a 64MB range.
		 */
		mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the kernel
		 * mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
		 * 256KB, and only one ITR is needed to map it.  This implies that the
		 * PAL code is always aligned on its size, i.e., the closest matching page
		 * size supported by the TLB.  Therefore PAL code is guaranteed never to
		 * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine whether or not
		 * we need a dedicated ITR for the PAL code.
		 */
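		/*
		 * Worked example (illustrative, not from the original file): with a
		 * 64MB kernel translation page, mask clears the offset bits, so
		 * (vaddr & mask) names the 64MB region holding the PAL code.  If it
		 * equals (KERNEL_START & mask), the ITR installed by __start()
		 * already covers PAL and nothing more needs to be mapped; otherwise
		 * we fall through and pin an IA64_GRANULE_SIZE area further below.
		 */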
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",

		if (efi_md_size(md) > IA64_GRANULE_SIZE)
			panic("Woah! PAL code size bigger than a granule!");

		mask = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + efi_md_size(md),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);

		return __va(md->phys_addr);

	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",

efi_map_pal_code (void)
	void *pal_vaddr = efi_get_pal_addr ();

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
	ia64_set_psr(psr);	/* restore psr */
	void *efi_map_start, *efi_map_end;
	efi_config_table_t *config_tables;
	char *cp, vendor[100] = "unknown";

	/* it's too early to be able to use the standard kernel command line support... */
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			mem_limit = memparse(cp + 4, &cp);
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
			while (*cp != ' ' && *cp)

		printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
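	/*
	 * Illustration only (hypothetical values, not from the original file):
	 * booting with "mem=2G max_addr=4G min_addr=64M" leaves
	 *	mem_limit = 0x80000000,
	 *	max_addr  = GRANULEROUNDDOWN(0x100000000),
	 *	min_addr  = GRANULEROUNDDOWN(0x4000000),
	 * and the printks above then report the ignored ranges in MB.
	 */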
	efi.systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab == NULL)
		panic("Woah! Can't find EFI system table.\n");
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		panic("Woah! EFI system table signature incorrect\n");
	if ((efi.systab->hdr.revision >> 16) == 0)
		printk(KERN_WARNING "Warning: EFI system table version "
		       "%d.%02d, expected 1.00 or greater\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	config_tables = __va(efi.systab->tables);

	/* Show what we know for posterity */
	c16 = __va(efi.systab->fw_vendor);
	for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)

	printk(KERN_INFO "EFI v%u.%.02u by %s:",
	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);

	efi.mps = EFI_INVALID_TABLE_ADDR;
	efi.acpi = EFI_INVALID_TABLE_ADDR;
	efi.acpi20 = EFI_INVALID_TABLE_ADDR;
	efi.smbios = EFI_INVALID_TABLE_ADDR;
	efi.sal_systab = EFI_INVALID_TABLE_ADDR;
	efi.boot_info = EFI_INVALID_TABLE_ADDR;
	efi.hcdp = EFI_INVALID_TABLE_ADDR;
	efi.uga = EFI_INVALID_TABLE_ADDR;

	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
			efi.mps = config_tables[i].table;
			printk(" MPS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
			efi.acpi20 = config_tables[i].table;
			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
			efi.acpi = config_tables[i].table;
			printk(" ACPI=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
			efi.smbios = config_tables[i].table;
			printk(" SMBIOS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
			efi.sal_systab = config_tables[i].table;
			printk(" SALsystab=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
			efi.hcdp = config_tables[i].table;
			printk(" HCDP=0x%lx", config_tables[i].table);

	runtime = __va(efi.systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/* print EFI memory map: */
	efi_memory_desc_t *md;

	for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
		printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
		       i, md->type, md->attribute, md->phys_addr,
		       md->phys_addr + efi_md_size(md),
		       md->num_pages >> (20 - EFI_PAGE_SHIFT));
	efi_enter_virtual_mode();

efi_enter_virtual_mode (void)
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the order of
			 * the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WT) {
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
							   | _PAGE_D | _PAGE_MA_WT
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
		       "(status=%lu)\n", status);

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be
 * one entry of this type; other I/O port ranges should be described via ACPI.
 */
efi_get_iobase (void)
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
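/*
 * Illustration only (not part of the original file): a hedged sketch of how a
 * caller might consume efi_get_iobase(); the function name below is made up.
 * The real consumer is the ia64 setup code, which remaps this physical base
 * so the inX()/outX() accessors can reach I/O port space.
 */
#if 0
static void __init
example_setup_io_ports (void)
{
	unsigned long phys_iobase = efi_get_iobase();

	if (!phys_iobase)
		printk(KERN_WARNING "EFI memory map reports no I/O port space\n");
	/* else: remap phys_iobase (uncacheable) and record it as the port base */
}
#endif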
static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
	struct kern_memdesc *md;

	for (md = kern_memmap; md->start != ~0UL; md++) {
		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))

static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (phys_addr - md->phys_addr < efi_md_size(md))

efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	end = phys_addr + size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (md->phys_addr < end && efi_md_end(md) > phys_addr)

efi_mem_type (unsigned long phys_addr)
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

efi_mem_attributes (unsigned long phys_addr)
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	return md->attribute;
EXPORT_SYMBOL(efi_mem_attributes);
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
	unsigned long end = phys_addr + size;
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	/*
	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
	 * the kernel that firmware needs this region mapped.
	 */
	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
		unsigned long md_end = efi_md_end(md);

		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)

kern_mem_attribute (unsigned long phys_addr, unsigned long size)
	unsigned long end = phys_addr + size;
	struct kern_memdesc *md;

	/*
	 * This is a hack for ioremap calls before we set up kern_memmap.
	 * Maybe we should do efi_memmap_init() earlier instead.
	 */
	attr = efi_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return EFI_MEMORY_WB;

	md = kern_memory_descriptor(phys_addr);

	attr = md->attribute;
		unsigned long md_end = kmd_end(md);

		md = kern_memory_descriptor(md_end);
		if (!md || md->attribute != attr)

EXPORT_SYMBOL(kern_mem_attribute);
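/*
 * Illustration only (not part of the original file): a hedged sketch of the
 * kind of decision ioremap-style code can make with kern_mem_attribute().
 * The function name is hypothetical; the constants are the ones used above.
 */
#if 0
static void __iomem *
example_map_region (unsigned long phys_addr, unsigned long size)
{
	u64 attr = kern_mem_attribute(phys_addr, size);

	if (attr & EFI_MEMORY_WB)	/* cacheable kernel identity mapping is safe */
		return (void __iomem *) phys_to_virt(phys_addr);
	if (attr & EFI_MEMORY_UC)	/* use the uncached identity mapping instead */
		return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
	return NULL;	/* not described by kern_memmap with a usable attribute */
}
#endif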
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
	/*
	 * /dev/mem reads and writes use copy_to_user(), which implicitly
	 * uses a granule-sized kernel identity mapping.  It's really
	 * only safe to do this for regions in kern_memmap.  For more
	 * details, see Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)

valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
	unsigned long phys_addr = pfn << PAGE_SHIFT;

	attr = efi_mem_attribute(phys_addr, size);

	/*
	 * /dev/mem mmap uses normal user pages, so we don't need the entire
	 * granule, but the entire region we're mapping must support the same
	 * attribute.
	 */
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)

	/*
	 * Intel firmware doesn't tell us about all the MMIO regions, so
	 * in general we have to allow mmap requests.  But if EFI *does*
	 * tell us about anything inside this region, we should deny it.
	 * The user can always map a smaller region to avoid the overlap.
	 */
	if (efi_memmap_intersects(phys_addr, size))

phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
	unsigned long phys_addr = pfn << PAGE_SHIFT;

	/*
	 * For /dev/mem mmap, we use user mappings, but if the region is
	 * in kern_memmap (and hence may be covered by a kernel mapping),
	 * we must use the same attribute as the kernel mapping.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported, we prefer that.
	 */
	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);

	return pgprot_noncached(vma_prot);
efi_uart_console_only(void)
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;

	/* Convert to UTF-16 */
		*utf16++ = *s++ & 0x7f;
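	/*
	 * Worked example (illustration only): the masking above turns the 7-bit
	 * ASCII name into UTF-16 code units, so "ConOut" becomes
	 * { 0x0043, 0x006f, 0x006e, 0x004f, 0x0075, 0x0074 } plus a terminating
	 * 0x0000, which is the form the EFI variable services expect.
	 */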
	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
	printk(KERN_ERR "Malformed %s value\n", name);
/*
 * Look for the first granule-aligned memory descriptor that is big enough to
 * hold the EFI memory map.  Make sure this descriptor is at least granule
 * sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
	u64 contig_low=0, contig_high=0;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64 space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * Worst case: we need 3 kernel descriptors for each efi descriptor
	 * (if every entry has a WB part in the middle, and UC head and tail),
	 * plus one for the end marker.
	 */
	space_needed = sizeof(kern_memdesc_t) *
		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
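	/*
	 * Worked example (illustrative numbers, not from the original file):
	 * a 4096-byte EFI memory map with 48-byte descriptors holds ~85 entries,
	 * so the worst case above is 3*85 + 1 = 256 kern_memdesc_t slots (a few
	 * kilobytes), easily carved out of one free WB range by the scan below.
	 */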
	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				if (!efi_wb(check_md))
				if (contig_high != check_md->phys_addr)
				contig_high = efi_md_end(check_md);
			contig_high = GRANULEROUNDDOWN(contig_high);
		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)

		/* Round ends inward to granule boundaries */
		as = max(contig_low, md->phys_addr);
		ae = min(contig_high, efi_md_end(md));

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae - as > space_needed)

	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");
/*
 * Walk the EFI memory map and gather all memory available for the kernel
 * to use.  We can allocate partial granules only if the unavailable parts
 * exist, and are WB.
 */
efi_memmap_init(unsigned long *s, unsigned long *e)
	struct kern_memdesc *k, *prev = NULL;
	u64 contig_low=0, contig_high=0;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	unsigned long total_mem = 0;

	k = kern_memmap = find_memmap_space();

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
				   md->type == EFI_BOOT_SERVICES_DATA)) {
			k->attribute = EFI_MEMORY_UC;
			k->start = md->phys_addr;
			k->num_pages = md->num_pages;

		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				if (!efi_wb(check_md))
				if (contig_high != check_md->phys_addr)
				contig_high = efi_md_end(check_md);
			contig_high = GRANULEROUNDDOWN(contig_high);
		if (!is_memory_available(md))

#ifdef CONFIG_CRASH_DUMP
		/* saved_max_pfn should ignore max_addr= command line arg */
		if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
			saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);

		/*
		 * Round ends inward to granule boundaries
		 * Give trimmings to uncached allocator
		 */
		if (md->phys_addr < contig_low) {
			lim = min(efi_md_end(md), contig_low);
			if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
			    kmd_end(k-1) == md->phys_addr) {
				(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
				k->attribute = EFI_MEMORY_UC;
				k->start = md->phys_addr;
				k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;

		if (efi_md_end(md) > contig_high) {
			lim = max(md->phys_addr, contig_high);
			if (lim == md->phys_addr && k > kern_memmap &&
			    (k-1)->attribute == EFI_MEMORY_UC &&
			    kmd_end(k-1) == md->phys_addr) {
				(k-1)->num_pages += md->num_pages;
				k->attribute = EFI_MEMORY_UC;
				k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;

		ae = efi_md_end(md);

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (prev && kmd_end(prev) == md->phys_addr) {
			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
			total_mem += ae - as;
		k->attribute = EFI_MEMORY_WB;
		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
		total_mem += ae - as;
	k->start = ~0L;	/* end-marker */

	/* reserve the memory we are using for kern_memmap */
	*s = (u64)kern_memmap;
efi_initialize_iomem_resources(struct resource *code_resource,
			       struct resource *data_resource,
			       struct resource *bss_resource)
	struct resource *res;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	unsigned long flags;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (md->num_pages == 0) /* should not happen */

		flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		case EFI_MEMORY_MAPPED_IO:
		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:

		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_CONVENTIONAL_MEMORY:
			if (md->attribute & EFI_MEMORY_WP) {
				name = "System ROM";
				flags |= IORESOURCE_READONLY;
				name = "System RAM";

		case EFI_ACPI_MEMORY_NVS:
			name = "ACPI Non-volatile Storage";

		case EFI_UNUSABLE_MEMORY:
			flags |= IORESOURCE_DISABLED;

		case EFI_RESERVED_TYPE:
		case EFI_RUNTIME_SERVICES_CODE:
		case EFI_RUNTIME_SERVICES_DATA:
		case EFI_ACPI_RECLAIM_MEMORY:

		if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
			printk(KERN_ERR "failed to allocate resource for iomem\n");
		res->start = md->phys_addr;
		res->end = md->phys_addr + efi_md_size(md) - 1;

		if (insert_resource(&iomem_resource, res) < 0)
		/*
		 * We don't know which region contains
		 * kernel data so we try it repeatedly and
		 * let the resource manager test it.
		 */
		insert_resource(res, code_resource);
		insert_resource(res, data_resource);
		insert_resource(res, bss_resource);
		insert_resource(res, &efi_memmap_res);
		insert_resource(res, &boot_param_res);
		if (crashk_res.end > crashk_res.start)
			insert_resource(res, &crashk_res);
/*
 * Find a block of memory aligned to 64M, excluding reserved regions;
 * rsvd_regions are sorted.
 */
unsigned long __init
kdump_find_rsvd_region (unsigned long size,
			struct rsvd_region *r, int n)
	u64 alignment = 1UL << _PAGE_SIZE_64M;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		start = ALIGN(md->phys_addr, alignment);
		end = efi_md_end(md);
		for (i = 0; i < n; i++) {
			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
				if (__pa(r[i].start) > start + size)
				start = ALIGN(__pa(r[i].end), alignment);
				if (i < n-1 && __pa(r[i+1].start) < start + size)
		if (end > start + size)

	printk(KERN_WARNING "Cannot reserve 0x%lx bytes of memory for crashdump\n",

#ifdef CONFIG_PROC_VMCORE
/* locate the size of the descriptor at a certain address */
unsigned long __init
vmcore_find_descriptor_size (unsigned long address)
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	unsigned long ret = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (efi_wb(md) && md->type == EFI_LOADER_DATA
		    && md->phys_addr == address) {
			ret = efi_md_size(md);

	printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");