// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!sev_active())
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY and looks for system RAM and/or a
 * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_nocache);
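
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file;
 * pdev, MY_DEV_CTRL and MY_DEV_STATUS are illustrative names only):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	writel(0x1, regs + MY_DEV_CTRL);
 *	status = readl(regs + MY_DEV_STATUS);
 *
 *	iounmap(regs);
 */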
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
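
/*
 * A minimal usage sketch (hypothetical; fb_start and fb_len stand in for a
 * real framebuffer BAR). Write-combined mappings batch CPU writes, so an
 * explicit write barrier is needed where ordering against later MMIO matters:
 *
 *	void __iomem *fb = ioremap_wc(fb_start, fb_len);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_len);
 *		wmb();
 *		iounmap(fb);
 *	}
 */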
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
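
/*
 * A minimal sketch of how a /dev/mem style read path pairs these helpers
 * (simplified; buf, count and p are illustrative names, not this file's API):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, count))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */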
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		/* Fallthrough */

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}