arch/metag/mm/ioremap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * Needed for memory-mapped I/O devices mapped outside our normal DRAM
 * window (that is, all memory-mapped I/O devices).
 *
 * Copyright (C) 1995,1996 Linus Torvalds
 *
 * Meta port based on CRIS-port by Axis Communications AB
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/pgtable.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
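/*
 * For illustration (assuming 4 KiB pages; the address below is purely
 * hypothetical): __ioremap(0x10001234, 0x100, 0) maps the page at
 * physical 0x10001000 and returns a cookie with the sub-page offset
 * 0x234 added back, so the caller never sees the alignment fixup.
 */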
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	unsigned long addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Custom region addresses are accessible and uncached by default. */
	if (phys_addr >= LINSYSCUSTOM_BASE &&
	    phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
		return (__force void __iomem *) phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
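	/*
	 * Base kernel PTE bits for the mapping; the caller-supplied flags
	 * (e.g. an alternative cache policy) are OR'd in on top.
	 */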
	prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
			_PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
			flags);

	/*
	 * Allocate a kernel virtual area and map the physical range into it.
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (unsigned long) area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		vunmap((void *) addr);
		return NULL;
	}
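	/* Re-apply the sub-page offset within the new virtual mapping. */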
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	/* LINSYSCUSTOM addresses were never vmapped; nothing to undo. */
	if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE &&
	    (__force unsigned long)addr < (LINSYSCUSTOM_BASE +
					   LINSYSCUSTOM_LIMIT))
		return;

	/* Unmap the region and free the vm_struct allocated by __ioremap(). */
	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (unlikely(!p)) {
		pr_err("iounmap: bad address %p\n", addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
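
/*
 * Illustrative usage sketch: DEV_PHYS_BASE, DEV_REG_SIZE and DEV_CTRL are
 * hypothetical device constants used only for this example. A driver would
 * typically map its register window and access it with the MMIO accessors:
 *
 *	void __iomem *regs = __ioremap(DEV_PHYS_BASE, DEV_REG_SIZE, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + DEV_CTRL);
 *	__iounmap(regs);
 *
 * Drivers normally reach these routines through the ioremap()/iounmap()
 * wrappers in <asm/io.h> rather than calling the double-underscore
 * helpers directly.
 */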