[POWERPC] unmap_vm_area becomes unmap_kernel_range for the public
arch/powerpc/mm/pgtable_64.c
/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

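/*
 * Descriptive note (added by the editor, inferred from the code below):
 * ioremap_bot tracks the next free virtual address for early, pre-imalloc
 * ioremap() calls, while phbs_io_bot tracks the next free address in the
 * dedicated PHB I/O window.  Both cursors only ever grow upward in this
 * file; nothing here hands the space back.
 */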
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

/*
 * map_io_page() is currently only called by __ioremap().  It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (mem_init_done) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize)) {
			printk(KERN_ERR "Failed to create bolted mapping "
			       "for I/O memory at 0x%016lx!\n", pa);
			return -ENOMEM;
		}
	}
	return 0;
}


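/*
 * Descriptive note (added by the editor, inferred from the code below):
 * __ioremap_com() maps the physical range starting at @pa (the page-aligned
 * form of @addr) onto the virtual range starting at @ea, one page at a
 * time, and returns the virtual address corresponding to @addr, including
 * its offset within the first page.  It returns NULL if any page fails
 * to map.
 */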
static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
				    unsigned long ea, unsigned long size,
				    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the imalloc system is
	 * running, we use it.  Before that, we map using addresses going
	 * up from ioremap_bot; imalloc will use the addresses from
	 * ioremap_bot through IMALLOC_END.
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if ((size == 0) || (pa == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}

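/*
 * Illustrative sketch (added by the editor, not part of the original
 * file): the typical driver-side use of ioremap()/iounmap().  The
 * physical base address, window size, and register offset below are
 * made-up example values.
 */
static u32 example_mmio_read(void)
{
	void __iomem *regs;
	u32 val = 0;

	/* Map a hypothetical 4K MMIO window, uncached and guarded. */
	regs = ioremap(0xf8000000ul, 0x1000);
	if (regs) {
		val = in_be32(regs + 0x10);	/* read a device register */
		iounmap(regs);			/* drop the mapping when done */
	}
	return val;
}
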
void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}

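/*
 * Illustrative sketch (added by the editor, not part of the original
 * file): ioremap_flags() is for callers that need non-default page
 * protections.  The address and size are made-up values; here the
 * mapping stays uncached but drops _PAGE_GUARDED, allowing speculative
 * access to a hypothetical prefetchable region.  __ioremap() will OR in
 * the PAGE_KERNEL bits since _PAGE_PRESENT is not set here.
 */
static void __iomem *example_map_prefetchable(void)
{
	return ioremap_flags(0xf4000000ul, 0x100000, _PAGE_NO_CACHE);
}
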
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure!\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}

/*
 * Unmap an IO region and remove it from the imalloc'd list.
 * Access to IO memory should be serialized by the driver.
 *
 * XXX what about calls before mem_init_done (i.e. python_countermeasures())?
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	im_free(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

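/*
 * Descriptive note (added by the editor, inferred from the code below):
 * iounmap_subset_regions() unmaps every imalloc region that lies entirely
 * within [addr, addr + size) by repeatedly asking im_get_area() for a
 * region the given range is a superset of.  It returns 0 if at least one
 * such region was found and unmapped, 1 if none exist.
 */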
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size, IM_REGION_SUPERSET);
	}

	return 0;
}

int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region.
	 */
	area = im_get_area(addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap. */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);

static DEFINE_SPINLOCK(phb_io_lock);

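/*
 * Descriptive note (added by the editor, inferred from the code below):
 * reserve_phb_iospace() carves the next @size bytes out of the fixed PHB
 * I/O window [PHBS_IO_BASE, IMALLOC_BASE) by advancing phbs_io_bot under
 * phb_io_lock.  There is no release path in this file: PHB I/O space is
 * never handed back.
 */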
void __iomem * reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	spin_lock(&phb_io_lock);
	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;
	spin_unlock(&phb_io_lock);

	return virt_addr;
}