sparc/iommu: use sbus_iommu_unmap_page in sbus_iommu_unmap_sg
arch/sparc/mm/iommu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This could be sized dynamically, but we will do so
 * only once we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

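/*
 * IOPERM is the iopte permission set used for streaming mappings.
 * MKIOPTE builds an iopte: the pfn is shifted into the IOPTE_PAGE
 * field (bit 8 and up, as the shift shows) and the IOPTE_WAZ bits
 * (presumably "write as zero" in the usual SRMMU naming) are cleared.
 */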
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

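/*
 * Probe-time setup for one SBus IOMMU: map its registers, enable the
 * 256MB DVMA range, allocate and clear the iopte page table, and set
 * up the allocation bitmap that backs the DVMA address allocator.
 */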
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/*
	 * Allocate the IOMMU page table.  Stupid alignment constraints
	 * give me a headache: we need a 256K, 512K, 1M or 2M area
	 * aligned to its size, and the current gfp will fortunately
	 * give it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/*
	 * To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/*
 * Flush the iotlb entries to RAM.  This could be better if we didn't
 * have to flush whole pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

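/*
 * Reserve npages consecutive iopte slots (color-matched to the pfn so
 * DVMA and physical addresses land on the same cache color), point
 * them at the given pages, and return the base bus address.
 */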
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

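/*
 * Common mapping path: round the buffer out to whole pages, refuse
 * anything larger than 256KB, and hand the page run to iommu_get_one().
 */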
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len)
{
	void *vaddr = page_address(page) + offset;
	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;
	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
}

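/*
 * map_page for CPUs whose flush_page_for_dma flushes the whole cache:
 * one global flush up front covers the entire buffer.
 */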
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len);
}

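/*
 * map_page for CPUs that flush per page: walk the buffer and flush
 * each page that will be handed to the device.
 */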
static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;

	while (p < (unsigned long)vaddr + len) {
		flush_page_for_dma(p);
		p += PAGE_SIZE;
	}

	return __sbus_iommu_map_page(dev, page, offset, len);
}

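/*
 * Scatterlist mapping, global-flush variant: flush once, then map each
 * segment through iommu_get_one().
 */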
static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i, n;

	flush_page_for_dma(0);

	for_each_sg(sgl, sg, nents, i) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

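/*
 * Scatterlist mapping, per-page-flush variant.  Lowmem pages are
 * flushed page by page, skipping a page already flushed on the
 * previous iteration; highmem pages are assumed not to be in the
 * cache, as the XXX comments below note.
 */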
static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page, oldpage = 0;
	struct scatterlist *sg;
	int i, j, n;

	for_each_sg(sgl, sg, nents, j) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if (!PageHighMem(sg_page(sg))) {
			page = (unsigned long)page_address(sg_page(sg));
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

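/*
 * Clear npages iopte entries starting at bus address busa, invalidate
 * the matching IOTLB entries, and release the slots in the bitmap.
 */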
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

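/*
 * Undo a map_page: recompute the page count from the offset and length
 * the same way the map side did, then release the iopte range.
 */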
static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long off = dma_addr & ~PAGE_MASK;
	int npages;

	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
}

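/*
 * Undo a map_sg by unmapping each segment through
 * sbus_iommu_unmap_page(), then poisoning dma_address (presumably to
 * make use-after-unmap fail loudly).
 */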
static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
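/*
 * Allocate a consistent (coherent) DMA buffer: grab zeroed pages, remap
 * them with the non-cacheable dvma_prot protection into the DVMA
 * resource area, and build matching ioptes with the precomputed
 * ioperm_noc flags.
 */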
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
			addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
			MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

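/*
 * Free a consistent buffer: tear down the DVMA resource mapping, clear
 * the ioptes, flush, and return the backing pages.
 */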
static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

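/*
 * Two dma_map_ops tables that differ only in the streaming map methods:
 * the gflush variants issue one global cache flush, the pflush variants
 * flush page by page.  ld_mmu_iommu() picks one at boot.
 */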
static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc		= sbus_iommu_alloc,
	.free		= sbus_iommu_free,
#endif
	.map_page	= sbus_iommu_map_page_gflush,
	.unmap_page	= sbus_iommu_unmap_page,
	.map_sg		= sbus_iommu_map_sg_gflush,
	.unmap_sg	= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc		= sbus_iommu_alloc,
	.free		= sbus_iommu_free,
#endif
	.map_page	= sbus_iommu_map_page_pflush,
	.unmap_page	= sbus_iommu_unmap_page,
	.map_sg		= sbus_iommu_map_sg_pflush,
	.unmap_sg	= sbus_iommu_unmap_sg,
};

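/*
 * Late MMU setup: select the flush strategy for the dma_map_ops and
 * precompute the pte/iopte flags used for consistent mappings.
 */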
void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, regardless of which page it is given */
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}