sparc/iommu: use __sbus_iommu_map_page to implement the map_sg path
arch/sparc/mm/iommu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

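/*
 * Probe-time setup for one SBus IOMMU: map its registers, enable the
 * 256MB DVMA window, allocate and clear the IOMMU page table, point the
 * hardware at it, and initialize the bitmap that backs DVMA address
 * allocation.
 */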
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

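/*
 * Walk the device tree and initialize every node named "iommu",
 * attaching the resulting iommu_struct to the platform device so that
 * child SBus devices can inherit it via of_propagate_archdata().
 */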
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

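/*
 * Allocate a contiguous run of npages IOMMU PTEs for the physical range
 * starting at paddr, fill them in, and return the base DVMA bus address.
 * The pfn is passed as the page color so that DVMA and physical
 * addresses stay congruent where the cache requires it (HyperSparc).
 */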
static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	unsigned long pfn = __phys_to_pfn(paddr);
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

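/*
 * Common helper for the map_page and map_sg paths: validate the length,
 * optionally write back the CPU cache for each covered page, and install
 * the IOMMU mapping.  Returns the bus address, or DMA_MAPPING_ERROR.
 */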
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	/*
	 * We expect unmapped highmem pages to be not in the cache.
	 * XXX Is this a good assumption?
	 * XXX What if someone else unmaps it here and races us?
	 */
	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	return iommu_get_one(dev, paddr, npages) + off;
}

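/*
 * Global-flush variant: on these CPUs flush_page_for_dma(0) flushes the
 * whole cache, so one call up front covers the entire mapping and no
 * per-page flushing is needed.
 */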
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

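/* Per-page-flush variant: __sbus_iommu_map_page() flushes each page. */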
static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}

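/*
 * Map a scatterlist: each entry goes through __sbus_iommu_map_page(),
 * so the sg path shares the length check and cache flushing policy with
 * the single-page path.  On error, 0 is returned; note that entries
 * already mapped are not unwound here.
 */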
static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	struct scatterlist *sg;
	int j;

	for_each_sg(sgl, sg, nents, j) {
		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
				sg->offset, sg->length, per_page_flush);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return 0;
		sg->dma_length = sg->length;
	}

	return nents;
}

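/* Global- and per-page-flush scatterlist wrappers, mirroring map_page. */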
static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}

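/*
 * Tear down a mapping made by __sbus_iommu_map_page(): clear the IOMMU
 * PTEs, invalidate the per-page IOTLB entries, and release the range in
 * the allocation bitmap.
 */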
static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

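/*
 * Unmap a scatterlist one entry at a time, poisoning each bus address
 * afterwards so stale uses are easier to spot.
 */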
static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
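/*
 * Allocate a DMA-coherent buffer: back it with free pages, carve a
 * matching range out of the sparc DVMA resource space, remap the pages
 * there with dvma_prot (uncached unless the cache is DMA-coherent), and
 * wire up IOMMU PTEs so the device sees the same buffer.
 */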
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
			addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

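/*
 * Undo sbus_iommu_alloc(): release the DVMA resource, clear the IOMMU
 * PTEs, and return the backing pages to the page allocator.
 */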
static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

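/*
 * Two dma_map_ops variants that differ only in how CPU caches are
 * flushed before DMA: one global flush versus one flush per page.
 */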
static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

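/*
 * Called during early MMU setup to select the dma_map_ops variant and
 * the consistent-mapping protections appropriate for this CPU type.
 */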
void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
466}