// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

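/*
 * For reference: a 256MB DVMA window with 4KB pages gives
 * IOMMU_WINSIZE / PAGE_SIZE = 65536 PTEs; at four bytes per iopte_t
 * that is a 256KB table, i.e. an order-6 (64 pages * 4KB) allocation.
 */
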
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

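/*
 * MKIOPTE() packs a page frame number into the IOPTE_PAGE field (pfn
 * shifted up by 8), ORs in the permission bits, and clears the
 * IOPTE_WAZ ("write as zero") bits the hardware expects to read back
 * as zero.
 */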
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need a 256K or 512K or 1M or 2M area aligned to
	   its size, and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

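/*
 * Find every "iommu" node in the device tree, initialize the hardware
 * behind it, and propagate the archdata to child devices so SBus
 * drivers pick up the right IOMMU instance.
 */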
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

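/*
 * Registered as a subsys initcall, presumably so the IOMMU is
 * programmed before device drivers start requesting DMA mappings.
 */
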
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

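/*
 * Reserve a run of IOPTEs for @npages starting at @paddr and return the
 * bus address of the first page. The search is seeded with the pfn so
 * the DVMA page color can match the physical page color on virtually
 * indexed caches.
 */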
static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	unsigned long pfn = __phys_to_pfn(paddr);
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

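/*
 * Map a single page for streaming DMA. With @per_page_flush set, every
 * covered page that has a kernel mapping is flushed to RAM first;
 * highmem pages are assumed not to be cached (see the XXX notes in
 * __sbus_iommu_map_sg() below).
 */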
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	return iommu_get_one(dev, paddr, npages) + off;
}

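/*
 * The gflush/pflush pairs below differ only in flushing strategy: where
 * flush_page_for_dma() flushes the whole cache regardless of its
 * argument, a single call up front suffices (gflush); otherwise each
 * page must be flushed individually (pflush). See ld_mmu_iommu().
 */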
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}

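/*
 * Map a scatterlist entry by entry; each entry gets its own run of
 * IOPTEs. The oldpage test below avoids re-flushing a page already
 * flushed on the previous iteration.
 */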
static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	unsigned long page, oldpage = 0;
	struct scatterlist *sg;
	int i, j, n;

	for_each_sg(sgl, sg, nents, j) {
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if (per_page_flush && !PageHighMem(sg_page(sg))) {
			page = (unsigned long)page_address(sg_page(sg));
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_phys(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}

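/*
 * Tear down a streaming mapping: clear each IOPTE, invalidate the
 * per-page IOTLB entry, then return the range to the allocation bitmap.
 */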
static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

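/*
 * The 0x21212121 poison below ("!!!!" in ASCII) makes a stale
 * dma_address stand out if a driver uses it after unmapping.
 */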
static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
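/*
 * Allocate a DMA-consistent buffer: back it with zeroed free pages,
 * remap them at a DVMA virtual address from sparc_dma_alloc_resource()
 * with dvma_prot (uncached on non-coherent CPUs), and enter them into
 * the IOMMU table with ioperm_noc permissions.
 */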
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
			MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface. Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches. The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

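/*
 * Free a DMA-consistent buffer: clear its IOPTEs, release the DVMA
 * resource, and hand the backing pages back to the allocator.
 */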
static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif /* CONFIG_SBUS */

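/*
 * Two dma_map_ops tables that differ only in the flushing strategy of
 * their map routines; ld_mmu_iommu() installs one of them as dma_ops
 * depending on whether flush_page_for_dma() is global on this CPU.
 */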
static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter what page it is */
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}