// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
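
/*
 * A quick check of the sizing above: a 256 MB window in 4 KB pages is
 * 64K PTEs; at 4 bytes per iopte that is a 256 KB table, i.e. 64
 * contiguous 4 KB pages, hence the order-6 allocation in
 * sbus_iommu_init() below.
 */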

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
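/*
 * With 4 KB pages, (pfn << 8) equals (pfn << PAGE_SHIFT) >> 4, so the
 * IOPTE_PAGE field holds the physical address shifted right by four;
 * the final mask clears the write-as-zero (WAZ) bits.
 */
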
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;
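	/* The DVMA window is thus the top 256 MB of the 32-bit bus
	 * space, 0xF0000000..0xFFFFFFFF, i.e. exactly IOMMU_WINSIZE.
	 */
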
	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;
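	/* E.g. a 256 KB virtually indexed cache gives 64 colors with
	 * 4 KB pages; the allocator will then only return DVMA pages
	 * whose color matches that of the underlying physical page.
	 */
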
	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

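/*
 * Map @npages consecutive struct pages into the IOMMU at a free,
 * color-matching run of PTEs and return the bus address of the first
 * page.
 */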
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

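/*
 * Common tail of the two map_page variants below: reject empty and
 * oversized (> 256 KB) requests, then install the IOMMU PTEs.
 */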
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len)
{
	void *vaddr = page_address(page) + offset;
	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;
	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
}

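/*
 * Two map flavors, chosen in ld_mmu_iommu(): where flush_page_for_dma()
 * flushes the entire cache regardless of its argument, a single global
 * flush is enough (gflush); otherwise each page covered by the mapping
 * is flushed individually (pflush).
 */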
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;

	while (p < (unsigned long)vaddr + len) {
		flush_page_for_dma(p);
		p += PAGE_SIZE;
	}

	return __sbus_iommu_map_page(dev, page, offset, len);
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i, n;

	flush_page_for_dma(0);

	for_each_sg(sgl, sg, nents, i) {
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page, oldpage = 0;
	struct scatterlist *sg;
	int i, j, n;

	for_each_sg(sgl, sg, nents, j) {
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if (!PageHighMem(sg_page(sg))) {
			page = (unsigned long)page_address(sg_page(sg));
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

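/*
 * Undo iommu_get_one(): clear the PTEs backing @busa, invalidate the
 * corresponding IOTLB entries, and release the range in the usemap.
 */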
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long off = dma_addr & ~PAGE_MASK;
	int npages;

	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
		sg->dma_address = 0x21212121;	/* poison stale bus addresses */
	}
}

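/*
 * Consistent (coherent) allocations: back the buffer with freshly
 * zeroed pages, map it at a DVMA virtual address with dvma_prot
 * protections (non-cacheable on cpus that need it), and install the
 * matching IOMMU PTEs. Note the page color is keyed to the physical
 * address here, unlike the pfn-based color in iommu_get_one().
 */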
#ifdef CONFIG_SBUS
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
			addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
			MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface. Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches. The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

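/*
 * The two ops tables differ only in the map_page/map_sg flushing
 * strategy; alloc/free and the unmap paths are shared.
 */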
static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}