/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <linux/hash.h>
#include <linux/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

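/* The flush flag lives in host memory: STC_FLUSHFLAG_INIT() zeroes it,
 * a write to the strbuf_fsync register asks the streaming cache to
 * store a non-zero value there once all pending flushes have finished,
 * and strbuf_flush() below spins on STC_FLUSHFLAG_SET() waiting for
 * that store.
 */
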
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_table *iommu_table)
{
	struct iommu *iommu = container_of(iommu_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

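/* The context number is folded into IOPTE bits 47 and up; a streaming
 * IOPTE is simply a consistent one with IOPTE_STBUF set, so the access
 * is routed through the streaming buffer.
 */
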
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

static struct iommu_tbl_ops iommu_sparc_ops = {
	.reset	= iommu_flushall
};

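/* The iommu-common allocator splits the table into several pools, each
 * with its own lock; hashing every CPU to a default pool spreads
 * concurrent mapping requests across those locks instead of funnelling
 * them through a single one.
 */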
static void setup_iommu_pool_hash(void)
{
	unsigned int i;
	static bool do_once;

	if (do_once)
		return;
	do_once = true;
	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;
	memset(iommu->tbl.map, 0, sz);
	if (tlb_type != hypervisor)
		iommu_sparc_ops.reset = NULL; /* not needed on sun4v */

	setup_iommu_pool_hash();
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    &iommu_sparc_ops, false, 1);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}

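/* The table allocator signals failure with DMA_ERROR_CODE instead of
 * an entry index, which alloc_npages() translates into a NULL iopte
 * pointer for its callers.
 */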
static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      __this_cpu_read(iommu_pool_hash));
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

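/* Context zero means "no context": iommu_alloc_ctx() falls back to it
 * when the bitmap is exhausted, iommu_free_ctx() ignores it, and the
 * search for free contexts therefore starts at 1 (ctx_lowest_free).
 */
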
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   struct dma_attrs *attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

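/* Illustrative arithmetic (assuming 8K IO pages, IO_PAGE_SHIFT == 13):
 * mapping entry 5 of a table with page_table_map_base 0xc0000000 for a
 * buffer at sub-page offset 0x42 yields the DMA address
 * 0xc0000000 + (5 << 13) | 0x42 == 0xc000a042, as computed in
 * dma_4u_map_page() below.
 */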
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

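/* Two flush mechanisms: when both the streaming cache and the IOMMU
 * support contexts, dirty lines are flushed by context through the
 * strbuf_ctxflush and CTXMATCH registers; otherwise every IO page in
 * the range is flushed individually through strbuf_pflush.  Either way
 * a final flush-flag sync makes the flushes visible before returning.
 */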
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages,
			     false, NULL);
}

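/* The scatterlist mapper coalesces entries whose IOMMU allocations
 * came out contiguous, as long as the merged segment stays within the
 * device's max segment size and does not cross its DMA segment
 * boundary; outcount reports how many segments survive out of the
 * nelems handed in.
 */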
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
					      __this_cpu_read(iommu_pool_hash));

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     false, NULL);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu,
				  struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}

	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, false,
				     NULL);
		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

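/* The sync-for-cpu operations never touch the IOPTEs; they only push
 * dirty data out of the streaming cache so the CPU sees what the
 * device wrote, and are no-ops when the streaming buffer is disabled.
 */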
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

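/* Mapped DMA addresses are u32 here, so masks of 32 bits and up are
 * rejected outright; narrower masks succeed if they cover the IOMMU's
 * dma_addr_mask, with PCI devices getting one more chance through
 * pci64_dma_supported().
 */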
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);