/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid; instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
	 * the available space.  If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	iommu_area_free(arena->map, entry, npages);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
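
/* Illustrative sketch of an assumed caller, not from this file: a bus
 * controller probe path sizes the TSB and passes in its DMA window.
 * The numeric values below are made up for illustration; real callers
 * (e.g. the PCI controller drivers) derive them from the hardware.
 *
 *	err = iommu_table_init(iommu, 128 * 1024 * sizeof(iopte_t),
 *			       0xc0000000, 0xffffffff, numa_node);
 *	if (err)
 *		return err;
 */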

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
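
/* Illustrative sketch of generic DMA API usage, not from this file: a
 * driver reaches dma_4u_alloc_coherent() through dma_alloc_coherent().
 * "my_dev" and "MY_RING_BYTES" are made-up names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&my_dev->dev, MY_RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&my_dev->dev, MY_RING_BYTES, ring, ring_dma);
 */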

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
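
/* Illustrative sketch of generic DMA API usage, not from this file:
 * streaming mappings made by dma_4u_map_page() come in through
 * dma_map_page()/dma_map_single() and must be torn down with the
 * matching unmap.  "my_dev", "skb" and "len" are made-up names.
 *
 *	dma_addr_t busaddr = dma_map_single(&my_dev->dev, skb->data,
 *					    len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&my_dev->dev, busaddr))
 *		return -ENOMEM;
 *	... hand busaddr to the device ...
 *	dma_unmap_single(&my_dev->dev, busaddr, len, DMA_TO_DEVICE);
 */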

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
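
/* Illustrative sketch of generic DMA API usage, not from this file:
 * dma_4u_map_sg() is reached via dma_map_sg(), and since segments may
 * be merged the caller walks only the returned count.  "my_dev", "sgl"
 * and "nents" are made-up names; "sg" and "i" are assumed locals.
 *
 *	int count = dma_map_sg(&my_dev->dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_device(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(&my_dev->dev, sgl, nents, DMA_FROM_DEVICE);
 */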

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
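
/* Illustrative sketch of the assumed dispatch glue, not from this file:
 * the generic dma_* wrappers in the arch dma-mapping header simply
 * indirect through this ops table, roughly:
 *
 *	static inline dma_addr_t dma_map_page(struct device *dev,
 *					      struct page *page,
 *					      unsigned long offset,
 *					      size_t size,
 *					      enum dma_data_direction dir)
 *	{
 *		return dma_ops->map_page(dev, page, offset, size, dir, NULL);
 *	}
 */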

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);
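
/* Illustrative sketch of typical driver-side negotiation, not from this
 * file: a PCI driver probes its mask before mapping anything.  Note that
 * dma_supported() above rejects masks of 2^32 or wider, so a full 32-bit
 * mask (0xffffffff) is the widest that can succeed.  "pdev" is a made-up
 * name.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */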