Commit | Line | Data |
---|---|---|
ad7ad57c | 1 | /* iommu.c: Generic sparc64 IOMMU support. |
1da177e4 | 2 | * |
d284142c | 3 | * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) |
1da177e4 | 4 | * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) |
5 | */ | |
6 | ||
7 | #include <linux/kernel.h> | |
066bcaca | 8 | #include <linux/export.h> |
5a0e3ad6 | 9 | #include <linux/slab.h> |
4dbc30fb | 10 | #include <linux/delay.h> |
ad7ad57c | 11 | #include <linux/device.h>
12 | #include <linux/dma-mapping.h> | |
13 | #include <linux/errno.h> | |
d284142c | 14 | #include <linux/iommu-helper.h> |
a66022c4 | 15 | #include <linux/bitmap.h> |
bb620c3d | 16 | #include <linux/iommu-common.h> |
ad7ad57c | 17 |
18 | #ifdef CONFIG_PCI | |
c57c2ffb | 19 | #include <linux/pci.h> |
ad7ad57c | 20 | #endif |
1da177e4 | 21 | |
ad7ad57c | 22 | #include <asm/iommu.h> |
1da177e4 | 23 |
24 | #include "iommu_common.h" | |
4ac7b826 | 25 | #include "kernel.h" |
1da177e4 | 26 | |
ad7ad57c | 27 | #define STC_CTXMATCH_ADDR(STC, CTX) \ |
1da177e4 | 28 | ((STC)->strbuf_ctxmatch_base + ((CTX) << 3)) |
ad7ad57c | 29 | #define STC_FLUSHFLAG_INIT(STC) \
30 | (*((STC)->strbuf_flushflag) = 0UL) | |
31 | #define STC_FLUSHFLAG_SET(STC) \ | |
32 | (*((STC)->strbuf_flushflag) != 0UL) | |
1da177e4 | 33 | |
ad7ad57c | 34 | #define iommu_read(__reg) \ |
1da177e4 | 35 | ({ u64 __ret; \
36 | __asm__ __volatile__("ldxa [%1] %2, %0" \ | |
37 | : "=r" (__ret) \ | |
38 | : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ | |
39 | : "memory"); \ | |
40 | __ret; \ | |
41 | }) | |
ad7ad57c | 42 | #define iommu_write(__reg, __val) \ |
1da177e4 | 43 | __asm__ __volatile__("stxa %0, [%1] %2" \
44 | : /* no outputs */ \ | |
45 | : "r" (__val), "r" (__reg), \ | |
46 | "i" (ASI_PHYS_BYPASS_EC_E)) | |
47 | ||
48 | /* Must be invoked under the IOMMU lock. */ | |
bb620c3d | 49 | static void iommu_flushall(struct iommu_map_table *iommu_map_table) |
1da177e4 | 50 | { |
bb620c3d | 51 | struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl); |
861fe906 | 52 | if (iommu->iommu_flushinv) { |
ad7ad57c | 53 | iommu_write(iommu->iommu_flushinv, ~(u64)0); |
861fe906 | 54 | } else {
55 | unsigned long tag; | |
56 | int entry; | |
1da177e4 | 57 | |
ad7ad57c | 58 | tag = iommu->iommu_tags; |
861fe906 | 59 | for (entry = 0; entry < 16; entry++) { |
ad7ad57c | 60 | iommu_write(tag, 0); |
861fe906 | 61 | tag += 8;
62 | } | |
1da177e4 | 63 | |
861fe906 | 64 | /* Ensure completion of previous PIO writes. */ |
ad7ad57c | 65 | (void) iommu_read(iommu->write_complete_reg); |
861fe906 | 66 | } |
1da177e4 | 67 | }
68 | ||
69 | #define IOPTE_CONSISTENT(CTX) \ | |
70 | (IOPTE_VALID | IOPTE_CACHE | \ | |
71 | (((CTX) << 47) & IOPTE_CONTEXT)) | |
72 | ||
73 | #define IOPTE_STREAMING(CTX) \ | |
74 | (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF) | |
75 | ||
76 | /* Existing mappings are never marked invalid, instead they | |
77 | * are pointed to a dummy page. | |
78 | */ | |
79 | #define IOPTE_IS_DUMMY(iommu, iopte) \ | |
80 | ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) | |
81 | ||
16ce82d8 | 82 | static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) |
1da177e4 | 83 | {
84 | unsigned long val = iopte_val(*iopte); | |
85 | ||
86 | val &= ~IOPTE_PAGE; | |
87 | val |= iommu->dummy_page_pa; | |
88 | ||
89 | iopte_val(*iopte) = val; | |
90 | } | |
91 | ||
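
The dummy-page comment and the IOPTE_* macros above describe how entries are composed: a context number, cacheability/streaming bits, and a physical page address, with unused entries parked on the dummy page instead of being cleared. As an illustrative sketch only (the IOPTE_* constants live in asm/iommu.h; `example_iopte` is not part of this file), a writable streaming entry is built like this:

```c
/* Illustrative only: compose a writable, streaming IO PTE for one page,
 * the same way dma_4u_map_page() below fills the TSB.  "ctx" would come
 * from iommu_alloc_ctx(); "paddr" is the physical address of the page.
 */
static inline unsigned long example_iopte(unsigned long paddr, unsigned long ctx)
{
        unsigned long prot = IOPTE_STREAMING(ctx) | IOPTE_WRITE;

        return prot | (paddr & IOPTE_PAGE);
}
```
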
ad7ad57c | 92 | int iommu_table_init(struct iommu *iommu, int tsbsize, |
c1b1a5f1 | 93 | u32 dma_offset, u32 dma_addr_mask,
94 | int numa_node) | |
1da177e4 | 95 | { |
c1b1a5f1 | 96 | unsigned long i, order, sz, num_tsb_entries;
97 | struct page *page; | |
688cb30b | 98 |
99 | num_tsb_entries = tsbsize / sizeof(iopte_t); | |
51e85136 | 100 |
101 | /* Setup initial software IOMMU state. */ | |
102 | spin_lock_init(&iommu->lock); | |
103 | iommu->ctx_lowest_free = 1; | |
bb620c3d | 104 | iommu->tbl.table_map_base = dma_offset; |
51e85136 | 105 | iommu->dma_addr_mask = dma_addr_mask;
106 | ||
688cb30b | 107 | /* Allocate and initialize the free area map. */
108 | sz = num_tsb_entries / 8; | |
109 | sz = (sz + 7UL) & ~7UL; | |
bb620c3d | 110 | iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
111 | if (!iommu->tbl.map) | |
ad7ad57c | 112 | return -ENOMEM; |
bb620c3d | 113 | memset(iommu->tbl.map, 0, sz); |
f1600e54 | 114 | |
bb620c3d | 115 | iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
116 | (tlb_type != hypervisor ? iommu_flushall : NULL), | |
117 | false, 1, false); | |
d284142c | 118 | |
51e85136 | 119 | /* Allocate and initialize the dummy page which we
120 | * set inactive IO PTEs to point to. | |
121 | */ | |
c1b1a5f1 | 122 | page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
123 | if (!page) { | |
ad7ad57c | 124 | printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
125 | goto out_free_map; | |
51e85136 | 126 | } |
c1b1a5f1 | 127 | iommu->dummy_page = (unsigned long) page_address(page);
128 | memset((void *)iommu->dummy_page, 0, PAGE_SIZE); | |
51e85136 | 129 | iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
130 | ||
131 | /* Now allocate and setup the IOMMU page table itself. */ | |
132 | order = get_order(tsbsize); | |
c1b1a5f1 | 133 | page = alloc_pages_node(numa_node, GFP_KERNEL, order);
134 | if (!page) { | |
ad7ad57c | 135 | printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
136 | goto out_free_dummy_page; | |
51e85136 | 137 | } |
c1b1a5f1 | 138 | iommu->page_table = (iopte_t *)page_address(page); |
1da177e4 | 139 | |
688cb30b | 140 | for (i = 0; i < num_tsb_entries; i++) |
1da177e4 | 141 | iopte_make_dummy(iommu, &iommu->page_table[i]); |
ad7ad57c | 142 |
143 | return 0; | |
144 | ||
145 | out_free_dummy_page: | |
146 | free_page(iommu->dummy_page); | |
147 | iommu->dummy_page = 0UL; | |
148 | ||
149 | out_free_map: | |
bb620c3d | 150 | kfree(iommu->tbl.map);
151 | iommu->tbl.map = NULL; | |
ad7ad57c | 152 |
153 | return -ENOMEM; | |
1da177e4 | 154 | }
155 | ||
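
A bus controller driver is expected to call iommu_table_init() once per IOMMU during probe, before programming the hardware TSB base register. A hypothetical sketch (the TSB size, DVMA offset and mask below are made up, not taken from a real controller):

```c
/* Hypothetical probe-time setup: 128 KB TSB, DVMA window at 0xc0000000,
 * 32-bit DMA addressing, allocated on the controller's NUMA node.
 */
static int example_controller_iommu_init(struct iommu *iommu, int numa_node)
{
        int err;

        err = iommu_table_init(iommu, 128 * 1024, 0xc0000000U,
                               0xffffffffU, numa_node);
        if (err)
                return err;

        /* Programming of the IOMMU control and TSB base registers follows here. */
        return 0;
}
```
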
bb620c3d | 156 | static inline iopte_t *alloc_npages(struct device *dev,
157 | struct iommu *iommu, | |
d284142c | 158 | unsigned long npages) |
1da177e4 | 159 | { |
d284142c | 160 | unsigned long entry; |
1da177e4 | 161 | |
bb620c3d | 162 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
163 | (unsigned long)(-1), 0); | |
d618382b | 164 | if (unlikely(entry == IOMMU_ERROR_CODE)) |
688cb30b | 165 | return NULL; |
1da177e4 | 166 | |
688cb30b | 167 | return iommu->page_table + entry; |
1da177e4 | 168 | }
169 | ||
16ce82d8 | 170 | static int iommu_alloc_ctx(struct iommu *iommu) |
7c963ad1 | 171 | {
172 | int lowest = iommu->ctx_lowest_free; | |
711c71a0 | 173 | int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); |
7c963ad1 | 174 | |
711c71a0 | 175 | if (unlikely(n == IOMMU_NUM_CTXS)) { |
7c963ad1 | 176 | n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
177 | if (unlikely(n == lowest)) { | |
178 | printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); | |
179 | n = 0; | |
180 | } | |
181 | } | |
182 | if (n) | |
183 | __set_bit(n, iommu->ctx_bitmap); | |
184 | ||
185 | return n; | |
186 | } | |
187 | ||
16ce82d8 | 188 | static inline void iommu_free_ctx(struct iommu *iommu, int ctx) |
7c963ad1 | 189 | {
190 | if (likely(ctx)) { | |
191 | __clear_bit(ctx, iommu->ctx_bitmap); | |
192 | if (ctx < iommu->ctx_lowest_free) | |
193 | iommu->ctx_lowest_free = ctx; | |
194 | } | |
195 | } | |
196 | ||
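
The context allocator only matters on hardware that supports context-matching flushes; callers take iommu->lock around allocation and fold the context number into the IOPTE protection bits. A condensed sketch of that lifecycle, mirroring what dma_4u_map_page() does below (the function name is illustrative):

```c
/* Illustrative context lifecycle: allocate under the lock only when the
 * hardware can flush by context, use the context in the protection bits,
 * then release it when the mapping is torn down or on the error path.
 */
static void example_ctx_lifecycle(struct iommu *iommu)
{
        unsigned long flags, ctx = 0;

        spin_lock_irqsave(&iommu->lock, flags);
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        /* ... fill IOPTEs with IOPTE_STREAMING(ctx) or IOPTE_CONSISTENT(ctx) ... */

        iommu_free_ctx(iommu, ctx);
}
```
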
ad7ad57c | 197 | static void *dma_4u_alloc_coherent(struct device *dev, size_t size, |
c416258a | 198 | dma_addr_t *dma_addrp, gfp_t gfp,
199 | struct dma_attrs *attrs) | |
1da177e4 | 200 | { |
bb620c3d | 201 | unsigned long order, first_page; |
16ce82d8 | 202 | struct iommu *iommu; |
c1b1a5f1 | 203 | struct page *page;
204 | int npages, nid; | |
1da177e4 | 205 | iopte_t *iopte; |
1da177e4 | 206 | void *ret; |
1da177e4 | 207 |
208 | size = IO_PAGE_ALIGN(size); | |
209 | order = get_order(size); | |
210 | if (order >= 10) | |
211 | return NULL; | |
212 | ||
c1b1a5f1 | 213 | nid = dev->archdata.numa_node;
214 | page = alloc_pages_node(nid, gfp, order); | |
215 | if (unlikely(!page)) | |
1da177e4 | 216 | return NULL; |
c1b1a5f1 | 217 |
218 | first_page = (unsigned long) page_address(page); | |
1da177e4 | 219 | memset((char *)first_page, 0, PAGE_SIZE << order);
220 | ||
ad7ad57c | 221 | iommu = dev->archdata.iommu; |
1da177e4 | 222 | |
d284142c | 223 | iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT); |
688cb30b | 224 |
225 | if (unlikely(iopte == NULL)) { | |
1da177e4 | 226 | free_pages(first_page, order);
227 | return NULL; | |
228 | } | |
229 | ||
bb620c3d | 230 | *dma_addrp = (iommu->tbl.table_map_base + |
1da177e4 | 231 | ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
232 | ret = (void *) first_page; | |
233 | npages = size >> IO_PAGE_SHIFT; | |
1da177e4 | 234 | first_page = __pa(first_page);
235 | while (npages--) { | |
688cb30b | 236 | iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) | |
1da177e4 | 237 | IOPTE_WRITE |
238 | (first_page & IOPTE_PAGE)); | |
239 | iopte++; | |
240 | first_page += IO_PAGE_SIZE; | |
241 | } | |
242 | ||
1da177e4 | 243 | return ret;
244 | } | |
245 | ||
ad7ad57c | 246 | static void dma_4u_free_coherent(struct device *dev, size_t size, |
c416258a | 247 | void *cpu, dma_addr_t dvma,
248 | struct dma_attrs *attrs) | |
1da177e4 | 249 | { |
16ce82d8 | 250 | struct iommu *iommu; |
bb620c3d | 251 | unsigned long order, npages; |
1da177e4 | 252 |
253 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | |
ad7ad57c | 254 | iommu = dev->archdata.iommu; |
1da177e4 | 255 | |
d618382b | 256 | iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); |
1da177e4 | 257 |
258 | order = get_order(size); | |
259 | if (order < 10) | |
260 | free_pages((unsigned long)cpu, order); | |
261 | } | |
262 | ||
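
Device drivers never call these handlers directly; they reach them through the generic DMA API via the sun4u_dma_ops table at the end of this file. A minimal consumer-side sketch (the device pointer and 4 KB size are illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver helper: allocate a small coherent buffer, which on
 * sun4u lands in dma_4u_alloc_coherent() above.
 */
static void *example_alloc_coherent_buffer(struct device *dev,
                                           dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(dev, 4096, dma_handle, GFP_KERNEL);
}
```
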
797a7568 | 263 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
264 | unsigned long offset, size_t sz, | |
bc0a14f1 | 265 | enum dma_data_direction direction,
266 | struct dma_attrs *attrs) | |
1da177e4 | 267 | { |
16ce82d8 | 268 | struct iommu *iommu;
269 | struct strbuf *strbuf; | |
1da177e4 | 270 | iopte_t *base;
271 | unsigned long flags, npages, oaddr; | |
272 | unsigned long i, base_paddr, ctx; | |
273 | u32 bus_addr, ret; | |
274 | unsigned long iopte_protection; | |
275 | ||
ad7ad57c | 276 | iommu = dev->archdata.iommu;
277 | strbuf = dev->archdata.stc; | |
1da177e4 | 278 | |
ad7ad57c | 279 | if (unlikely(direction == DMA_NONE)) |
688cb30b | 280 | goto bad_no_ctx; |
1da177e4 | 281 | |
797a7568 | 282 | oaddr = (unsigned long)(page_address(page) + offset); |
1da177e4 | 283 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
284 | npages >>= IO_PAGE_SHIFT; | |
285 | ||
c12f048f | 286 | base = alloc_npages(dev, iommu, npages); |
bb620c3d | 287 | spin_lock_irqsave(&iommu->lock, flags); |
688cb30b | 288 | ctx = 0;
289 | if (iommu->iommu_ctxflush) | |
290 | ctx = iommu_alloc_ctx(iommu); | |
291 | spin_unlock_irqrestore(&iommu->lock, flags); | |
1da177e4 | 292 | |
688cb30b | 293 | if (unlikely(!base)) |
1da177e4 | 294 | goto bad; |
688cb30b | 295 | |
bb620c3d | 296 | bus_addr = (iommu->tbl.table_map_base + |
1da177e4 | 297 | ((base - iommu->page_table) << IO_PAGE_SHIFT));
298 | ret = bus_addr | (oaddr & ~IO_PAGE_MASK); | |
299 | base_paddr = __pa(oaddr & IO_PAGE_MASK); | |
1da177e4 | 300 | if (strbuf->strbuf_enabled)
301 | iopte_protection = IOPTE_STREAMING(ctx); | |
302 | else | |
303 | iopte_protection = IOPTE_CONSISTENT(ctx); | |
ad7ad57c | 304 | if (direction != DMA_TO_DEVICE) |
1da177e4 | 305 | iopte_protection |= IOPTE_WRITE;
306 | ||
307 | for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) | |
308 | iopte_val(*base) = iopte_protection | base_paddr; | |
309 | ||
1da177e4 | 310 | return ret;
311 | ||
312 | bad: | |
688cb30b | 313 | iommu_free_ctx(iommu, ctx);
314 | bad_no_ctx: | |
315 | if (printk_ratelimit()) | |
316 | WARN_ON(1); | |
ad7ad57c | 317 | return DMA_ERROR_CODE; |
1da177e4 | 318 | }
319 | ||
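
Again from the consumer side (not part of this file): a driver obtains a streaming mapping through dma_map_page(), which dispatches to dma_4u_map_page() above on sun4u, and must check the result for a mapping error:

```c
#include <linux/dma-mapping.h>

/* Hypothetical receive-buffer mapping; the names and the DMA_FROM_DEVICE
 * direction are illustrative.  A zero return means failure in this sketch.
 */
static dma_addr_t example_map_rx_page(struct device *dev, struct page *page,
                                      size_t len)
{
        dma_addr_t busaddr = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, busaddr))
                return 0;
        return busaddr;
}
```
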
ad7ad57c | 320 | static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
321 | u32 vaddr, unsigned long ctx, unsigned long npages, | |
322 | enum dma_data_direction direction) | |
4dbc30fb | 323 | {
324 | int limit; | |
325 | ||
4dbc30fb | 326 | if (strbuf->strbuf_ctxflush &&
327 | iommu->iommu_ctxflush) { | |
328 | unsigned long matchreg, flushreg; | |
7c963ad1 | 329 | u64 val; |
4dbc30fb | 330 |
331 | flushreg = strbuf->strbuf_ctxflush; | |
ad7ad57c | 332 | matchreg = STC_CTXMATCH_ADDR(strbuf, ctx); |
4dbc30fb | 333 | |
ad7ad57c | 334 | iommu_write(flushreg, ctx);
335 | val = iommu_read(matchreg); | |
88314ee7 | 336 | val &= 0xffff;
337 | if (!val) | |
7c963ad1 | 338 | goto do_flush_sync;
339 | ||
7c963ad1 | 340 | while (val) {
341 | if (val & 0x1) | |
ad7ad57c | 342 | iommu_write(flushreg, ctx); |
7c963ad1 | 343 | val >>= 1; |
a228dfd5 | 344 | } |
ad7ad57c | 345 | val = iommu_read(matchreg); |
7c963ad1 | 346 | if (unlikely(val)) { |
ad7ad57c | 347 | printk(KERN_WARNING "strbuf_flush: ctx flush " |
90181136 | 348 | "timeout matchreg[%llx] ctx[%lx]\n", |
7c963ad1 | 349 | val, ctx);
350 | goto do_page_flush; | |
351 | } | |
4dbc30fb | 352 | } else {
353 | unsigned long i; | |
354 | ||
7c963ad1 | 355 | do_page_flush: |
4dbc30fb | 356 | for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE) |
ad7ad57c | 357 | iommu_write(strbuf->strbuf_pflush, vaddr); |
4dbc30fb | 358 | }
359 | ||
7c963ad1 | 360 | do_flush_sync:
361 | /* If the device could not have possibly put dirty data into | |
362 | * the streaming cache, no flush-flag synchronization needs | |
363 | * to be performed. | |
364 | */ | |
ad7ad57c | 365 | if (direction == DMA_TO_DEVICE) |
7c963ad1 | 366 | return;
367 | ||
ad7ad57c | 368 | STC_FLUSHFLAG_INIT(strbuf);
369 | iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa); | |
370 | (void) iommu_read(iommu->write_complete_reg); | |
4dbc30fb | 371 | |
a228dfd5 | 372 | limit = 100000; |
ad7ad57c | 373 | while (!STC_FLUSHFLAG_SET(strbuf)) { |
4dbc30fb | 374 | limit--;
375 | if (!limit) | |
376 | break; | |
a228dfd5 | 377 | udelay(1); |
4f07118f | 378 | rmb(); |
4dbc30fb | 379 | }
380 | if (!limit) | |
ad7ad57c | 381 | printk(KERN_WARNING "strbuf_flush: flushflag timeout " |
4dbc30fb | 382 | "vaddr[%08x] ctx[%lx] npages[%ld]\n",
383 | vaddr, ctx, npages); | |
384 | } | |
385 | ||
797a7568 | 386 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, |
bc0a14f1 | 387 | size_t sz, enum dma_data_direction direction,
388 | struct dma_attrs *attrs) | |
1da177e4 | 389 | { |
16ce82d8 | 390 | struct iommu *iommu;
391 | struct strbuf *strbuf; | |
1da177e4 | 392 | iopte_t *base; |
688cb30b | 393 | unsigned long flags, npages, ctx, i; |
1da177e4 | 394 | |
ad7ad57c | 395 | if (unlikely(direction == DMA_NONE)) { |
688cb30b | 396 | if (printk_ratelimit())
397 | WARN_ON(1); | |
398 | return; | |
399 | } | |
1da177e4 | 400 | |
ad7ad57c | 401 | iommu = dev->archdata.iommu;
402 | strbuf = dev->archdata.stc; | |
1da177e4 | 403 |
404 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | |
405 | npages >>= IO_PAGE_SHIFT; | |
406 | base = iommu->page_table + | |
bb620c3d | 407 | ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); |
1da177e4 | 408 | bus_addr &= IO_PAGE_MASK;
409 | ||
410 | spin_lock_irqsave(&iommu->lock, flags); | |
411 | ||
412 | /* Record the context, if any. */ | |
413 | ctx = 0; | |
414 | if (iommu->iommu_ctxflush) | |
415 | ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; | |
416 | ||
417 | /* Step 1: Kick data out of streaming buffers if necessary. */ | |
4dbc30fb | 418 | if (strbuf->strbuf_enabled) |
ad7ad57c | 419 | strbuf_flush(strbuf, iommu, bus_addr, ctx,
420 | npages, direction); | |
1da177e4 | 421 | |
688cb30b | 422 | /* Step 2: Clear out TSB entries. */
423 | for (i = 0; i < npages; i++) | |
424 | iopte_make_dummy(iommu, base + i); | |
1da177e4 | 425 | |
7c963ad1 | 426 | iommu_free_ctx(iommu, ctx); |
c12f048f | 427 | spin_unlock_irqrestore(&iommu->lock, flags); |
bb620c3d | 428 | |
d618382b | 429 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); |
1da177e4 | 430 | }
431 | ||
ad7ad57c | 432 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
bc0a14f1 | 433 | int nelems, enum dma_data_direction direction,
434 | struct dma_attrs *attrs) | |
1da177e4 | 435 | { |
13fa14e1 | 436 | struct scatterlist *s, *outs, *segstart;
437 | unsigned long flags, handle, prot, ctx; | |
438 | dma_addr_t dma_next = 0, dma_addr; | |
439 | unsigned int max_seg_size; | |
f0880257 | 440 | unsigned long seg_boundary_size; |
13fa14e1 | 441 | int outcount, incount, i; |
16ce82d8 | 442 | struct strbuf *strbuf; |
38192d52 | 443 | struct iommu *iommu; |
f0880257 | 444 | unsigned long base_shift; |
13fa14e1 | 445 |
446 | BUG_ON(direction == DMA_NONE); | |
1da177e4 | 447 | |
ad7ad57c | 448 | iommu = dev->archdata.iommu;
449 | strbuf = dev->archdata.stc; | |
13fa14e1 | 450 | if (nelems == 0 || !iommu)
451 | return 0; | |
1da177e4 | 452 |
453 | spin_lock_irqsave(&iommu->lock, flags); | |
454 | ||
688cb30b | 455 | ctx = 0;
456 | if (iommu->iommu_ctxflush) | |
457 | ctx = iommu_alloc_ctx(iommu); | |
458 | ||
1da177e4 | 459 | if (strbuf->strbuf_enabled) |
13fa14e1 | 460 | prot = IOPTE_STREAMING(ctx); |
1da177e4 | 461 | else |
13fa14e1 | 462 | prot = IOPTE_CONSISTENT(ctx); |
ad7ad57c | 463 | if (direction != DMA_TO_DEVICE) |
13fa14e1 | 464 | prot |= IOPTE_WRITE;
465 | ||
466 | outs = s = segstart = &sglist[0]; | |
467 | outcount = 1; | |
468 | incount = nelems; | |
469 | handle = 0; | |
470 | ||
471 | /* Init first segment length for backout at failure */ | |
472 | outs->dma_length = 0; | |
473 | ||
474 | max_seg_size = dma_get_max_seg_size(dev); | |
f0880257 | 475 | seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
476 | IO_PAGE_SIZE) >> IO_PAGE_SHIFT; | |
bb620c3d | 477 | base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; |
13fa14e1 | 478 | for_each_sg(sglist, s, nelems, i) { |
f0880257 | 479 | unsigned long paddr, npages, entry, out_entry = 0, slen; |
13fa14e1 | 480 | iopte_t *base;
481 | ||
482 | slen = s->length; | |
483 | /* Sanity check */ | |
484 | if (slen == 0) { | |
485 | dma_next = 0; | |
486 | continue; | |
487 | } | |
488 | /* Allocate iommu entries for that segment */ | |
489 | paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); | |
0fcff28f | 490 | npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); |
bb620c3d | 491 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
492 | &handle, (unsigned long)(-1), 0); | |
13fa14e1 | 493 |
494 | /* Handle failure */ | |
d618382b | 495 | if (unlikely(entry == IOMMU_ERROR_CODE)) { |
13fa14e1 | 496 | if (printk_ratelimit())
497 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" | |
498 | " npages %lx\n", iommu, paddr, npages); | |
499 | goto iommu_map_failed; | |
500 | } | |
688cb30b | 501 | |
13fa14e1 | 502 | base = iommu->page_table + entry; |
1da177e4 | 503 | |
13fa14e1 | 504 | /* Convert entry to a dma_addr_t */ |
bb620c3d | 505 | dma_addr = iommu->tbl.table_map_base + |
13fa14e1 | 506 | (entry << IO_PAGE_SHIFT);
507 | dma_addr |= (s->offset & ~IO_PAGE_MASK); | |
38192d52 | 508 | |
13fa14e1 | 509 | /* Insert into HW table */ |
38192d52 | 510 | paddr &= IO_PAGE_MASK; |
13fa14e1 | 511 | while (npages--) {
512 | iopte_val(*base) = prot | paddr; | |
38192d52 | 513 | base++;
514 | paddr += IO_PAGE_SIZE; | |
38192d52 | 515 | } |
13fa14e1 | 516 |
517 | /* If we are in an open segment, try merging */ | |
518 | if (segstart != s) { | |
519 | /* We cannot merge if: | |
520 | * - allocated dma_addr isn't contiguous to previous allocation | |
521 | */ | |
522 | if ((dma_addr != dma_next) || | |
f0880257 | 523 | (outs->dma_length + s->length > max_seg_size) ||
524 | (is_span_boundary(out_entry, base_shift, | |
525 | seg_boundary_size, outs, s))) { | |
13fa14e1 | 526 | /* Can't merge: create a new segment */
527 | segstart = s; | |
528 | outcount++; | |
529 | outs = sg_next(outs); | |
530 | } else { | |
531 | outs->dma_length += s->length; | |
532 | } | |
533 | } | |
534 | ||
535 | if (segstart == s) { | |
536 | /* This is a new segment, fill entries */ | |
537 | outs->dma_address = dma_addr; | |
538 | outs->dma_length = slen; | |
f0880257 | 539 | out_entry = entry; |
13fa14e1 | 540 | }
541 | ||
542 | /* Calculate next page pointer for contiguous check */ | |
543 | dma_next = dma_addr + slen; | |
38192d52 | 544 | }
545 | ||
13fa14e1 | 546 | spin_unlock_irqrestore(&iommu->lock, flags);
547 | ||
548 | if (outcount < incount) { | |
549 | outs = sg_next(outs); | |
550 | outs->dma_address = DMA_ERROR_CODE; | |
551 | outs->dma_length = 0; | |
552 | } | |
553 | ||
554 | return outcount; | |
555 | ||
556 | iommu_map_failed: | |
557 | for_each_sg(sglist, s, nelems, i) { | |
558 | if (s->dma_length != 0) { | |
6c830fef | 559 | unsigned long vaddr, npages, entry, j; |
13fa14e1 | 560 | iopte_t *base;
561 | ||
562 | vaddr = s->dma_address & IO_PAGE_MASK; | |
0fcff28f | 563 | npages = iommu_num_pages(s->dma_address, s->dma_length,
564 | IO_PAGE_SIZE); | |
13fa14e1 | 565 | |
bb620c3d | 566 | entry = (vaddr - iommu->tbl.table_map_base) |
13fa14e1 | 567 | >> IO_PAGE_SHIFT;
568 | base = iommu->page_table + entry; | |
569 | ||
6c830fef | 570 | for (j = 0; j < npages; j++)
571 | iopte_make_dummy(iommu, base + j); | |
13fa14e1 | 572 | |
bb620c3d | 573 | iommu_tbl_range_free(&iommu->tbl, vaddr, npages, |
d618382b | 574 | IOMMU_ERROR_CODE); |
bb620c3d | 575 | |
13fa14e1 | 576 | s->dma_address = DMA_ERROR_CODE;
577 | s->dma_length = 0; | |
578 | } | |
579 | if (s == outs) | |
580 | break; | |
581 | } | |
582 | spin_unlock_irqrestore(&iommu->lock, flags); | |
1da177e4 | 583 | |
688cb30b | 584 | return 0; |
1da177e4 | 585 | }
586 | ||
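
Because dma_4u_map_sg() merges segments whose DVMA addresses come out contiguous (subject to max_seg_size and the segment boundary), the count it returns can be smaller than the number of entries passed in. A hedged consumer-side sketch (names are illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical driver helper: map a scatterlist for device reads; the
 * device would then be programmed with sg_dma_address()/sg_dma_len() of
 * the first "mapped" entries.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nents)
{
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        return mapped ? mapped : -ENOMEM;
}
```
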
13fa14e1 | 587 | /* If contexts are being used, they are the same in all of the mappings
588 | * we make for a particular SG. | |
589 | */ | |
c12f048f | 590 | static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) |
13fa14e1 | 591 | {
592 | unsigned long ctx = 0; | |
593 | ||
594 | if (iommu->iommu_ctxflush) { | |
595 | iopte_t *base; | |
596 | u32 bus_addr; | |
bb620c3d | 597 | struct iommu_map_table *tbl = &iommu->tbl; |
13fa14e1 | 598 |
599 | bus_addr = sg->dma_address & IO_PAGE_MASK; | |
600 | base = iommu->page_table + | |
bb620c3d | 601 | ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT); |
13fa14e1 | 602 |
603 | ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; | |
604 | } | |
605 | return ctx; | |
606 | } | |
607 | ||
ad7ad57c | 608 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, |
bc0a14f1 | 609 | int nelems, enum dma_data_direction direction,
610 | struct dma_attrs *attrs) | |
1da177e4 | 611 | { |
13fa14e1 | 612 | unsigned long flags, ctx;
613 | struct scatterlist *sg; | |
16ce82d8 | 614 | struct strbuf *strbuf; |
38192d52 | 615 | struct iommu *iommu; |
1da177e4 | 616 | |
13fa14e1 | 617 | BUG_ON(direction == DMA_NONE); |
1da177e4 | 618 | |
ad7ad57c | 619 | iommu = dev->archdata.iommu;
620 | strbuf = dev->archdata.stc; | |
621 | ||
13fa14e1 | 622 | ctx = fetch_sg_ctx(iommu, sglist); |
1da177e4 | 623 | |
13fa14e1 | 624 | spin_lock_irqsave(&iommu->lock, flags); |
1da177e4 | 625 | |
13fa14e1 | 626 | sg = sglist;
627 | while (nelems--) { | |
628 | dma_addr_t dma_handle = sg->dma_address; | |
629 | unsigned int len = sg->dma_length; | |
630 | unsigned long npages, entry; | |
631 | iopte_t *base; | |
632 | int i; | |
1da177e4 | 633 | |
13fa14e1 | 634 | if (!len)
635 | break; | |
0fcff28f | 636 | npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); |
1da177e4 | 637 | |
bb620c3d | 638 | entry = ((dma_handle - iommu->tbl.table_map_base) |
13fa14e1 | 639 | >> IO_PAGE_SHIFT);
640 | base = iommu->page_table + entry; | |
1da177e4 | 641 | |
13fa14e1 | 642 | dma_handle &= IO_PAGE_MASK;
643 | if (strbuf->strbuf_enabled) | |
644 | strbuf_flush(strbuf, iommu, dma_handle, ctx, | |
645 | npages, direction); | |
1da177e4 | 646 | |
13fa14e1 | 647 | for (i = 0; i < npages; i++)
648 | iopte_make_dummy(iommu, base + i); | |
1da177e4 | 649 | |
bb620c3d | 650 | iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, |
d618382b | 651 | IOMMU_ERROR_CODE); |
13fa14e1 | 652 | sg = sg_next(sg);
653 | } | |
1da177e4 | 654 | |
7c963ad1 | 655 | iommu_free_ctx(iommu, ctx);
656 | ||
1da177e4 | 657 | spin_unlock_irqrestore(&iommu->lock, flags);
658 | } | |
659 | ||
ad7ad57c | 660 | static void dma_4u_sync_single_for_cpu(struct device *dev,
661 | dma_addr_t bus_addr, size_t sz, | |
662 | enum dma_data_direction direction) | |
1da177e4 | 663 | { |
16ce82d8 | 664 | struct iommu *iommu;
665 | struct strbuf *strbuf; | |
1da177e4 | 666 | unsigned long flags, ctx, npages;
667 | ||
ad7ad57c | 668 | iommu = dev->archdata.iommu;
669 | strbuf = dev->archdata.stc; | |
1da177e4 | 670 |
671 | if (!strbuf->strbuf_enabled) | |
672 | return; | |
673 | ||
674 | spin_lock_irqsave(&iommu->lock, flags); | |
675 | ||
676 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | |
677 | npages >>= IO_PAGE_SHIFT; | |
678 | bus_addr &= IO_PAGE_MASK; | |
679 | ||
680 | /* Step 1: Record the context, if any. */ | |
681 | ctx = 0; | |
682 | if (iommu->iommu_ctxflush && | |
683 | strbuf->strbuf_ctxflush) { | |
684 | iopte_t *iopte; | |
bb620c3d | 685 | struct iommu_map_table *tbl = &iommu->tbl; |
1da177e4 | 686 |
687 | iopte = iommu->page_table + | |
bb620c3d | 688 | ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT); |
1da177e4 | 689 | ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
690 | } | |
691 | ||
692 | /* Step 2: Kick data out of streaming buffers. */ | |
ad7ad57c | 693 | strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); |
1da177e4 | 694 |
695 | spin_unlock_irqrestore(&iommu->lock, flags); | |
696 | } | |
697 | ||
ad7ad57c | 698 | static void dma_4u_sync_sg_for_cpu(struct device *dev,
699 | struct scatterlist *sglist, int nelems, | |
700 | enum dma_data_direction direction) | |
1da177e4 | 701 | { |
16ce82d8 | 702 | struct iommu *iommu;
703 | struct strbuf *strbuf; | |
4dbc30fb | 704 | unsigned long flags, ctx, npages, i; |
2c941a20 | 705 | struct scatterlist *sg, *sgprv; |
4dbc30fb | 706 | u32 bus_addr; |
1da177e4 | 707 | |
ad7ad57c | 708 | iommu = dev->archdata.iommu;
709 | strbuf = dev->archdata.stc; | |
1da177e4 | 710 |
711 | if (!strbuf->strbuf_enabled) | |
712 | return; | |
713 | ||
714 | spin_lock_irqsave(&iommu->lock, flags); | |
715 | ||
716 | /* Step 1: Record the context, if any. */ | |
717 | ctx = 0; | |
718 | if (iommu->iommu_ctxflush && | |
719 | strbuf->strbuf_ctxflush) { | |
720 | iopte_t *iopte; | |
bb620c3d | 721 | struct iommu_map_table *tbl = &iommu->tbl; |
1da177e4 | 722 | |
bb620c3d | 723 | iopte = iommu->page_table + ((sglist[0].dma_address -
724 | tbl->table_map_base) >> IO_PAGE_SHIFT); | |
1da177e4 | 725 | ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
726 | } | |
727 | ||
728 | /* Step 2: Kick data out of streaming buffers. */ | |
4dbc30fb | 729 | bus_addr = sglist[0].dma_address & IO_PAGE_MASK; |
2c941a20 | 730 | sgprv = NULL;
731 | for_each_sg(sglist, sg, nelems, i) { | |
732 | if (sg->dma_length == 0) | |
4dbc30fb | 733 | break; |
2c941a20 | 734 | sgprv = sg;
735 | } | |
736 | ||
737 | npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) | |
4dbc30fb | 738 | - bus_addr) >> IO_PAGE_SHIFT; |
ad7ad57c | 739 | strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); |
1da177e4 | 740 |
741 | spin_unlock_irqrestore(&iommu->lock, flags); | |
742 | } | |
743 | ||
02f7a189 | 744 | static struct dma_map_ops sun4u_dma_ops = { |
c416258a | 745 | .alloc = dma_4u_alloc_coherent,
746 | .free = dma_4u_free_coherent, | |
797a7568 | 747 | .map_page = dma_4u_map_page,
748 | .unmap_page = dma_4u_unmap_page, | |
ad7ad57c | 749 | .map_sg = dma_4u_map_sg,
750 | .unmap_sg = dma_4u_unmap_sg, | |
751 | .sync_single_for_cpu = dma_4u_sync_single_for_cpu, | |
752 | .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, | |
8f6a93a1 | 753 | };
754 | ||
02f7a189 | 755 | struct dma_map_ops *dma_ops = &sun4u_dma_ops; |
ad7ad57c | 756 | EXPORT_SYMBOL(dma_ops); |
1da177e4 | 757 | |
ad7ad57c | 758 | int dma_supported(struct device *dev, u64 device_mask) |
1da177e4 | 759 | { |
ad7ad57c | 760 | struct iommu *iommu = dev->archdata.iommu;
761 | u64 dma_addr_mask = iommu->dma_addr_mask; | |
1da177e4 | 762 | |
ad7ad57c | 763 | if (device_mask >= (1UL << 32UL))
764 | return 0; | |
1da177e4 | 765 | |
ad7ad57c | 766 | if ((device_mask & dma_addr_mask) == dma_addr_mask)
767 | return 1; | |
1da177e4 | 768 | |
ad7ad57c | 769 | #ifdef CONFIG_PCI |
bf70053c | 770 | if (dev_is_pci(dev)) |
ee664a92 | 771 | return pci64_dma_supported(to_pci_dev(dev), device_mask); |
ad7ad57c | 772 | #endif |
1da177e4 | 773 | |
ad7ad57c | 774 | return 0;
775 | } | |
776 | EXPORT_SYMBOL(dma_supported); |
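
dma_supported() is what the generic dma_set_mask() helper consults, so a sun4u driver negotiates its DMA mask in the usual way. A hypothetical probe fragment (illustrative only):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Fall back to a 32-bit mask: devices behind this IOMMU are limited to
 * 32-bit DVMA addresses unless the PCI 64-bit bypass path in
 * pci64_dma_supported() applies.
 */
static int example_set_dma_mask(struct device *dev)
{
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;
        return 0;
}
```
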