/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap;	/* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
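
/*
 * A GART PTE keeps physical address bits [31:12] in place and stores
 * bits [39:32] in PTE bits [11:4]. Worked example (illustrative):
 * GPTE_ENCODE(0x123456000) = 0x23456000 | (0x1 << 4) | 3 = 0x23456013;
 * GPTE_DECODE(0x23456013)  = 0x23456000 | (0x010 << 28) = 0x123456000.
 */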

#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
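
/*
 * to_pages() yields the number of GART pages a mapping touches,
 * counting partial pages at both ends. Example: to_pages(0xfffc, 8)
 * == 2, since the 8-byte buffer straddles a page boundary.
 */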

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
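
/*
 * Next-fit allocation over the aperture bitmap: searching resumes at
 * next_bit instead of 0 so just-freed entries are not reused at once,
 * and a GART TLB flush is forced whenever the allocator wraps around
 * (or on every allocation when iommu_fullflush is set).
 */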
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit, iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0, iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages - i);
		printk_address((unsigned long)iommu_leak_tab[iommu_pages - i]);
		printk("%c", (i + 1) % 2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
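	/*
	 * phys_mem was advanced by whole pages in the loop, so its low
	 * bits still carry the original offset into the first page.
	 */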
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();
	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

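	/*
	 * Addresses below the emergency pages or outside the remapping
	 * window were never remapped by the GART, so there is nothing
	 * to tear down for them.
	 */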
	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();
	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *start, int nelems,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *start, int nelems,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;
	struct scatterlist *s, *ps, *start_sg, *sgmap;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(start_sg, i - start, sgmap,
						 pages, need) < 0)
					goto error;
				out++;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, nents, dir);
	/* If merging or forcing was used, try again entry by entry */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;
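
/*
 * When AGP is active only half of the aperture is handed to the IOMMU
 * by default; the other half stays available to the AGP driver. The
 * size is then trimmed against LARGE_PAGE_SIZE boundaries before use.
 */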
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);

	return iommu_size;
}

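/*
 * Read the aperture from the K8 northbridge config space. The order
 * field in bits [3:1] of reg 0x90 selects an aperture of 32MB << order,
 * and the base in reg 0x94 is kept in 32MB units (hence the << 25).
 * Illustrative example: order 3 with a base field of 0x40 describes a
 * 256MB aperture at 0x80000000.
 */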
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
		panic("Could not set GART PTEs to uncacheable pages");
	global_flush_tlb();

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;
		u32 gatt_reg;

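		/*
		 * Reg 0x98 takes the GATT base: physical address bits
		 * [39:12] go into register bits [31:4]. Reg 0x90 is the
		 * aperture control word; bit 0 (GARTEN) enables GART
		 * translation, and bits 4/5 (the DisGartCpu/DisGartIo
		 * probe-disable bits on K8) are cleared so translation
		 * applies to both CPU and IO requests.
		 */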
		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);
	return 0;

nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

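/*
 * The sync_* hooks can stay NULL: the GART only remaps bus addresses
 * and never bounces data through an intermediate buffer, so there is
 * nothing to copy back at sync time.
 */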
static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

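		/*
		 * Bit 0 (GARTEN) of the aperture control register;
		 * clearing it turns GART translation back off.
		 */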
		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start >> PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus
	 * abort then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

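/*
 * Parse GART specific options; these arrive via the iommu= kernel
 * parameter (dispatched from pci-dma.c), e.g. "iommu=fullflush" or
 * "iommu=memaper=2" (illustrative values).
 */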
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
748 | } |