/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with QLogic at least).
 */
int iommu_fullflush = 1;

#define MAX_NB 8

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
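
/*
 * Illustrative example (not in the original source): a GART PTE keeps
 * physical address bits 12-31 in place and folds bits 32-39 into PTE
 * bits 4-11. GPTE_ENCODE(0x123456000ULL) yields
 * 0x23456000 | (0x1 << 4) | GPTE_VALID | GPTE_COHERENT = 0x23456013,
 * and GPTE_DECODE(0x23456013) recovers 0x123456000.
 */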

#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
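
/*
 * Illustrative example (not in the original source): a 0x1000-byte
 * buffer starting at page offset 0x800 straddles two pages, so
 * to_pages(0x800, 0x1000) rounds 0x800 + 0x1000 = 0x1800 up to 0x2000
 * and returns 2.
 */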

#define for_all_nb(dev) \
	dev = NULL; \
	while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev)) != NULL) \
		if (dev->bus->number == 0 && \
		    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
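
/*
 * for_all_nb() above iterates over the per-node K8 northbridge
 * miscellaneous-control PCI functions (AMD device ID 0x1103), which
 * all sit on bus 0 in device slots 24-31.
 */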

static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
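
/*
 * alloc_iommu() below is a next-fit allocator over the GART bitmap:
 * scanning resumes at next_bit instead of 0 so freshly freed entries
 * are not reused immediately, and the lazy-flush scheme only forces a
 * GART TLB flush when the search wraps around (or on every allocation
 * when iommu_fullflush is set).
 */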
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit, iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0, next_bit, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;
	if (size == 1) {
		clear_bit(offset, iommu_gart_bitmap);
		return;
	}
	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
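/*
 * Register 0x9c in each northbridge's miscellaneous-control function is
 * the GART cache control word: writing bit 0 triggers a flush of the
 * on-chip GART TLB, and the hardware clears the bit once the flush has
 * completed, which the wait loop below polls for.
 */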
static void flush_gart(struct device *dev)
{
	unsigned long flags;
	int flushed = 0;
	int i, max;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		max = 0;
		for (i = 0; i < MAX_NB; i++) {
			if (!northbridges[i])
				continue;
			pci_write_config_dword(northbridges[i], 0x9c,
					       northbridge_flush_word[i] | 1);
			flushed++;
			max = i;
		}
		for (i = 0; i <= max; i++) {
			u32 w;
			if (!northbridges[i])
				continue;
			/* Make sure the hardware actually executed the flush. */
			do {
				pci_read_config_dword(northbridges[i], 0x9c, &w);
			} while (w & 1);
		}
		if (!flushed)
			printk("nothing to flush?\n");
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
	iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
	iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;
void dump_leak(void)
{
	int i;
	static int dump;
	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude: dump some entries from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages - i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages - i]);
		printk("%c", (i + 1) % 2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;
	if (force_iommu)
		mmu = 1;
	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		mmu = 0;
	}
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;
	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		mmu = 0;
	}
	return mmu;
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check whether the IOMMU is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	/* The page offset survives the whole-page increments above. */
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
	flush_gart(dev);
	return map;
}

/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == DMA_NONE);

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}
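
/*
 * Usage sketch (illustrative, not part of this file): drivers reach
 * gart_map_single() through the generic DMA API via dma_ops, e.g.:
 *
 *	dma_addr_t bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	... let the device DMA to/from bus ...
 *	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */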

/*
 * Wrapper around dma_unmap_single() for scatterlists.
 */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart(dev);
	return nents;
}

/* Map multiple scatterlist entries contiguously into the first one. */
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long npages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		npages = to_pages(s->offset, s->length);
		while (npages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __dma_map_cont(sg, start, stopat, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
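/*
 * Illustrative example (not in the original source): with iommu_merge
 * enabled, three 4096-byte entries at page-aligned addresses that all
 * need remapping merge into a single 12288-byte mapping backed by three
 * consecutive GATT entries, so the device sees one contiguous bus
 * address range.
 */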
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == DMA_NONE);
	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart(dev);
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart(NULL);
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}

/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
		       size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;
	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size>>20);

	return iommu_size;
}
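
/*
 * The aperture is described by two northbridge registers, decoded by
 * read_aperture() below: 0x90 carries the aperture order in bits 1-3
 * (size = 32MB << order) and 0x94 the aperture base in 32MB units.
 * Illustrative example (not in the original source): order 2 with a
 * base field of 0x40 describes a 128MB aperture at physical address 2GB.
 */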
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	/* Reject apertures that are unset or would reach beyond 4GB. */
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	for_all_nb(dev) {
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		/* All nodes must report the same aperture. */
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for_all_nb(dev) {
		u32 ctl;
		u32 gatt_reg;

		/* Register 0x98 takes the GATT's physical address bits
		   39:12 in register bits 31:4. */
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;			/* enable the GART */
		ctl &= ~((1<<4) | (1<<5));	/* allow CPU and IO translation */

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart(NULL);

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);
	return 0;

nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};
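
/*
 * A NULL hook above means the generic x86-64 DMA dispatch skips that
 * operation; GART entries are created with GPTE_COHERENT, so presumably
 * no explicit sync is needed for these mappings.
 */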

static int __init pci_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	struct pci_dev *dev;
	unsigned long scratch;
	long i;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb) {
		no_iommu = 1;
		return -1;
	}

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		no_iommu = 1;
		no_iommu_init();
		return -1;
	}

	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
					get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to work around a bug (thanks to BenH):
	 * set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	for_all_nb(dev) {
		u32 flag;
		int cpu = PCI_SLOT(dev->devfn) - 24;
		if (cpu >= MAX_NB)
			continue;
		northbridges[cpu] = dev;
		pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
		northbridge_flush_word[cpu] = flag;
	}

	flush_gart(NULL);

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	dma_ops = &gart_dma_ops;

	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

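/*
 * Boot options for the GART IOMMU (summarized from the code below):
 *   leak[=pages]    enable leak tracing (CONFIG_IOMMU_LEAK only)
 *   <number>        override the IOMMU size in bytes
 *   fullflush       flush the GART on every mapping
 *   nofullflush     only flush on allocator wrap (lazy flushing)
 *   noagp           don't use the AGP driver's GATT
 *   noaperture      don't touch the aperture settings
 *   force/allowed   allow aperture allocation (duplicated from pci-dma.c)
 *   memaper[=order] force a fallback aperture in main memory
 */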
void gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}