/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
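
/*
 * Worked example (editorial note, not in the original source): the GART
 * packs a 40-bit physical address into a 32-bit PTE by keeping address
 * bits 31:12 in place and folding bits 39:32 into PTE bits 11:4.  For
 * phys addr 0x1234567000, GPTE_ENCODE() keeps 0x34567000, folds the high
 * byte 0x12 into bits 11:4 and sets the valid+coherent bits, giving
 * 0x34567123; GPTE_DECODE(0x34567123) recovers 0x1234567000.
 */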

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
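
/*
 * Example (editorial note): to_pages(0x1fff, 0x2000) describes a buffer
 * starting 0xfff bytes into its first page and ending at in-page offset
 * 0x2fff, which rounds up to 3 pages.
 */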

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
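	/*
	 * Editorial note: dma_get_seg_boundary() returns a boundary mask
	 * such as 0xffffffff; expressing it in page units lets
	 * iommu_area_alloc() below avoid handing out a range that would
	 * cross the device's DMA segment boundary.
	 */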

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 1; i <= iommu_leak_pages; i++) {
		printk(KERN_DEBUG "%lu: ", iommu_pages - i);
		printk_address((unsigned long)iommu_leak_tab[iommu_pages - i], 0);
		printk(KERN_CONT "%c", (i % 2) == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
		size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
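	/*
	 * Editorial note: the loop above advances phys_mem by whole pages
	 * only, so its in-page offset bits are unchanged and can still be
	 * used to rebuild the caller's offset in the bus address below.
	 */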
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB."
		       " Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
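	/*
	 * Editorial note: the northbridge encodes the aperture size as an
	 * order (32MB << order) in bits 3:1 of register 0x90, and the low
	 * 15 bits of register 0x94 hold physical address bits 39:25 of
	 * the aperture base, hence the mask by 0x7fff and the shift by 25
	 * above.
	 */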
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 gatt_reg;
		u32 ctl;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
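		/*
		 * Editorial note: register 0x98 is the GART table base;
		 * the computation above places physical address bits 39:12
		 * of the page-aligned GATT into register bits 31:4.
		 */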
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));
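		/*
		 * Editorial note: in the aperture control register (0x90),
		 * the two statements above set bit 0 to enable the GART
		 * and clear bits 4 and 5, which would otherwise disable
		 * translation of CPU and I/O accesses respectively.
		 */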

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

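/*
 * Editorial note: the sync hooks below are deliberately NULL. Every GART
 * PTE is written with GPTE_COHERENT set, so remapped DMA is cache
 * coherent and no per-transfer sync work is required.
 */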
static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0,
			       iommu_pages * sizeof(void *));
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus
	 * abort then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

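/*
 * Editorial note: these options arrive as the tail of the iommu= kernel
 * parameter. For example, "iommu=fullflush" forces a GART flush on every
 * mapping, "iommu=noagp" skips the AGP driver, and "iommu=memaper=N"
 * forces a fallback aperture of the given order.
 */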
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}