arch/x86/kernel/pci-gart_64.c

/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>
#include <asm/x86_init.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

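/*
 * Added commentary (not in the original file): full flushing is the
 * default here; the lazy strategy can still be requested at boot time
 * with "iommu=nofullflush", parsed in gart_parse_options() below.
 */
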
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

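/*
 * Worked example (added commentary, not in the original file): a GART PTE
 * packs a 40-bit physical address into 32 bits by folding address bits
 * 32..39 into PTE bits 4..11. For phys = 0x123456000 (just above 4GB):
 *
 *	GPTE_ENCODE(0x123456000)
 *		= (0x123456000 & 0xfffff000)	// 0x23456000
 *		| ((0x123456000 >> 32) << 4)	// 0x00000010
 *		| GPTE_VALID | GPTE_COHERENT	// 0x00000003
 *		= 0x23456013
 *
 *	GPTE_DECODE(0x23456013)
 *		= (0x23456013 & 0xfffff000)		// 0x23456000
 *		| (((u64)0x23456013 & 0xff0) << 28)	// 0x100000000
 *		= 0x123456000
 */
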
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
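
/*
 * Added commentary (not in the original file): alloc_iommu()/free_iommu()
 * hand out page-granular windows in the remapping area; a hypothetical
 * caller pairs them the way dma_map_area()/gart_unmap_page() below do:
 *
 *	unsigned long page = alloc_iommu(dev, npages, 0);
 *	if (page != -1) {
 *		for (i = 0; i < npages; i++)
 *			iommu_gatt_base[page + i] =
 *				GPTE_ENCODE(phys + i*PAGE_SIZE);
 *		...
 *		free_iommu(page, npages);
 *	}
 */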

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
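
/*
 * Added commentary (not in the original file): by the final return above,
 * phys_mem has been advanced npages*PAGE_SIZE past its starting value, but
 * page-sized increments never change the low bits, so (phys_mem &
 * ~PAGE_MASK) is still the caller's offset within the first page. E.g.
 * mapping phys 0x123456080 yields
 *
 *	iommu_bus_base + iommu_page*PAGE_SIZE + 0x080
 */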

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size = 0;
				sgmap	 = sg_next(sgmap);
				pages	 = 0;
				start	 = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}
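
/*
 * Added commentary (not in the original file): the merge test above only
 * coalesces scatterlist entries when the combined segment stays valid.
 * Two 4KB chunks, the first ending on a page boundary and the second
 * starting at offset 0, become one 8KB GART mapping; if the second chunk
 * had a nonzero offset, or the sum exceeded dma_get_max_seg_size(dev),
 * each chunk would get its own GART window instead.
 */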

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
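
/*
 * Added commentary (not in the original file): the northbridge encodes
 * the aperture as an order plus a base in 32MB units. For example, an
 * APERTURECTL order field of 3 gives
 *
 *	aper_size = (32 * 1024 * 1024) << 3 = 256MB
 *
 * and a base field of 0x40 gives aper_base = 0x40 << 25 = 2GB. Apertures
 * that are empty or end above 4GB are rejected, since the GART aperture
 * must live below the 4GB boundary.
 */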

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(struct sys_device *dev)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static int gart_resume(struct sys_device *dev)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges(dev);

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name		= "gart",
	.suspend	= gart_suspend,
	.resume		= gart_resume,

};

static struct sys_device device_gart = {
	.cls		= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}
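
/*
 * Added commentary (not in the original file): the GATT holds one 32-bit
 * entry per aperture page, so a 256MB aperture costs
 *
 *	(256MB >> PAGE_SHIFT) * sizeof(u32) = 65536 * 4 = 256KB
 *
 * of uncached kernel memory for the translation table itself.
 */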

static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}
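
/*
 * Added commentary (not in the original file): once dma_ops points at
 * gart_dma_ops (see gart_iommu_init() below), an ordinary driver call
 * such as
 *
 *	dma_addr_t h = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, h))
 *		goto fail;
 *
 * is routed to gart_map_page() and gart_mapping_error() above.
 */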

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		 (agp_amd64_init() < 0) ||
		 (agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}
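
/*
 * Added commentary (not in the original file): with a 256MB aperture and
 * AGP still active, the default split from check_iommu_size() lays the
 * aperture out roughly as
 *
 *	aper_base            aper_base + 128MB          aper_base + 256MB
 *	|------ AGP half ------|------- IOMMU half -------|
 *	                       ^ iommu_bus_base (also bad_dma_addr; its
 *	                         first EMERGENCY_PAGES stay reserved)
 */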

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
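
/*
 * Added commentary (not in the original file): this parser sees the
 * pieces of the "iommu=" boot option, so for example booting with
 *
 *	iommu=noagp,memaper=3
 *
 * disables the AGP-driver path and forces a fallback aperture of order 3
 * (32MB << 3 = 256MB). See Documentation/x86/x86_64/boot-options.txt.
 */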