// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/core-api/dma-api-howto.rst for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <asm/mtrr.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it set the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;
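
/*
 * Both behaviors can be selected at boot: the "fullflush" and
 * "nofullflush" options parsed in gart_parse_options() at the bottom
 * of this file toggle the flag above.
 */
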
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
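
/*
 * A GART PTE is a 32-bit word: bits 12-31 keep bits 12-31 of the
 * physical address, bits 4-11 hold physical address bits 32-39 (eight
 * extra bits, which is where the 1TB limit below comes from), and bits
 * 0-1 are the valid and coherent flags. Worked example: encoding
 * 0x12_3456_7000 gives 0x34567000 | (0x12 << 4) | 3 = 0x34567123, and
 * GPTE_DECODE(0x34567123) recovers 0x12_3456_7000.
 */
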
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
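
/*
 * Allocation is next-fit: the search resumes at next_bit and only
 * restarts from 0 once the end of the aperture is reached. Both the
 * restart and the wrap of next_bit raise need_flush, since reusing
 * previously handed-out GART pages requires the IOTLBs to be flushed
 * first (see flush_gart() below).
 */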

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
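
/*
 * When an area at or ahead of the current search position is freed,
 * next_bit is pushed past it rather than leaving it immediately
 * reusable: the range can then only be reallocated after alloc_iommu()
 * wraps around, and the wrap forces a flush, so devices cannot see
 * stale IOTLB entries for it.
 */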

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL, KERN_ERR);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size, true);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size, true);
}
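
/*
 * need_iommu() is the mapping-path policy: remap if the user forced the
 * IOMMU on or if the device cannot address the buffer directly.
 * nonforced_iommu() checks reachability alone; the overflow paths use
 * it to decide whether handing back the plain physical address is still
 * acceptable.
 */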

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
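
/*
 * The returned handle is a bus address inside the remapping window:
 * aperture base plus the allocated GART page range, with the buffer's
 * sub-page offset preserved (the loop advances phys_mem in whole pages,
 * so its low bits are unchanged).
 */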

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
		return;

	/*
	 * This driver will not always use a GART mapping, but might have
	 * created a direct mapping instead. If that is the case there is
	 * nothing to unmap here.
	 */
	if (dma_addr < iommu_bus_base ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++)
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -ENOMEM;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
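
/*
 * After __dma_map_cont() the merged entries are backed by one
 * contiguous run of GART pages, so the first output scatterlist entry
 * alone describes the whole segment. Note that the inner "pages"
 * variable shadows the parameter; the final BUG_ON() compares the outer
 * count against the number of GART pages actually written.
 */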

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start, ret;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return -EINVAL;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not-yet-processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				ret = dma_map_cont(dev, start_sg, i - start,
						   sgmap, pages, need);
				if (ret < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need);
	if (ret < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	return ret;
}
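
/*
 * On success "out" is the number of merged segments. If that is less
 * than nents, the entry after the last used one gets dma_length = 0,
 * which is the terminator that gart_unmap_sg() stops at.
 */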

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
				 DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}
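
/*
 * Note the align_mask of (1UL << get_order(size)) - 1 above: it makes
 * the GART mapping naturally aligned for the allocation's order, like
 * the CPU-side buffer the page allocator returned.
 */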

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
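
/*
 * Decoding above: the aperture base register carries physical address
 * bits 25-39 (32MB granularity, hence the shift by 25), and bits 1-3 of
 * the control register encode the order, giving an aperture size of
 * 32MB << order. A combination that would place the aperture above 4GB,
 * or a zero size, is treated as "no usable aperture".
 */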

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size >> 10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n");
	return -1;
}
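
/*
 * Note: every northbridge must report the same aperture base and size;
 * any mismatch or missing aperture sends us to the nommu path, where
 * the kernel falls back to software bounce buffering (iommu=soft).
 */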

static const struct dma_map_ops gart_dma_ops = {
	.map_sg			= gart_map_sg,
	.unmap_sg		= gart_unmap_sg,
	.map_page		= gart_map_page,
	.unmap_page		= gart_unmap_page,
	.alloc			= gart_alloc_coherent,
	.free			= gart_free_coherent,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.dma_supported		= dma_direct_supported,
	.get_required_mask	= dma_direct_get_required_mask,
	.alloc_pages_op		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
};
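
/*
 * Only the streaming map/unmap operations go through the GART paths
 * above; mmap, sgtable export, page allocation and mask handling are
 * delegated to the dma-direct helpers, matching gart_alloc_coherent()
 * getting its memory from dma_direct_alloc().
 */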

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if there is AGP installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		 (agp_amd64_init() < 0) ||
		 (agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warn("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base >> PAGE_SHIFT) + (aper_size >> PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn << PAGE_SHIFT,
				    end_pfn << PAGE_SHIFT, PAGE_KERNEL);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	iommu_gatt_base		= agp_gatt_table + (iommu_start >> PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	x86_swiotlb_enable = false;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
842}