Merge tag 'drm-vc4-fixes-2016-09-14' of https://github.com/anholt/linux into drm...
[linux-2.6-block.git] / arch / x86 / kernel / pci-swiotlb.c
CommitLineData
17a941d8
MBY
1/* Glue code to lib/swiotlb.c */
2
3#include <linux/pci.h>
4#include <linux/cache.h>
186f4360 5#include <linux/init.h>
8ce79960
JF
6#include <linux/swiotlb.h>
7#include <linux/bootmem.h>
d6bd3a39
REB
8#include <linux/dma-mapping.h>
9
46a7fa27 10#include <asm/iommu.h>
17a941d8
MBY
11#include <asm/swiotlb.h>
12#include <asm/dma.h>
c116c545
KRW
13#include <asm/xen/swiotlb-xen.h>
14#include <asm/iommu_table.h>
/* Non-zero when the SWIOTLB bounce-buffer path is (or has been forced) on. */
int swiotlb __read_mostly;
17a941d8 16
9c5a3621 17void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
baa676fc 18 dma_addr_t *dma_handle, gfp_t flags,
00085f1e 19 unsigned long attrs)
03967c52
FT
20{
21 void *vaddr;
22
186dfc9d
JR
23 /*
24 * Don't print a warning when the first allocation attempt fails.
25 * swiotlb_alloc_coherent() will print a warning when the DMA
26 * memory allocation ultimately failed.
27 */
28 flags |= __GFP_NOWARN;
29
baa676fc
AP
30 vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
31 attrs);
03967c52
FT
32 if (vaddr)
33 return vaddr;
34
35 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
36}
37
9c5a3621 38void x86_swiotlb_free_coherent(struct device *dev, size_t size,
baa676fc 39 void *vaddr, dma_addr_t dma_addr,
00085f1e 40 unsigned long attrs)
baa676fc 41{
9c5a3621
AM
42 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
43 swiotlb_free_coherent(dev, size, vaddr, dma_addr);
44 else
45 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
baa676fc
AP
46}
47
ff6c6fed 48static struct dma_map_ops swiotlb_dma_ops = {
17a941d8 49 .mapping_error = swiotlb_dma_mapping_error,
baa676fc
AP
50 .alloc = x86_swiotlb_alloc_coherent,
51 .free = x86_swiotlb_free_coherent,
17a941d8
MBY
52 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
53 .sync_single_for_device = swiotlb_sync_single_for_device,
17a941d8
MBY
54 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
55 .sync_sg_for_device = swiotlb_sync_sg_for_device,
160c1d8e
FT
56 .map_sg = swiotlb_map_sg_attrs,
57 .unmap_sg = swiotlb_unmap_sg_attrs,
4cf37bb7
FT
58 .map_page = swiotlb_map_page,
59 .unmap_page = swiotlb_unmap_page,
17a941d8
MBY
60 .dma_supported = NULL,
61};
62
b18485e7 63/*
efa631c2 64 * pci_swiotlb_detect_override - set swiotlb to 1 if necessary
b18485e7
FT
65 *
66 * This returns non-zero if we are forced to use swiotlb (by the boot
67 * option).
68 */
efa631c2 69int __init pci_swiotlb_detect_override(void)
17a941d8 70{
273bee27
FT
71 int use_swiotlb = swiotlb | swiotlb_force;
72
efa631c2
KRW
73 if (swiotlb_force)
74 swiotlb = 1;
75
76 return use_swiotlb;
77}
/*
 * Register pci_swiotlb_detect_override in the IOMMU init table.
 * NOTE(review): the second argument appears to be the detection routine
 * this entry depends on (Xen's) — confirm against IOMMU_INIT_FINISH in
 * <asm/iommu_table.h>.
 */
IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
		  pci_xen_swiotlb_detect,
		  pci_swiotlb_init,
		  pci_swiotlb_late_init);
efa631c2
KRW
82
83/*
84 * if 4GB or more detected (and iommu=off not set) return 1
85 * and set swiotlb to 1.
86 */
87int __init pci_swiotlb_detect_4gb(void)
88{
17a941d8 89 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
cfb80c9e 90#ifdef CONFIG_X86_64
ec941c5f 91 if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
19943b0e 92 swiotlb = 1;
cfb80c9e 93#endif
efa631c2 94 return swiotlb;
186a2502 95}
/*
 * Register the >4GB-memory SWIOTLB detector in the IOMMU init table.
 * NOTE(review): the second argument appears to be the detection routine
 * this entry depends on — confirm against IOMMU_INIT in
 * <asm/iommu_table.h>.
 */
IOMMU_INIT(pci_swiotlb_detect_4gb,
	   pci_swiotlb_detect_override,
	   pci_swiotlb_init,
	   pci_swiotlb_late_init);
186a2502
FT
100
101void __init pci_swiotlb_init(void)
102{
17a941d8 103 if (swiotlb) {
ad32e8cb 104 swiotlb_init(0);
17a941d8 105 dma_ops = &swiotlb_dma_ops;
a3b28ee1 106 }
17a941d8 107}
efa631c2
KRW
108
109void __init pci_swiotlb_late_init(void)
110{
111 /* An IOMMU turned us off. */
112 if (!swiotlb)
113 swiotlb_free();
114 else {
115 printk(KERN_INFO "PCI-DMA: "
116 "Using software bounce buffering for IO (SWIOTLB)\n");
117 swiotlb_print_info();
118 }
119}