// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);
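
/*
 * Note: &nommu_dma_ops is only the boot-time default. Whichever IOMMU
 * implementation wins detection below (GART, Calgary, SWIOTLB, ...) is
 * expected to install its own struct dma_map_ops, either by overwriting
 * this global or by setting a per-device ops pointer.
 */
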
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	65536

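/*
 * IOMMU implementations register a struct iommu_table_entry in the
 * .iommu_table linker section via the IOMMU_INIT_* macros in
 * <asm/iommu_table.h>. pci_iommu_alloc() sorts that table by declared
 * dependencies, calls each ->detect() hook, and runs ->early_init() for
 * every entry that reported hardware, stopping early if the entry has
 * IOMMU_FINISH_IF_DETECTED set.
 */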
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
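
/*
 * dma_generic_alloc_coherent() is the ->alloc hook behind nommu_dma_ops
 * (and is reused by some IOMMU drivers). Illustrative driver-side use,
 * a sketch only ("pdev" is a hypothetical struct pci_dev *):
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, 4096, &bus_addr,
 *				      GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, 4096, cpu_addr, bus_addr);
 */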
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can only be used in a context that permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);
		if (page) {
			addr = phys_to_dma(dev, page_to_phys(page));
			if (addr + size > dma_mask) {
				dma_release_from_contiguous(dev, page, count);
				page = NULL;
			}
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = phys_to_dma(dev, page_to_phys(page));
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

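/*
 * Called from the generic dma_alloc_attrs() in <linux/dma-mapping.h>
 * before the ->alloc hook runs: substitute the fallback device for a
 * NULL dev and sanitize the gfp flags against the device's coherent
 * DMA mask.
 */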
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	if (!*dev)
		*dev = &x86_dma_fallback_dev;

	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

	return is_device_dma_capable(*dev);
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

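/*
 * Example (illustrative): booting with
 *
 *	iommu=force,biomerge,panic
 *
 * sets force_iommu, iommu_merge and panic_on_overflow. Options are
 * comma-separated; each token is matched by the strncmp() chain below.
 */
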
/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

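/*
 * x86_dma_supported() answers dma_set_mask()-style mask queries from
 * drivers, e.g. the common negotiation pattern (a sketch; "pdev" is a
 * hypothetical struct pci_dev *):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// no usable DMA addressing
 *
 * Returning 0 here rejects the requested mask.
 */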
int x86_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	/*
	 * Copied from i386. Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent. The caller just has to use
	 * GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on. This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * Problem with this is that if we overflow the IOMMU area and
	 * return DAC as fallback address the device may not handle it
	 * correctly.
	 *
	 * As a special case some controllers have a 39-bit address mode
	 * that is as efficient as 32-bit (aic79xx). Don't force SAC for
	 * these. Assume all masks <= 40 bits are of this type. Normally
	 * this doesn't make any difference, but gives more gentle
	 * handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}

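/*
 * Runs late (rootfs_initcall, see below) so the PCI subsystem is
 * already up: initialize DMA-API debugging with the entry pool sized
 * above, hand control to the selected x86_init.iommu.iommu_init()
 * implementation, then give every detected IOMMU table entry its
 * ->late_init() callback.
 */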
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

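/*
 * The class-final fixup below runs for every device whose vendor is VIA
 * and whose class matches PCI_CLASS_BRIDGE_PCI. The first such bridge
 * seen sets the global forbid_dac, which makes x86_dma_supported()
 * reject any mask wider than 32 bits (unless the user overrode it with
 * iommu=allowdac or iommu=usedac).
 */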
static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif
3b15e581 287#endif