arch/x86/kernel/pci-dma.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>

#include <xen/xen.h>
#include <xen/swiotlb-xen.h>

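/*
 * Set by the "iommu=usedac" boot option to skip the VIA DAC quirk at the
 * bottom of this file.
 */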
static bool disable_dac_quirk __read_mostly;

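/*
 * Platform-wide DMA ops.  Leaving this NULL makes the DMA core fall back
 * to dma-direct; Xen PV installs xen_swiotlb_dma_ops below instead.
 */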
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

#ifdef CONFIG_SWIOTLB
bool x86_swiotlb_enable;
static unsigned int x86_swiotlb_flags;

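/*
 * Decide whether the software IOMMU (swiotlb) is needed: bounce buffering
 * is required once there is RAM above the 32-bit DMA limit, and it is
 * mandatory when memory encryption keeps devices or the hypervisor from
 * reaching guest memory directly.
 */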
static void __init pci_swiotlb_detect(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
		x86_swiotlb_enable = true;

	/*
	 * Enable swiotlb so that bounce buffers are allocated and used for
	 * devices that can't support DMA to encrypted memory.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		x86_swiotlb_enable = true;

	/*
	 * Guests with memory encryption currently perform all DMA through
	 * bounce buffers as the hypervisor can't access arbitrary VM memory
	 * that is not explicitly shared with it.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		x86_swiotlb_enable = true;
		x86_swiotlb_flags |= SWIOTLB_FORCE;
	}
}
#else
static inline void __init pci_swiotlb_detect(void)
{
}
#define x86_swiotlb_flags 0
#endif /* CONFIG_SWIOTLB */

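/*
 * Xen PV guests work on pseudo-physical addresses that need not match the
 * machine addresses devices see, so their DMA is routed through
 * swiotlb-xen, which translates and, where necessary, bounces buffers.
 */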
#ifdef CONFIG_SWIOTLB_XEN
static bool xen_swiotlb_enabled(void)
{
	return xen_initial_domain() || x86_swiotlb_enable ||
		(IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
}

static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_swiotlb_enabled())
		return;
	x86_swiotlb_enable = true;
	x86_swiotlb_flags |= SWIOTLB_ANY;
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}
#else
static inline void __init pci_xen_swiotlb_init(void)
{
}
#endif /* CONFIG_SWIOTLB_XEN */

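/*
 * Called early in boot, while memblock is still available, so that swiotlb
 * can reserve its bounce buffer.  Hardware IOMMUs are only *detected*
 * here; they are initialized later via the x86_init.iommu.iommu_init hook
 * in pci_iommu_init() below.
 */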
void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		pci_xen_swiotlb_init();
		return;
	}
	pci_swiotlb_detect();
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}

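/*
 * Parse the "iommu=" kernel command line.  Options are comma separated
 * and matched by prefix, for example:
 *
 *	iommu=off	disable IOMMU and swiotlb auto-detection
 *	iommu=force	always use the hardware IOMMU
 *	iommu=soft	use swiotlb bounce buffering
 *	iommu=pt	put devices into passthrough (identity) mode
 *
 * Every token is also handed to gart_parse_options(), which understands
 * additional GART-specific options.
 */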
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

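/*
 * By rootfs_initcall time the PCI subsystem is up, so whichever hardware
 * IOMMU was detected in pci_iommu_alloc() can now be brought up through
 * the x86_init.iommu.iommu_init hook (GART, AMD-Vi or VT-d).
 */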
static int __init pci_iommu_init(void)
{
	x86_init.iommu.iommu_init();

#ifdef CONFIG_SWIOTLB
	if (x86_swiotlb_enable) {
		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_print_info();
	} else {
		/* An IOMMU turned swiotlb off; release its bounce buffer. */
		swiotlb_exit();
	}
#endif

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

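/*
 * DAC (dual address cycle) carries a 64-bit address over 32-bit PCI in
 * two bus cycles.  Capping dev->bus_dma_limit to DMA_BIT_MASK(32) makes
 * the DMA core bounce or reject mappings above 4 GiB, avoiding DAC
 * altogether; "iommu=usedac" skips the quirk.
 */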
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
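/*
 * Class-final quirk: runs for every VIA device matching class
 * PCI_CLASS_BRIDGE_PCI (the class_shift of 8 drops the programming-
 * interface byte from the comparison).
 */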
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif