#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
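
/*
 * Allocate pages for the swiotlb bounce buffer: if any RAM region begins
 * below the 32-bit boundary, request ZONE_DMA memory so the buffer stays
 * addressable by devices with 32-bit DMA masks.
 */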
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */
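
/*
 * Clean or invalidate the CPU cache for a DMA buffer by issuing a
 * GNTTAB_CACHE_CLEAN/GNTTAB_CACHE_INVAL hypercall for each page that the
 * buffer touches.
 */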
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
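
/*
 * dev_to_cpu maps to a DMA_UNMAP (invalidate) operation, cpu_to_dev to a
 * DMA_MAP (clean or invalidate) operation on the pages behind the handle.
 */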
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
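
/*
 * map/unmap hooks: cache maintenance is skipped entirely for coherent
 * devices and for mappings created with DMA_ATTR_SKIP_CPU_SYNC.
 */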
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
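
/*
 * The sync_single hooks perform the same maintenance as unmap/map but
 * honour only the coherency check, not DMA_ATTR_SKIP_CPU_SYNC.
 */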
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
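
/*
 * swiotlb bouncing is only required for a foreign page (pfn != bfn) on a
 * non-coherent device when the hypervisor cannot flush caches on our
 * behalf (no GNTTABOP_cache_flush support).
 */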
bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned long pfn = PFN_DOWN(phys);
	unsigned long bfn = PFN_DOWN(dev_addr);

	return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
}
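
/*
 * dom0 is assumed to be mapped 1:1, so no machine-contiguous region has
 * to be created: the physical address already serves as the DMA handle,
 * and there is nothing to tear down on destroy.
 */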
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
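
/*
 * All DMA operations are routed through the Xen swiotlb implementation;
 * the xen_dma_ops pointer is filled in by xen_mm_init() below.
 */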
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};
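
/*
 * Set up the Xen swiotlb for the initial domain and probe for the
 * GNTTABOP_cache_flush hypercall with a zero-length flush: anything but
 * -ENOSYS means the hypervisor supports it.
 */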
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);