#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

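/*
 * Pick GFP flags for the swiotlb bounce buffer: if any memblock
 * region starts below the 32-bit boundary, allocate from ZONE_DMA so
 * the buffer remains addressable by devices limited to 32-bit DMA.
 */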
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}

enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};
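/*
 * True if the hypervisor implements GNTTABOP_cache_flush; probed once
 * at boot by xen_mm_init() below.
 */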
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

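/*
 * Walk the buffer in XEN_PAGE_SIZE chunks and clean or invalidate
 * each Xen page through the GNTTABOP_cache_flush hypercall: a single
 * flush operation cannot cross a Xen page boundary, and highmem or
 * foreign buffers are only contiguous within one page.
 */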
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long xen_pfn;
        size_t left = size;

        xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
        offset %= XEN_PAGE_SIZE;

        do {
                size_t len = left;

                /*
                 * Buffers in highmem or foreign pages cannot cross
                 * page boundaries.
                 */
                if (len + offset > XEN_PAGE_SIZE)
                        len = XEN_PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }
                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                xen_pfn++;
                left -= len;
        } while (left);
}

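/*
 * Split the DMA handle into a page-aligned base and an in-page offset
 * before delegating to dma_cache_maint().
 */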
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

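/*
 * Entry points called by the Xen swiotlb DMA ops. All of them skip
 * cache maintenance for coherent devices; map/unmap additionally
 * honour DMA_ATTR_SKIP_CPU_SYNC.
 */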
void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr)
{
        unsigned int xen_pfn = XEN_PFN_DOWN(phys);
        unsigned int bfn = XEN_PFN_DOWN(dev_addr);

        /*
         * The swiotlb buffer should be used if
         *      - Xen doesn't have the cache flush hypercall
         *      - The Linux page refers to foreign memory
         *      - The device doesn't support coherent DMA requests
         *
         * A Linux page may span multiple Xen pages, although it is not
         * possible to have a mix of local and foreign Xen pages.
         * Furthermore, range_straddles_page_boundary() already checks
         * that the buffer is physically contiguous in host RAM.
         *
         * Therefore we only need to check the first Xen page to know
         * if we require a bounce buffer because the device doesn't
         * support coherent memory and we are not able to flush the
         * cache.
         */
        return (!hypercall_cflush && (xen_pfn != bfn) &&
                !is_device_dma_coherent(dev));
}

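/*
 * On ARM dom0 is assumed to be mapped 1:1 (guest physical == machine),
 * so creating a physically contiguous region needs no hypervisor-side
 * exchange: the machine address is simply the physical start address,
 * and there is nothing to undo on destroy.
 */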
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
        return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

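/*
 * Dom0-only initialisation: set up the swiotlb bounce buffer, install
 * the Xen swiotlb DMA ops, and probe for the cache flush hypercall by
 * issuing a zero-length flush (any result other than -ENOSYS means
 * GNTTABOP_cache_flush is implemented).
 */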
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;

        if (!xen_initial_domain())
                return 0;
        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);