/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/module.h>
#include <asm/dma.h>
#include <asm/sn/sn_sal.h>
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"

#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function. Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}
EXPORT_SYMBOL(sn_dma_supported);

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller). See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, int flags)
{
	void *cpuaddr;
	unsigned long phys_addr;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 * FIXME: We should be doing alloc_pages_node for the node closest
	 * to the PCI device.
	 */
	cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
	if (!cpuaddr)
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */
	*dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
				    SN_PCIDMA_CONSISTENT);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}
EXPORT_SYMBOL(sn_dma_alloc_coherent);

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t dma_handle)
{
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));

	BUG_ON(dev->bus != &pci_bus_type);

	pcibr_dma_unmap(pcidev_info, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(sn_dma_free_coherent);

/**
 * sn_dma_map_single - map a single page for DMA
 * @dev: device to map for
 * @cpu_addr: kernel virtual address of the region to map
 * @size: size of the region
 * @direction: DMA direction
 *
 * Map the region pointed to by @cpu_addr for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
			     int direction)
{
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}
EXPORT_SYMBOL(sn_dma_map_single);

/**
 * sn_dma_unmap_single - unmap a DMA mapped page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of region
 * @direction: DMA direction
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain. On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			 int direction)
{
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));

	BUG_ON(dev->bus != &pci_bus_type);
	pcibr_dma_unmap(pcidev_info, dma_addr, direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single);

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sg: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @direction: DMA direction
 *
 * Unmap a set of streaming mode DMA translations.
 */
void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		     int nhwentries, int direction)
{
	int i;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));

	BUG_ON(dev->bus != &pci_bus_type);

	for (i = 0; i < nhwentries; i++, sg++) {
		pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}
EXPORT_SYMBOL(sn_dma_unmap_sg);

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sg: scatterlist to map
 * @nhwentries: number of entries
 * @direction: direction of the DMA transaction
 *
 * Maps each entry of @sg for DMA.
 */
int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  int direction)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sg;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
	int i;

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for (i = 0; i < nhwentries; i++, sg++) {
		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
						sg->length, 0);

		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, direction);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
EXPORT_SYMBOL(sn_dma_map_sg);

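/**
 * sn_dma_sync_single_for_cpu - sync a mapped region for CPU access
 * @dev: device the region is mapped for
 * @dma_handle: DMA address of the region
 * @size: size of the region
 * @direction: DMA direction
 *
 * SN platforms are always cache coherent (see sn_dma_unmap_single() above),
 * so no explicit sync is needed here.
 */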
void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);

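/**
 * sn_dma_sync_single_for_device - sync a mapped region for device access
 * @dev: device the region is mapped for
 * @dma_handle: DMA address of the region
 * @size: size of the region
 * @direction: DMA direction
 *
 * A no-op for the same reason as sn_dma_sync_single_for_cpu().
 */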
void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_device);

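/**
 * sn_dma_sync_sg_for_cpu - sync a mapped scatterlist for CPU access
 * @dev: device the scatterlist is mapped for
 * @sg: scatterlist to sync
 * @nelems: number of scatterlist entries
 * @direction: DMA direction
 *
 * A no-op, since SN platforms are cache coherent.
 */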
void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			    int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);

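/**
 * sn_dma_sync_sg_for_device - sync a mapped scatterlist for device access
 * @dev: device the scatterlist is mapped for
 * @sg: scatterlist to sync
 * @nelems: number of scatterlist entries
 * @direction: DMA direction
 *
 * A no-op, since SN platforms are cache coherent.
 */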
void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);

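/**
 * sn_dma_mapping_error - check a DMA address for errors
 * @dma_addr: DMA address to check
 *
 * The map routines above report failure by returning a zero DMA address,
 * so this always reports success.
 */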
int sn_dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
EXPORT_SYMBOL(sn_dma_mapping_error);

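/**
 * sn_pci_get_legacy_mem - get the CPU address of a bus's legacy memory space
 * @bus: bus to look up
 *
 * Returns the base of the legacy memory space for @bus as an uncached CPU
 * address, or ERR_PTR(-ENODEV) if the bus has no SN bus-soft structure.
 */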
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
			__IA64_UNCACHED_OFFSET);
}

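/**
 * sn_pci_legacy_read - read from a bus's legacy I/O space
 * @bus: bus to read from
 * @port: legacy port offset to read
 * @val: where to store the value read
 * @size: number of bytes to read
 *
 * Probes the uncached legacy I/O address with ia64_sn_probe_mem(). Judging
 * from the handling below, a probe result of 2 indicates a bad address
 * (-EINVAL) and a result of 1 an unpopulated one (@val is set to all ones);
 * on success the number of bytes read is returned.
 */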
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;

	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}

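/**
 * sn_pci_legacy_write - write to a bus's legacy I/O space
 * @bus: bus to write to
 * @port: legacy port offset to write
 * @val: value to write
 * @size: number of bytes to write (1, 2 or 4)
 *
 * Performs an uncached store of @val at the legacy I/O address for @port.
 * Returns the number of bytes written, or a negative errno on failure.
 */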
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;

	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}