Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _ASM_IA64_DMA_MAPPING_H |
2 | #define _ASM_IA64_DMA_MAPPING_H | |
3 | ||
4 | /* | |
5 | * Copyright (C) 2003-2004 Hewlett-Packard Co | |
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | |
7 | */ | |
1da177e4 | 8 | #include <asm/machvec.h> |
9b6eccfc | 9 | #include <linux/scatterlist.h> |
62fdd767 FY |
10 | #include <asm/swiotlb.h> |
11 | ||
175add19 JK |
12 | #define ARCH_HAS_DMA_GET_REQUIRED_MASK |
13 | ||
62fdd767 FY |
14 | struct dma_mapping_ops { |
15 | int (*mapping_error)(struct device *dev, | |
16 | dma_addr_t dma_addr); | |
17 | void* (*alloc_coherent)(struct device *dev, size_t size, | |
18 | dma_addr_t *dma_handle, gfp_t gfp); | |
19 | void (*free_coherent)(struct device *dev, size_t size, | |
20 | void *vaddr, dma_addr_t dma_handle); | |
21 | dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr, | |
22 | size_t size, int direction); | |
23 | void (*unmap_single)(struct device *dev, dma_addr_t addr, | |
24 | size_t size, int direction); | |
25 | void (*sync_single_for_cpu)(struct device *hwdev, | |
26 | dma_addr_t dma_handle, size_t size, | |
27 | int direction); | |
28 | void (*sync_single_for_device)(struct device *hwdev, | |
29 | dma_addr_t dma_handle, size_t size, | |
30 | int direction); | |
31 | void (*sync_single_range_for_cpu)(struct device *hwdev, | |
32 | dma_addr_t dma_handle, unsigned long offset, | |
33 | size_t size, int direction); | |
34 | void (*sync_single_range_for_device)(struct device *hwdev, | |
35 | dma_addr_t dma_handle, unsigned long offset, | |
36 | size_t size, int direction); | |
37 | void (*sync_sg_for_cpu)(struct device *hwdev, | |
38 | struct scatterlist *sg, int nelems, | |
39 | int direction); | |
40 | void (*sync_sg_for_device)(struct device *hwdev, | |
41 | struct scatterlist *sg, int nelems, | |
42 | int direction); | |
43 | int (*map_sg)(struct device *hwdev, struct scatterlist *sg, | |
44 | int nents, int direction); | |
45 | void (*unmap_sg)(struct device *hwdev, | |
46 | struct scatterlist *sg, int nents, | |
47 | int direction); | |
48 | int (*dma_supported_op)(struct device *hwdev, u64 mask); | |
49 | int is_phys; | |
50 | }; | |
51 | ||
52 | extern struct dma_mapping_ops *dma_ops; | |
53 | extern struct ia64_machine_vector ia64_mv; | |
54 | extern void set_iommu_machvec(void); | |
1da177e4 | 55 | |
3a80b6aa FT |
56 | #define dma_alloc_coherent(dev, size, handle, gfp) \ |
57 | platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA) | |
58 | ||
b7de8e7e RD |
59 | /* coherent mem. is cheap */ |
60 | static inline void * | |
61 | dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
62 | gfp_t flag) | |
63 | { | |
64 | return dma_alloc_coherent(dev, size, dma_handle, flag); | |
65 | } | |
1da177e4 | 66 | #define dma_free_coherent platform_dma_free_coherent |
b7de8e7e RD |
67 | static inline void |
68 | dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, | |
69 | dma_addr_t dma_handle) | |
70 | { | |
71 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | |
72 | } | |
309df0c5 AK |
73 | #define dma_map_single_attrs platform_dma_map_single_attrs |
74 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | |
75 | size_t size, int dir) | |
76 | { | |
77 | return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL); | |
78 | } | |
79 | #define dma_map_sg_attrs platform_dma_map_sg_attrs | |
80 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl, | |
81 | int nents, int dir) | |
82 | { | |
83 | return dma_map_sg_attrs(dev, sgl, nents, dir, NULL); | |
84 | } | |
85 | #define dma_unmap_single_attrs platform_dma_unmap_single_attrs | |
86 | static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr, | |
87 | size_t size, int dir) | |
88 | { | |
89 | return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL); | |
90 | } | |
91 | #define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs | |
92 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | |
93 | int nents, int dir) | |
94 | { | |
95 | return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL); | |
96 | } | |
#define dma_sync_single_for_cpu		platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device	platform_dma_sync_single_for_device
#define dma_sync_sg_for_device		platform_dma_sync_sg_for_device
#define dma_mapping_error		platform_dma_mapping_error

/*
 * Page-level map/unmap expressed via the single-buffer primitives.
 * All macro parameters are parenthesized in the expansion to avoid
 * precedence surprises if callers pass operator expressions.
 */
#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single((dev), page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single((dev), (dma_addr), (size), (dir))

/*
 * Rest of this file is part of the "Advanced DMA API". Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

/*
 * NOTE(review): "offset" is deliberately dropped — the platform hook is
 * handed the whole mapping; presumably safe because syncing a superset of
 * the range is always permitted. Confirm against the machvec back-ends.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu((dev), (dma_handle), (size), (dir))
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device((dev), (dma_handle), (size), (dir))
117 | ||
118 | #define dma_supported platform_dma_supported | |
119 | ||
120 | static inline int | |
121 | dma_set_mask (struct device *dev, u64 mask) | |
122 | { | |
123 | if (!dev->dma_mask || !dma_supported(dev, mask)) | |
124 | return -EIO; | |
125 | *dev->dma_mask = mask; | |
126 | return 0; | |
127 | } | |
128 | ||
e1531b42 | 129 | extern int dma_get_cache_alignment(void); |
1da177e4 LT |
130 | |
131 | static inline void | |
d3fa72e4 RB |
132 | dma_cache_sync (struct device *dev, void *vaddr, size_t size, |
133 | enum dma_data_direction dir) | |
1da177e4 LT |
134 | { |
135 | /* | |
136 | * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to | |
137 | * ensure that dma_cache_sync() enforces order, hence the mb(). | |
138 | */ | |
139 | mb(); | |
140 | } | |
141 | ||
f67637ee | 142 | #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ |
1da177e4 | 143 | |
62fdd767 FY |
144 | static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) |
145 | { | |
146 | return dma_ops; | |
147 | } | |
148 | ||
149 | ||
150 | ||
1da177e4 | 151 | #endif /* _ASM_IA64_DMA_MAPPING_H */ |