/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
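/*
 * Illustrative sketch, not part of the original file: platform code that
 * needs a non-zero device-side offset could set archdata.dma_data during
 * device setup, roughly as below. The function name and the 0x80000000
 * value are made up, and the cast assumes dma_data is a pointer-sized
 * field; get_dma_direct_offset() below reads it back as an unsigned long.
 */
#if 0
static void example_set_dma_offset(struct device *dev)
{
	/* Assumed: devices on this bus see RAM starting at 0x80000000 */
	dev->archdata.dma_data = (void *)0x80000000UL;
}
#endif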
static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
				   size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		flush_dcache_range(paddr + offset, paddr + offset + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr + offset, paddr + offset + size);
		break;
	default:
		BUG();
	}
}

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
}

#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
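
/*
 * Illustrative sketch, not part of the original file: a driver reaches
 * dma_direct_alloc_coherent() above through the generic DMA API, assuming
 * dma_direct_ops is the device's dma_map_ops. The "example_" name is made up.
 */
#if 0
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* the CPU uses cpu_addr, the device is given handle */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
	return 0;
}
#endif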

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
				sg->length, direction);
	}

	return nents;
}
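
/*
 * Illustrative sketch, not part of the original file: how a driver would
 * exercise the scatter/gather path above through the generic DMA API. The
 * "example_" name is made up; note that dma_unmap_sg() takes the original
 * nents, not the count returned by dma_map_sg().
 */
#if 0
static int example_sg_transfer(struct device *dev, struct scatterlist *sgl,
			       int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -EIO;

	/* program sg_dma_address()/sg_dma_len() of each entry into the device */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
#endif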

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync_page(page_to_phys(page), offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
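
/*
 * Illustrative sketch, not part of the original file: dma_map_single() and
 * dma_map_page() from the generic API land in dma_direct_map_page() above
 * (assuming dma_direct_ops is in use), which performs the cache maintenance
 * and applies the per-device offset. The "example_" name is made up.
 */
#if 0
static void example_single_mapping(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* the device writes into the buffer via addr */

	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
}
#endif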

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* No cache cleanup beyond the sync below is necessary.
	 *
	 * No phys_to_virt() conversion is needed either: __dma_sync_page()
	 * operates on physical addresses, and dma_address already is one,
	 * so it can be passed through unchanged.
	 */
	__dma_sync_page(dma_address, 0, size, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
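
/*
 * Sketch based on the powerpc-style get_dma_ops() pattern (an assumption,
 * not code from this file): the architecture's asm/dma-mapping.h typically
 * resolves a device's ops table roughly like this, falling back to
 * dma_direct_ops when no per-device ops are set.
 */
#if 0
static inline struct dma_map_ops *example_get_dma_ops(struct device *dev)
{
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return &dma_direct_ops;

	return dev->archdata.dma_ops;
}
#endif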

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
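
/*
 * Note (an assumption, not from the original file): dma_debug_init() only
 * does real work when the kernel is built with CONFIG_DMA_API_DEBUG;
 * otherwise it is a no-op stub. The preallocated entries are what dma-debug
 * uses to track and sanity-check mappings created through the ops above.
 */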