/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
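/*
 * Example (hypothetical device and offset value): platform setup code
 * could publish a non-zero bus offset like this, assuming dma_data is
 * stored as a void * as the cast in get_dma_direct_offset() suggests:
 *
 *	pdev->dev.archdata.dma_data = (void *)0x80000000UL;
 */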
/* Flush or invalidate the data cache for a physical range, depending on
 * the DMA direction.
 */
static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		/* CPU wrote the buffer: flush it out to RAM for the device */
		flush_dcache_range(paddr + offset, paddr + offset + size);
		break;
	case DMA_FROM_DEVICE:
		/* Device will write the buffer: drop stale CPU cache lines */
		invalidate_dcache_range(paddr + offset, paddr + offset + size);
		break;
	default:
		BUG();
	}
}

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME: not sure if this is correct */
}
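/*
 * Worked example (hypothetical values): with archdata.dma_data set to
 * 0x80000000 and a buffer at physical address 0x00001000, the device is
 * handed bus address 0x80000000 + 0x00001000 = 0x80001000.
 */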

/*
 * The cache is not DMA-coherent here, so coherent buffers come from the
 * uncached consistent pool (consistent_alloc()) rather than the normal
 * page allocator.
 */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
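/*
 * Driver-side usage sketch (hypothetical device and length): the generic
 * dma_alloc_coherent()/dma_free_coherent() wrappers end up in the
 * alloc_coherent/free_coherent callbacks above via dma_direct_ops:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */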

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
				sg->length, direction);
	}

	return nents;
}
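/*
 * Usage sketch (hypothetical buffer): dma_map_sg() dispatches to the
 * map_sg callback above:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) == 0)
 *		return -EIO;
 */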

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to undo for a direct mapping */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* Direct mapping: any DMA mask is accepted */
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					struct page *page,
					unsigned long offset,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	/* Synchronize caches for the transfer, then translate the physical
	 * address to a bus address.
	 */
	__dma_sync_page(page_to_phys(page), offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
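/*
 * Streaming-mapping usage sketch (hypothetical buffer): dma_map_single()
 * and dma_unmap_single() resolve to the map_page/unmap_page callbacks in
 * this file:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... device performs the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */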

static inline void dma_direct_unmap_page(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	/* No further cache cleanup is necessary beyond the sync below.
	 *
	 * dma_address is already a physical address, which is exactly what
	 * __dma_sync_page() expects, so it can be passed through directly.
	 */
	__dma_sync_page(dma_address, 0, size, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
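/*
 * Dispatch sketch (assuming the arch's get_dma_ops() returns
 * &dma_direct_ops, which is how this table would normally be consumed):
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t handle = ops->map_page(dev, page, 0, size, dir, NULL);
 */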

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);