Commit | Line | Data |
---|---|---|
1394f032 | 1 | /* |
96f1050d | 2 | * Dynamic DMA mapping support |
1394f032 | 3 | * |
96f1050d | 4 | * Copyright 2005-2009 Analog Devices Inc. |
1394f032 | 5 | * |
96f1050d | 6 | * Licensed under the GPL-2 or later |
1394f032 BW |
7 | */ |
8 | ||
9 | #include <linux/types.h> | |
dd3b0e3e | 10 | #include <linux/gfp.h> |
1394f032 | 11 | #include <linux/string.h> |
1394f032 | 12 | #include <linux/spinlock.h> |
1394f032 | 13 | #include <linux/dma-mapping.h> |
82861924 | 14 | #include <linux/scatterlist.h> |
1394f032 BW |
15 | |
/* Protects all reads/writes of the dma_page bitmap below. */
static spinlock_t dma_page_lock;
/* In-use bitmap, one bit per page of the uncached region; allocated in dma_alloc_init(). */
static unsigned long *dma_page;
/* Number of pages the bitmap covers (dma_size >> PAGE_SHIFT). */
static unsigned int dma_pages;
/* Page-aligned virtual start address of the uncached DMA region. */
static unsigned long dma_base;
/* Size of the uncached DMA region in bytes. */
static unsigned long dma_size;
/* Non-zero once dma_alloc_init() has completed; checked by __alloc_dma_pages(). */
static unsigned int dma_initialized;
22 | ||
dd3b0e3e | 23 | static void dma_alloc_init(unsigned long start, unsigned long end) |
1394f032 BW |
24 | { |
25 | spin_lock_init(&dma_page_lock); | |
26 | dma_initialized = 0; | |
27 | ||
cb5ae60f | 28 | dma_page = (unsigned long *)__get_free_page(GFP_KERNEL); |
1394f032 BW |
29 | memset(dma_page, 0, PAGE_SIZE); |
30 | dma_base = PAGE_ALIGN(start); | |
31 | dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start); | |
32 | dma_pages = dma_size >> PAGE_SHIFT; | |
33 | memset((void *)dma_base, 0, DMA_UNCACHED_REGION); | |
34 | dma_initialized = 1; | |
35 | ||
b85d858b | 36 | printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__, |
1394f032 BW |
37 | dma_page, dma_pages, dma_base); |
38 | } | |
39 | ||
40 | static inline unsigned int get_pages(size_t size) | |
41 | { | |
42 | return ((size - 1) >> PAGE_SHIFT) + 1; | |
43 | } | |
44 | ||
/*
 * First-fit allocator: find @pages contiguous clear bits in dma_page,
 * mark them set, and return the virtual address of the first page.
 * Returns 0 on failure (no run of @pages free pages available).
 */
static unsigned long __alloc_dma_pages(unsigned int pages)
{
	unsigned long ret = 0, flags;
	int i, count = 0;

	/*
	 * Lazy one-time init: carve the uncached region out of the top of
	 * RAM.  NOTE(review): this check runs before the lock is taken, so
	 * two first-time callers could race into dma_alloc_init() — confirm
	 * the first allocation always happens single-threaded (e.g. at boot).
	 */
	if (dma_initialized == 0)
		dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);

	spin_lock_irqsave(&dma_page_lock, flags);

	/* Scan for a run of `pages` consecutive free (clear) bits. */
	for (i = 0; i < dma_pages;) {
		if (test_bit(i++, dma_page) == 0) {
			if (++count == pages) {
				/*
				 * Run found.  Walk back over it setting the
				 * bits; the --i decrements leave i at the
				 * first page of the run.
				 */
				while (count--)
					__set_bit(--i, dma_page);

				ret = dma_base + (i << PAGE_SHIFT);
				break;
			}
		} else
			count = 0;	/* run broken, start counting over */
	}
	spin_unlock_irqrestore(&dma_page_lock, flags);
	return ret;
}
70 | ||
71 | static void __free_dma_pages(unsigned long addr, unsigned int pages) | |
72 | { | |
73 | unsigned long page = (addr - dma_base) >> PAGE_SHIFT; | |
74 | unsigned long flags; | |
75 | int i; | |
76 | ||
77 | if ((page + pages) > dma_pages) { | |
b85d858b | 78 | printk(KERN_ERR "%s: freeing outside range.\n", __func__); |
1394f032 BW |
79 | BUG(); |
80 | } | |
81 | ||
82 | spin_lock_irqsave(&dma_page_lock, flags); | |
cb5ae60f MH |
83 | for (i = page; i < page + pages; i++) |
84 | __clear_bit(i, dma_page); | |
85 | ||
1394f032 BW |
86 | spin_unlock_irqrestore(&dma_page_lock, flags); |
87 | } | |
88 | ||
89 | void *dma_alloc_coherent(struct device *dev, size_t size, | |
dd3b0e3e | 90 | dma_addr_t *dma_handle, gfp_t gfp) |
1394f032 BW |
91 | { |
92 | void *ret; | |
93 | ||
94 | ret = (void *)__alloc_dma_pages(get_pages(size)); | |
95 | ||
96 | if (ret) { | |
97 | memset(ret, 0, size); | |
98 | *dma_handle = virt_to_phys(ret); | |
99 | } | |
100 | ||
101 | return ret; | |
102 | } | |
103 | EXPORT_SYMBOL(dma_alloc_coherent); | |
104 | ||
/*
 * Release a buffer previously obtained from dma_alloc_coherent().
 * @dev and @dma_handle are unused; the region is located purely by
 * its virtual address and length.
 */
void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
	__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
EXPORT_SYMBOL(dma_free_coherent);
112 | ||
113 | /* | |
dd3b0e3e | 114 | * Streaming DMA mappings |
1394f032 | 115 | */ |
dd3b0e3e BS |
/*
 * Perform cache maintenance for a streaming DMA mapping of
 * [addr, addr + size) in direction @dir.  Thin out-of-line wrapper
 * around the arch helper _dma_sync() (defined elsewhere — presumably
 * the inline/asm implementation; see the arch headers).
 */
void __dma_sync(dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	_dma_sync(addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync);
1394f032 BW |
122 | |
123 | int | |
124 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
125 | enum dma_data_direction direction) | |
126 | { | |
127 | int i; | |
128 | ||
b07af760 | 129 | for (i = 0; i < nents; i++, sg++) { |
58b053e4 | 130 | sg->dma_address = (dma_addr_t) sg_virt(sg); |
dd3b0e3e | 131 | __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); |
b07af760 | 132 | } |
1394f032 BW |
133 | |
134 | return nents; | |
135 | } | |
136 | EXPORT_SYMBOL(dma_map_sg); | |
137 | ||
dd3b0e3e BS |
138 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
139 | int nelems, enum dma_data_direction direction) | |
1394f032 | 140 | { |
dd3b0e3e | 141 | int i; |
1394f032 | 142 | |
dd3b0e3e BS |
143 | for (i = 0; i < nelems; i++, sg++) { |
144 | sg->dma_address = (dma_addr_t) sg_virt(sg); | |
145 | __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); | |
146 | } | |
1394f032 | 147 | } |
dd3b0e3e | 148 | EXPORT_SYMBOL(dma_sync_sg_for_device); |