/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>

#ifndef CONFIG_MMU

/* I have to use dcache values because I can't rely on the RAM size. */
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
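
/*
 * Worked example (the values are hypothetical, not from the original
 * file): with cpuinfo.dcache_base = 0x00000000 and cpuinfo.dcache_high
 * = 0x0fffffff, UNCACHED_SHADOW_MASK evaluates to 0x10000000. OR-ing it
 * into a cacheable kernel address such as 0x00800000 yields 0x10800000,
 * an alias that falls in the mirrored, uncached DDR region described
 * below.
 */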

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct page *page, *end, *free;
	unsigned long order;
	void *ret, *virt;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/* We could do with a page_to_phys and page_to_bus here. */
	virt = page_address(page);
	ret = ioremap(virt_to_phys(virt), size);
	if (!ret)
		goto no_remap;

	/*
	 * Here's the magic! Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
#endif
	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;

	/*
	 * Free the wasted pages beyond the requested size, and mark
	 * the pages actually in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(virt);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	for (; page < end; page++) {
		init_page_count(page);
		if (page >= free)
			__free_page(page);
		else
			SetPageReserved(page);
	}

	return ret;
no_remap:
	__free_pages(page, order);
no_page:
	return NULL;
}
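
/*
 * Worked example of the trimming loop in consistent_alloc() above
 * (the numbers are illustrative, not from the original file): for a
 * request of size = 3 * PAGE_SIZE, get_order() rounds up to order = 2,
 * so alloc_pages() returns four pages. free = page + 3 and
 * end = page + 4, so the loop marks pages 0..2 reserved and hands
 * page 3 back with __free_page().
 */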

#else

void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	int order, err, i;
	unsigned long page, va, flags;
	phys_addr_t pa;
	struct vm_struct *area;
	void *ret;

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/*
	 * We need to ensure that there are no cachelines in use,
	 * or worse, dirty in this area.
	 */
	flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);

	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (area == NULL) {
		free_pages(page, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)page);

	/* MS: This is the whole magic - use cache inhibit pages */
	flags = _PAGE_KERNEL | _PAGE_NO_CACHE;

	/*
	 * Set refcount=1 on all pages in an order>0
	 * allocation so that vfree() will actually
	 * free all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);
		for (i = 1; i < (1 << order); i++)
			init_page_count(rpage + i);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, flags);

	if (err) {
		vfree((void *)va);
		return NULL;
	}

	return ret;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(consistent_alloc);

/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(void *vaddr)
{
	if (in_interrupt())
		BUG();

	/* Clear SHADOW_MASK bit in address, and free as per usual */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
#endif
	vfree(vaddr);
}
EXPORT_SYMBOL(consistent_free);
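
/*
 * Illustrative usage sketch (not part of the original file; the ring
 * buffer and its size are hypothetical): a driver pairing
 * consistent_alloc() with consistent_free(). The returned pointer is
 * the CPU-side view of the buffer; *dma_handle is the address to
 * program into the device.
 */
#if 0
static int example_ring_setup(void)
{
	dma_addr_t ring_dma;
	void *ring = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &ring_dma);

	if (!ring)
		return -ENOMEM;

	/* ... program the (hypothetical) device with ring_dma and
	 * access the descriptors through ring ... */

	consistent_free(ring);
	return 0;
}
#endif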

/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert start address back down to unshadowed memory region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);
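
/*
 * Illustrative sketch (the buffer is hypothetical, not from the
 * original file): bracketing a device transfer of a normally-cached
 * buffer with consistent_sync(), for callers that do not go through
 * the uncached shadow.
 */
#if 0
static void example_sync(void *buf, size_t len)
{
	/* CPU filled buf: push the cachelines out before the device reads */
	consistent_sync(buf, len, PCI_DMA_TODEVICE);

	/* ... device DMA runs here ... */

	/* device wrote buf: purge stale cachelines before the CPU reads */
	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
}
#endif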

/*
 * consistent_sync_page makes memory consistent. Identical
 * to consistent_sync, but takes a struct page instead of a
 * virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;
	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);
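
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): the struct page variant is handy when only a page/offset
 * pair is at hand, e.g. when walking a scatterlist.
 */
#if 0
static void example_sync_page(struct page *pg, unsigned long off, size_t len)
{
	consistent_sync_page(pg, off, len, PCI_DMA_TODEVICE);
}
#endif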