/* arch/powerpc/mm/dma-noncoherent.c */
/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

#include "mmu_decl.h"

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE         (IOREMAP_TOP)
#define CONSISTENT_END          (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

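/*
 * CONSISTENT_OFFSET() converts a virtual address inside the consistent
 * region into a page index relative to CONSISTENT_BASE; e.g. with 4K
 * pages, CONSISTENT_BASE + 0x3000 yields index 3.
 */
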
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region  region;
 *    unsigned long     flags;
 *    struct page       **pages;
 *    unsigned int      nr_pages;
 *    unsigned long     phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *    .vm_list  = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *    .vm_start = VMALLOC_START,
 *    .vm_end   = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
};

static struct ppc_vm_region consistent_head = {
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};

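/*
 * Carve a size-byte range out of the consistent region: scan the
 * address-ordered region list under consistent_lock and take the first
 * gap that is large enough, keeping the list sorted by address.
 */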
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct ppc_vm_region *c, *new;

        new = kmalloc(sizeof(struct ppc_vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;
                addr = c->vm_end;
                if (addr > end)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}

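/*
 * Find the region that starts exactly at @addr; arch_dma_free() calls
 * this with consistent_lock already held.
 */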
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
        struct ppc_vm_region *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        struct ppc_vm_region *c;
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        goto no_page;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        goto no_page;
                }
        }

        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) ||
            size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
                       size, mask);
                return NULL;
        }

        order = get_order(size);

        /* Might be useful if we ever have a real legacy DMA zone... */
        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                flush_dcache_range(kaddr, kaddr + size);
        }

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = ppc_vm_region_alloc(&consistent_head, size,
                                gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                unsigned long vaddr = c->vm_start;
                struct page *end = page + (1 << order);

                split_page(page, order);

                /*
                 * Set the "dma handle"
                 */
                *dma_handle = phys_to_dma(dev, page_to_phys(page));

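                /*
                 * Remap each (now order-0) page uncached at its address
                 * in the consistent region and mark it reserved.
                 */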
                do {
                        SetPageReserved(page);
                        map_kernel_page(vaddr, page_to_phys(page),
                                        pgprot_noncached(PAGE_KERNEL));
                        page++;
                        vaddr += PAGE_SIZE;
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }

        if (page)
                __free_pages(page, order);
 no_page:
        return NULL;
}
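
/*
 * Illustrative sketch (assumes the generic dma-direct path is in use):
 * a driver on a CONFIG_NOT_COHERENT_CACHE platform normally reaches
 * arch_dma_alloc()/arch_dma_free() through the DMA API, e.g.:
 *
 *      dma_addr_t dma_handle;
 *      void *cpu_addr;
 *
 *      cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *      ...                     (program dma_handle into the device)
 *      dma_free_coherent(dev, size, cpu_addr, dma_handle);
 *
 * cpu_addr is the uncached mapping created above and dma_handle the
 * bus address from phys_to_dma().
 */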

/*
 * free a page as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        struct ppc_vm_region *c;
        unsigned long flags, addr;

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);

        c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
        if (!c)
                goto no_area;

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

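        /*
         * Tear the mapping down one page at a time: clear each kernel
         * PTE and give the underlying (reserved) page back to the page
         * allocator.
         */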
        addr = c->vm_start;
        do {
                pte_t *ptep;
                unsigned long pfn;

                ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
                                                               addr),
                                                    addr),
                                         addr);
                if (!pte_none(*ptep) && pte_present(*ptep)) {
                        pfn = pte_pfn(*ptep);
                        pte_clear(&init_mm, addr, ptep);
                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);
                                __free_reserved_page(page);
                        }
                }
                addr += PAGE_SIZE;
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        list_del(&c->vm_list);

        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, vaddr);
        dump_stack();
}

/*
 * make an area consistent.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end   = start + size;

        switch (direction) {
        case DMA_NONE:
                BUG();
        case DMA_FROM_DEVICE:
                /*
                 * invalidate only when cache-line aligned otherwise there is
                 * the potential for discarding uncommitted data from the cache
                 */
                if ((start | end) & (L1_CACHE_BYTES - 1))
                        flush_dcache_range(start, end);
                else
                        invalidate_dcache_range(start, end);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_range(start, end);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_range(start, end);
                break;
        }
}

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
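/*
 * Worked example (assuming 4K pages): for offset = 3000 and size = 5000
 * the first kmap_atomic() segment syncs the last 1096 bytes of the first
 * page, the second syncs the first 3904 bytes of the next page, and
 * nr_segs = 2.
 */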
static inline void __dma_sync_page_highmem(struct page *page,
                unsigned long offset, size_t size, int direction)
{
        size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
        size_t cur_size = seg_size;
        unsigned long flags, start, seg_offset = offset;
        int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
        int seg_nr = 0;

        local_irq_save(flags);

        do {
                start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

                /* Sync this buffer segment */
                __dma_sync((void *)start, seg_size, direction);
                kunmap_atomic((void *)start);
                seg_nr++;

                /* Calculate next buffer segment size */
                seg_size = min((size_t)PAGE_SIZE, size - cur_size);

                /* Add the segment size to our running total */
                cur_size += seg_size;
                seg_offset = 0;
        } while (seg_nr < nr_segs);

        local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent. Identical to __dma_sync, but
 * takes a physical address instead of a kernel virtual address.
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned offset = paddr & ~PAGE_MASK;

#ifdef CONFIG_HIGHMEM
        __dma_sync_page_highmem(page, offset, size, dir);
#else
        unsigned long start = (unsigned long)page_address(page) + offset;
        __dma_sync((void *)start, size, dir);
#endif
}
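
/*
 * Cache-maintenance hooks for streaming DMA: the generic DMA mapping
 * code calls these around map/unmap and dma_sync_*() operations on
 * non-coherent platforms.
 */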
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_sync_page(paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_sync_page(paddr, size, dir);
}

/*
 * Return the PFN for a given cpu virtual address returned by arch_dma_alloc.
 */
long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
                dma_addr_t dma_addr)
{
        /* This should always be populated, so we don't test every
         * level. If that fails, we'll have a nice crash which
         * will be as good as a BUG_ON()
         */
        unsigned long cpu_addr = (unsigned long)vaddr;
        pgd_t *pgd = pgd_offset_k(cpu_addr);
        pud_t *pud = pud_offset(pgd, cpu_addr);
        pmd_t *pmd = pmd_offset(pud, cpu_addr);
        pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

        if (pte_none(*ptep) || !pte_present(*ptep))
                return 0;
        return pte_pfn(*ptep);
}