Merge tag 'microblaze-3.19-rc1' of git://git.monstr.eu/linux-2.6-microblaze
author	Linus Torvalds <torvalds@linux-foundation.org>	Wed, 17 Dec 2014 17:54:05 +0000 (09:54 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Wed, 17 Dec 2014 17:54:05 +0000 (09:54 -0800)
Pull Microblaze fix from Michal Simek:
 "Fix mmap for cache coherent memory"

* tag 'microblaze-3.19-rc1' of git://git.monstr.eu/linux-2.6-microblaze:
  microblaze: Fix mmap for cache coherent memory

arch/microblaze/include/asm/pgtable.h
arch/microblaze/kernel/dma.c
arch/microblaze/mm/consistent.c
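
For context, a minimal driver-side sketch of the path that reaches the new .mmap op in the dma.c hunk below: a driver allocates a coherent buffer with dma_alloc_coherent() and forwards userspace mmap() requests through dma_mmap_coherent(), which dispatches to dma_direct_ops.mmap on microblaze. This is illustration only; struct my_state, my_mmap and my_fops are made-up names, not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

struct my_state {				/* hypothetical driver state */
	struct device *dev;
	void *cpu_addr;				/* from dma_alloc_coherent() */
	dma_addr_t dma_handle;
	size_t size;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_state *st = file->private_data;

	/* Dispatches to the arch dma_map_ops ->mmap hook, i.e. the new
	 * dma_direct_mmap_coherent() on microblaze. */
	return dma_mmap_coherent(st->dev, vma, st->cpu_addr,
				 st->dma_handle, st->size);
}

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.mmap	= my_mmap,
};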

diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 95cef0b5f836627b500bb6d03e5730c13c3314ff..df19d0c47be8df57975898d0125d069938d36e91 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -565,6 +565,7 @@ void consistent_free(size_t size, void *vaddr);
 void consistent_sync(void *vaddr, size_t size, int direction);
 void consistent_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction);
+unsigned long consistent_virt_to_pfn(void *vaddr);
 
 void setup_memory(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 4633c36c1b32fd09134131fabb184c0b2533aaa6..ed7ba8a118227440cd53c2670cc3afd7ac4bf360 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -154,9 +154,36 @@ dma_direct_sync_sg_for_device(struct device *dev,
                        __dma_sync(sg->dma_address, sg->length, direction);
 }
 
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+                            void *cpu_addr, dma_addr_t handle, size_t size,
+                            struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+       unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
+       unsigned long pfn;
+
+       if (off >= count || user_count > (count - off))
+               return -ENXIO;
+
+#ifdef NOT_COHERENT_CACHE
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       pfn = consistent_virt_to_pfn(cpu_addr);
+#else
+       pfn = virt_to_pfn(cpu_addr);
+#endif
+       return remap_pfn_range(vma, vma->vm_start, pfn + off,
+                              vma->vm_end - vma->vm_start, vma->vm_page_prot);
+#else
+       return -ENXIO;
+#endif
+}
+
 struct dma_map_ops dma_direct_ops = {
        .alloc          = dma_direct_alloc_coherent,
        .free           = dma_direct_free_coherent,
+       .mmap           = dma_direct_mmap_coherent,
        .map_sg         = dma_direct_map_sg,
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
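
The bounds check in the new op rejects any window that does not fit the allocation: off and user_count are the mmap offset and length in pages, count is the allocation size in pages, so off >= count or user_count > count - off means the caller asked for pages past the end of the buffer. On a NOT_COHERENT_CACHE configuration the buffer does not live in the linear map, so the page frame is recovered with consistent_virt_to_pfn() instead of virt_to_pfn(), and the userspace mapping is made uncached. From userspace the whole path is an ordinary mmap() of the device node; a hedged sketch (the /dev/mydev node is made up, matching the hypothetical driver above):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 65536;			/* must fit the kernel allocation */
	int fd = open("/dev/mydev", O_RDWR);	/* hypothetical device node */
	void *p;

	if (fd < 0)
		return 1;
	/* offset 0 -> vm_pgoff 0; a non-zero, page-aligned offset is
	 * checked against the allocation size by the new op. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}
	/* ... read/write the coherent buffer ... */
	munmap(p, len);
	close(fd);
	return 0;
}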
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index e10ad930895e23a5d8e229ba3267eb9344dc772e..b06c3a7faf20b51724fadd84f02ca33cc7390cc6 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -156,6 +156,25 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 }
 EXPORT_SYMBOL(consistent_alloc);
 
+#ifdef CONFIG_MMU
+static pte_t *consistent_virt_to_pte(void *vaddr)
+{
+       unsigned long addr = (unsigned long)vaddr;
+
+       return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
+}
+
+unsigned long consistent_virt_to_pfn(void *vaddr)
+{
+       pte_t *ptep = consistent_virt_to_pte(vaddr);
+
+       if (pte_none(*ptep) || !pte_present(*ptep))
+               return 0;
+
+       return pte_pfn(*ptep);
+}
+#endif
+
 /*
  * free page(s) as defined by the above mapping.
  */
@@ -181,13 +200,9 @@ void consistent_free(size_t size, void *vaddr)
        } while (size -= PAGE_SIZE);
 #else
        do {
-               pte_t *ptep;
+               pte_t *ptep = consistent_virt_to_pte(vaddr);
                unsigned long pfn;
 
-               ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
-                                               (unsigned int)vaddr),
-                                       (unsigned int)vaddr),
-                               (unsigned int)vaddr);
                if (!pte_none(*ptep) && pte_present(*ptep)) {
                        pfn = pte_pfn(*ptep);
                        pte_clear(&init_mm, (unsigned int)vaddr, ptep);
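
consistent_virt_to_pfn() above walks the kernel page tables (pgd_offset_k -> pmd_offset -> pte_offset_kernel) because, in the NOT_COHERENT_CACHE case, consistent_alloc() returns an address in the uncached consistent-mapping area rather than the linear map, so virt_to_pfn() would not yield the real page frame; the walk reads the pfn out of the pte and returns 0 when no mapping is present. consistent_free() now reuses the same consistent_virt_to_pte() helper instead of open-coding the walk. Illustrative only, not part of the patch:

	unsigned long pfn = consistent_virt_to_pfn(vaddr);	/* 0 if unmapped */
	phys_addr_t phys = (phys_addr_t)pfn << PAGE_SHIFT;	/* page frame -> physical address */
	/* dma.c passes pfn + vma->vm_pgoff straight to remap_pfn_range(). */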