if (bm->page_list) {
if (bm->dma_dir != DMA_NONE) {
- /*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
- */
- buf = &bm->page_list[0];
- dma_free_coherent(bm->dma_hw_dev,
- PAGE_SIZE * bm->n_pages,
- buf->virt_addr, buf->dma_addr);
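+			/*
+			 * The DMA buffer is now allocated one page at a time,
+			 * so each page must be freed individually.
+			 */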
+ for (i = 0; i < bm->n_pages; i++) {
+ buf = &bm->page_list[i];
+ dma_free_coherent(bm->dma_hw_dev, PAGE_SIZE,
+ buf->virt_addr,
+ buf->dma_addr);
+ }
} else {
for (i = 0; i < bm->n_pages; i++) {
buf = &bm->page_list[i];
goto err;
if (bm->dma_dir != DMA_NONE) {
- void *virt_addr;
- dma_addr_t dma_addr;
-
- /*
- * Currently, the DMA buffer needs to be allocated as a
- * single block so that it can be mmap()'ed.
- */
- virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
- PAGE_SIZE * n_pages, &dma_addr,
- GFP_KERNEL);
- if (!virt_addr)
- goto err;
-
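+		/*
+		 * Allocate each page of the DMA buffer separately; mmap() now
+		 * maps the buffer page by page, so a single contiguous block
+		 * is no longer required.
+		 */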
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
- buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
- buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
+ buf->virt_addr =
+ dma_alloc_coherent(bm->dma_hw_dev, PAGE_SIZE,
+ &buf->dma_addr, GFP_KERNEL);
+ if (!buf->virt_addr)
+ break;
}
-
- bm->n_pages = i;
} else {
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
SetPageReserved(virt_to_page(buf->virt_addr));
}
-
- bm->n_pages = i;
- if (i < n_pages)
- goto err;
}
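+	/*
+	 * Record how many pages were allocated successfully; on failure, only
+	 * those pages are freed by the cleanup path.
+	 */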
+ bm->n_pages = i;
+ if (i < n_pages)
+ goto err;
return bm;
goto done;
}
if (bm->dma_dir != DMA_NONE) {
+ unsigned long vm_start = vma->vm_start;
+ unsigned long vm_end = vma->vm_end;
+
/*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
+ * Buffer pages are not contiguous, so temporarily modify VMA
+ * start and end addresses for each buffer page.
*/
- buf = &bm->page_list[0];
- retval = dma_mmap_coherent(bm->dma_hw_dev, vma, buf->virt_addr,
- buf->dma_addr, n_pages * PAGE_SIZE);
+ for (i = 0; i < n_pages; ++i) {
+ buf = &bm->page_list[i];
+ vma->vm_start = start;
+ vma->vm_end = start + PAGE_SIZE;
+ retval = dma_mmap_coherent(bm->dma_hw_dev, vma,
+ buf->virt_addr,
+ buf->dma_addr, PAGE_SIZE);
+ if (retval)
+ break;
+
+ start += PAGE_SIZE;
+ }
+
+		vma->vm_start = vm_start;
+		vma->vm_end = vm_end;
} else {
for (i = 0; i < n_pages; ++i) {
unsigned long pfn;
start += PAGE_SIZE;
}
+ }
#ifdef CONFIG_MMU
- /*
- * Leaving behind a partial mapping of a buffer we're about to
- * drop is unsafe, see remap_pfn_range_notrack().
- * We need to zap the range here ourselves instead of relying
- * on the automatic zapping in remap_pfn_range() because we call
- * remap_pfn_range() in a loop.
- */
- if (retval)
- zap_vma_ptes(vma, vma->vm_start, size);
+ /*
+ * Leaving behind a partial mapping of a buffer we're about to drop is
+ * unsafe, see remap_pfn_range_notrack(). We need to zap the range
+ * here ourselves instead of relying on the automatic zapping in
+ * remap_pfn_range() because we call remap_pfn_range() in a loop.
+ */
+ if (retval)
+ zap_vma_ptes(vma, vma->vm_start, size);
#endif
- }
if (retval == 0) {
vma->vm_ops = &comedi_vm_ops;