/*
 * arch/x86/kernel/pci-nommu.c
 *
 * Fallback functions when the main IOMMU code is not compiled in.
 * This code is roughly equivalent to i386.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>
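
/*
 * With no IOMMU, a bus (DMA) address is just the physical address of
 * the memory, so "mapping" reduces to computing that address and
 * checking that the device can reach it through its DMA mask.  Nothing
 * is remapped and nothing needs tearing down, which is why no unmap
 * callbacks appear in the ops table at the bottom of this file.
 */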

static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}
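
/*
 * check_addr() returns 1 when the range [bus, bus + size) is reachable
 * through the device's DMA mask and 0 otherwise.  dma_capable() is, in
 * essence, a bounds check (a sketch of the idea; the real helper lives
 * in the asm dma-mapping header):
 *
 *	bus + size - 1 <= *hwdev->dma_mask
 *
 * The overflow message is suppressed for masks below 32 bits,
 * presumably to avoid log spam from ISA-style devices whose small
 * masks overflow routinely.
 */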

static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return DMA_ERROR_CODE;
	flush_write_buffers();
	return bus;
}
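
/*
 * A sketch of how a driver reaches nommu_map_page() through the
 * generic DMA API ("dev", "buf" and "len" are hypothetical driver-side
 * names, not from this file):
 *
 *	dma_addr_t handle = dma_map_page(dev, virt_to_page(buf),
 *					 offset_in_page(buf), len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *
 * With no IOMMU, handle is simply the physical address of the buffer.
 */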

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}
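
/*
 * A sketch of the caller's side after a successful nommu_map_sg()
 * ("program_hw" is a hypothetical device-specific helper):
 *
 *	for_each_sg(sglist, sg, nents, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *
 * Since each dma_address is just sg_phys(s) and entries are never
 * merged, the routine returns nents itself on success.
 */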

static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
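
/*
 * Coherent memory here comes straight from the page allocator:
 * .alloc_coherent below points at dma_generic_alloc_coherent() (in
 * pci-dma.c), so freeing is the matching free_pages() call on the
 * whole allocation.
 */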

static void nommu_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}

static void nommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}
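
/*
 * x86 DMA is cache-coherent, so syncing a buffer for the device only
 * requires draining the CPU write buffers; no cache lines need to be
 * flushed or invalidated.  That is also why no sync_*_for_cpu hooks
 * are provided in the ops table below.
 */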

struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent		= dma_generic_alloc_coherent,
	.free_coherent		= nommu_free_coherent,
	.map_sg			= nommu_map_sg,
	.map_page		= nommu_map_page,
	.sync_single_for_device = nommu_sync_single_for_device,
	.sync_sg_for_device	= nommu_sync_sg_for_device,
	.is_phys		= 1,
};
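
/*
 * Nothing registers these ops here: pci-dma.c initializes the global
 * dma_ops pointer to &nommu_dma_ops, so this is the fallback used when
 * no hardware IOMMU takes over.  .is_phys = 1 records that DMA
 * addresses are plain physical addresses.
 */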