// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    unsigned long attrs)
{
        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
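
/*
 * Example (illustrative sketch, not part of this file): drivers never call
 * the helpers above directly.  A coherent buffer comes in through the
 * generic DMA API, which dispatches to .alloc/.free of dma_iommu_ops once
 * these ops are installed for the device:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, SZ_64K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_64K, buf, handle);
 */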

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
                              size, dma_get_mask(dev), direction, attrs);
}
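
/*
 * Example (illustrative sketch): a streaming mapping reaches
 * dma_iommu_map_page()/dma_iommu_unmap_page() through the generic DMA API:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */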


static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                 size_t size, enum dma_data_direction direction,
                                 unsigned long attrs)
{
        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
                         attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            unsigned long attrs)
{
        return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                                dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nelems, enum dma_data_direction direction,
                               unsigned long attrs)
{
        ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
                           direction, attrs);
}
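
/*
 * Example (illustrative sketch): scatter/gather mappings arrive through
 * dma_map_sg()/dma_unmap_sg().  Per the DMA API, the returned count may be
 * smaller than the nents passed in; unmap with the original nents:
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *	if (!mapped)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */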

/*
 * Check whether the device can bypass the iommu and address all of memory
 * directly for the given DMA mask.  This requires the host bridge to
 * implement iommu_bypass_supported() and the fixed mapping to be strongly
 * ordered (!iommu_fixed_is_weak).
 */
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_controller *phb = pci_bus_to_host(pdev->bus);

        if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
                return false;
        return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
                dev->dma_ops_bypass = true;
                dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
                return 1;
        }

        if (!tbl) {
                dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
                return 0;
        }

        if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
                dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
                dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
                                mask, tbl->it_offset << tbl->it_page_shift);
                return 0;
        }

        dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
        dev->dma_ops_bypass = false;
        return 1;
}
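
/*
 * Worked example for the it_offset check above (numbers are illustrative):
 * with a 4K iommu page size (it_page_shift == 12) and a DMA window that
 * starts at it_offset == 0x80000 pages (bus address 0x80000000), a device
 * limited to a 31-bit mask (0x7fffffff) gives
 * mask >> 12 == 0x7ffff < 0x80000, so it cannot reach the window at all
 * and dma_iommu_dma_supported() returns 0.
 */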

u64 dma_iommu_get_required_mask(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;

        if (!tbl)
                return 0;

        mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
                        tbl->it_page_shift - 1);
        mask += mask - 1;

        return mask;
}
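
/*
 * Worked example for the computation above (numbers are illustrative):
 * it_offset == 0, it_size == 0x10000 entries, it_page_shift == 12, i.e. a
 * 256MB window of 4K pages.  fls_long(0x10000) == 17, so
 * mask = 1ULL << (17 + 12 - 1) == 0x10000000, and after mask += mask - 1
 * the result is 0x1fffffff: every bus address the table can produce fits
 * below the returned mask.
 */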

const struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
        .map_sg                 = dma_iommu_map_sg,
        .unmap_sg               = dma_iommu_unmap_sg,
        .dma_supported          = dma_iommu_dma_supported,
        .map_page               = dma_iommu_map_page,
        .unmap_page             = dma_iommu_unmap_page,
        .get_required_mask      = dma_iommu_get_required_mask,
        .mmap                   = dma_common_mmap,
        .get_sgtable            = dma_common_get_sgtable,
};
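
/*
 * Note (sketch; exact call sites vary by platform): these ops only take
 * effect once platform code has attached an iommu table to the device and
 * installed the ops, conceptually something like:
 *
 *	set_iommu_table_base(&pdev->dev, tbl);
 *	set_dma_ops(&pdev->dev, &dma_iommu_ops);
 *
 * After that, dma_set_mask() ends up in dma_iommu_dma_supported(), which
 * chooses between translated DMA through the table and direct bypass.
 */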