/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern u64 zone_dma_limit;

/*
 * Record the mapping of CPU physical to DMA addresses for a given region.
 */
struct bus_dma_region {
        phys_addr_t     cpu_start;
        dma_addr_t      dma_start;
        u64             size;
};
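
/*
 * Illustrative sketch, not part of this header: dev->dma_range_map points
 * to an array of these regions terminated by an entry with size == 0,
 * typically built by platform code from firmware "dma-ranges" data. The
 * name example_map below is hypothetical.
 *
 *      static const struct bus_dma_region example_map[] = {
 *              {
 *                      .cpu_start = 0x80000000,   (CPU physical base)
 *                      .dma_start = 0x00000000,   (device-visible base)
 *                      .size      = 0x40000000,   (1 GiB window)
 *              },
 *              { }     (zero size terminates the map)
 *      };
 */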

static inline dma_addr_t translate_phys_to_dma(struct device *dev,
                phys_addr_t paddr)
{
        const struct bus_dma_region *m;

        for (m = dev->dma_range_map; m->size; m++) {
                u64 offset = paddr - m->cpu_start;

                if (paddr >= m->cpu_start && offset < m->size)
                        return m->dma_start + offset;
        }

        /* make sure dma_capable fails when no translation is available */
        return DMA_MAPPING_ERROR;
}

static inline phys_addr_t translate_dma_to_phys(struct device *dev,
                dma_addr_t dma_addr)
{
        const struct bus_dma_region *m;

        for (m = dev->dma_range_map; m->size; m++) {
                u64 offset = dma_addr - m->dma_start;

                if (dma_addr >= m->dma_start && offset < m->size)
                        return m->cpu_start + offset;
        }

        return (phys_addr_t)-1;
}
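
/*
 * Worked example (a sketch, assuming the hypothetical example_map above):
 * translate_phys_to_dma(dev, 0x80001000) finds the region containing the
 * address and returns 0x00001000; translate_dma_to_phys(dev, 0x00001000)
 * reverses it and returns 0x80001000. A physical address that falls
 * outside every region yields DMA_MAPPING_ERROR, so dma_capable() fails
 * for it.
 */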

static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
{
        dma_addr_t ret = (dma_addr_t)U64_MAX;

        for (; map->size; map++)
                ret = min(ret, map->dma_start);
        return ret;
}

static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
{
        dma_addr_t ret = 0;

        for (; map->size; map++)
                ret = max(ret, map->dma_start + map->size - 1);
        return ret;
}
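
/*
 * Example (a sketch, again assuming the hypothetical example_map above):
 * with a single region starting at dma_start 0x0 and spanning 1 GiB,
 * dma_range_map_min() returns 0x0 and dma_range_map_max() returns
 * 0x3fffffff, i.e. the lowest and highest device-visible addresses the
 * map can produce.
 */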

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted         phys_to_dma
#endif
#else
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
                phys_addr_t paddr)
{
        if (dev->dma_range_map)
                return translate_phys_to_dma(dev, paddr);
        return paddr;
}

/*
 * If memory encryption is supported, phys_to_dma will set the memory
 * encryption bit in the DMA address, and dma_to_phys will clear it.
 * phys_to_dma_unencrypted is for use on special unencrypted memory like
 * swiotlb buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
        phys_addr_t paddr;

        if (dev->dma_range_map)
                paddr = translate_dma_to_phys(dev, dma_addr);
        else
                paddr = dma_addr;

        return __sme_clr(paddr);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
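
/*
 * Example (a sketch; the bit position is illustrative, not a real platform
 * constant): on a system where __sme_set() ORs in an encryption bit such
 * as bit 47, phys_to_dma(dev, 0x1000) would return 0x800000001000 and
 * dma_to_phys(dev, 0x800000001000) would return 0x1000 again, while
 * phys_to_dma_unencrypted(dev, 0x1000) would return plain 0x1000.
 */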

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
        return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
                bool is_ram)
{
        dma_addr_t end = addr + size - 1;

        if (addr == DMA_MAPPING_ERROR)
                return false;
        if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
            min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
                return false;

        return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
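
/*
 * Example (a sketch with illustrative values): for a device whose
 * dma_mask is DMA_BIT_MASK(32) and whose bus_dma_limit is 0,
 * dma_capable(dev, 0xfffff000, 0x2000, true) returns false because the
 * end of the range (0x100000fff) exceeds the 32-bit mask, while
 * dma_capable(dev, 0xfffff000, 0x1000, true) can succeed.
 */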

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
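
/*
 * Usage sketch (assumed calling convention; drivers normally reach these
 * through dma_alloc_coherent() and friends rather than calling the
 * dma-direct internals themselves):
 *
 *      dma_addr_t dma_handle;
 *      void *cpu_addr = dma_direct_alloc(dev, PAGE_SIZE, &dma_handle,
 *                                        GFP_KERNEL, 0);
 *      if (cpu_addr)
 *              dma_direct_free(dev, PAGE_SIZE, cpu_addr, dma_handle, 0);
 */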

#endif /* _LINUX_DMA_DIRECT_H */