Commit | Line | Data |
---|---|---|
775c8a3d | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
9663f2e6 KP |
2 | /* |
3 | * Copyright © 2008 Keith Packard <keithp@keithp.com> | |
9663f2e6 KP |
4 | */ |
5 | ||
6 | #ifndef _LINUX_IO_MAPPING_H | |
7 | #define _LINUX_IO_MAPPING_H | |
8 | ||
9 | #include <linux/types.h> | |
5a0e3ad6 | 10 | #include <linux/slab.h> |
187f1882 | 11 | #include <linux/bug.h> |
2584cf83 | 12 | #include <linux/io.h> |
65fddcfc | 13 | #include <linux/pgtable.h> |
9663f2e6 | 14 | #include <asm/page.h> |
9663f2e6 KP |
15 | |
16 | /* | |
17 | * The io_mapping mechanism provides an abstraction for mapping | |
18 | * individual pages from an io device to the CPU in an efficient fashion. | |
19 | * | |
7d3d3254 | 20 | * See Documentation/driver-api/io-mapping.rst |
9663f2e6 KP |
21 | */ |
22 | ||
/*
 * Describes one write-combined I/O mapping: the physical resource range
 * [base, base + size) plus the page protection used when pages of it are
 * mapped.  @iomem caches a mapping of the whole range; it is only
 * populated by the !CONFIG_HAVE_ATOMIC_IOMAP variant of
 * io_mapping_init_wc() below.
 */
struct io_mapping {
	resource_size_t base;	/* physical base address of the resource */
	unsigned long size;	/* length of the range in bytes */
	pgprot_t prot;		/* protection used for per-page mappings */
	void __iomem *iomem;	/* whole-range mapping (non-atomic-iomap case) */
};
29 | ||
cafaf14a CW |
30 | #ifdef CONFIG_HAVE_ATOMIC_IOMAP |
31 | ||
2b755626 | 32 | #include <linux/pfn.h> |
cafaf14a | 33 | #include <asm/iomap.h> |
e5beae16 KP |
34 | /* |
35 | * For small address space machines, mapping large objects | |
36 | * into the kernel virtual space isn't practical. Where | |
37 | * available, use fixmap support to dynamically map pages | |
38 | * of the object at run time. | |
39 | */ | |
9663f2e6 | 40 | |
/*
 * Initialize @iomap to cover the physical range [base, base + size)
 * with write-combining protection.
 *
 * Returns @iomap on success, or NULL if the architecture cannot
 * provide a write-combining protection for the range.
 */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	pgprot_t prot;

	/* Ask the arch for a WC pgprot covering this range. */
	if (iomap_create_wc(base, size, &prot))
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = prot;
	return iomap;
}
56 | ||
/* Tear down what iomap_create_wc() set up for this range. */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
62 | ||
63 | /* Atomic map/unmap */ | |
/* Atomic map/unmap */
/*
 * Map the page of @mapping containing @offset into the CPU address
 * space.  Page faults are disabled until io_mapping_unmap_atomic(),
 * so the caller must not sleep in between.
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	/*
	 * Non-RT kernels pin the task by disabling preemption; PREEMPT_RT
	 * only disables migration so the (sleeping) RT locks stay usable.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		migrate_disable();
	pagefault_disable();
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
79 | ||
/*
 * Undo io_mapping_map_atomic_wc(): drop the mapping, then re-enable
 * pagefaults and preemption/migration in the reverse of the order
 * they were disabled.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		migrate_enable();
}
90 | ||
/*
 * Like io_mapping_map_atomic_wc(), but does not disable preemption or
 * pagefaults; the mapping is only CPU-local.
 */
static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
100 | ||
/* Release a mapping obtained from io_mapping_map_local_wc(). */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}
105 | ||
29bc17ec | 106 | static inline void __iomem * |
d8dab00d CW |
107 | io_mapping_map_wc(struct io_mapping *mapping, |
108 | unsigned long offset, | |
109 | unsigned long size) | |
9663f2e6 | 110 | { |
5ce04e3d PV |
111 | resource_size_t phys_addr; |
112 | ||
4ab0d47d | 113 | BUG_ON(offset >= mapping->size); |
5ce04e3d PV |
114 | phys_addr = mapping->base + offset; |
115 | ||
d8dab00d | 116 | return ioremap_wc(phys_addr, size); |
9663f2e6 KP |
117 | } |
118 | ||
/* Tear down the ioremap_wc() mapping returned by io_mapping_map_wc(). */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
124 | ||
e66f6e09 | 125 | #else /* HAVE_ATOMIC_IOMAP */ |
9663f2e6 | 126 | |
24dd85ff | 127 | #include <linux/uaccess.h> |
4ab0d47d | 128 | |
e5beae16 | 129 | /* Create the io_mapping object*/ |
9663f2e6 | 130 | static inline struct io_mapping * |
cafaf14a CW |
131 | io_mapping_init_wc(struct io_mapping *iomap, |
132 | resource_size_t base, | |
133 | unsigned long size) | |
134 | { | |
e0b3e0b1 MR |
135 | iomap->iomem = ioremap_wc(base, size); |
136 | if (!iomap->iomem) | |
137 | return NULL; | |
138 | ||
cafaf14a CW |
139 | iomap->base = base; |
140 | iomap->size = size; | |
bcaaa0c4 | 141 | iomap->prot = pgprot_writecombine(PAGE_KERNEL); |
cafaf14a CW |
142 | |
143 | return iomap; | |
144 | } | |
145 | ||
/* Drop the whole-range mapping created by io_mapping_init_wc(). */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
151 | ||
152 | /* Non-atomic map/unmap */ | |
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	/*
	 * The full resource is already mapped (io_mapping_init_wc()), so
	 * just offset into it.  @size is unused here; it exists for
	 * signature parity with the HAVE_ATOMIC_IOMAP variant.
	 */
	return mapping->iomem + offset;
}
160 | ||
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	/* Nothing to do: the single mapping lives until io_mapping_fini(). */
}
165 | ||
166 | /* Atomic map/unmap */ | |
/* Atomic map/unmap */
/*
 * Atomic variant: same address computation as io_mapping_map_wc(), but
 * with preemption (or, on PREEMPT_RT, only migration) and pagefaults
 * disabled until io_mapping_unmap_atomic().
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		migrate_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
178 | ||
/*
 * Undo io_mapping_map_atomic_wc(), re-enabling pagefaults and
 * preemption/migration in reverse order of disabling.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		migrate_enable();
}
189 | ||
/* Local variant: plain pointer arithmetic, no preemption games needed. */
static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
195 | ||
/* Release a mapping from io_mapping_map_local_wc() (a no-op here). */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
}
200 | ||
201 | #endif /* !HAVE_ATOMIC_IOMAP */ | |
cafaf14a CW |
202 | |
203 | static inline struct io_mapping * | |
204 | io_mapping_create_wc(resource_size_t base, | |
205 | unsigned long size) | |
9663f2e6 | 206 | { |
cafaf14a CW |
207 | struct io_mapping *iomap; |
208 | ||
209 | iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); | |
210 | if (!iomap) | |
211 | return NULL; | |
212 | ||
213 | if (!io_mapping_init_wc(iomap, base, size)) { | |
214 | kfree(iomap); | |
215 | return NULL; | |
216 | } | |
217 | ||
218 | return iomap; | |
9663f2e6 KP |
219 | } |
220 | ||
/* Undo io_mapping_create_wc(): release the mapping, then the memory. */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
e5beae16 | 227 | |
/*
 * Insert pages of @iomap into a userspace @vma; defined out of line.
 * NOTE(review): semantics inferred from the signature only — @pfn/@size
 * presumably describe the physical range to map at @addr; confirm
 * against the out-of-line implementation.
 */
int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size);
eca36e43 CJ |
230 | |
231 | #endif /* _LINUX_IO_MAPPING_H */ |