Commit | Line | Data |
---|---|---|
b4d0d230 | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
739d875d | 2 | /* Generic I/O port emulation. |
3f7e212d AB |
3 | * |
4 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | |
5 | * Written by David Howells (dhowells@redhat.com) | |
3f7e212d AB |
6 | */ |
7 | #ifndef __ASM_GENERIC_IO_H | |
8 | #define __ASM_GENERIC_IO_H | |
9 | ||
10 | #include <asm/page.h> /* I/O is all done through memory accesses */ | |
9216efaf | 11 | #include <linux/string.h> /* for memset() and memcpy() */ |
3f7e212d | 12 | #include <linux/types.h> |
21003197 | 13 | #include <linux/instruction_pointer.h> |
3f7e212d AB |
14 | |
15 | #ifdef CONFIG_GENERIC_IOMAP | |
16 | #include <asm-generic/iomap.h> | |
17 | #endif | |
18 | ||
60ca1e5a | 19 | #include <asm/mmiowb.h> |
66eab4df MT |
20 | #include <asm-generic/pci_iomap.h> |
21 | ||
64e2c673 SK |
22 | #ifndef __io_br |
23 | #define __io_br() barrier() | |
24 | #endif | |
25 | ||
26 | /* prevent prefetching of coherent DMA data ahead of a dma-complete */ | |
27 | #ifndef __io_ar | |
28 | #ifdef rmb | |
abbbbc83 | 29 | #define __io_ar(v) rmb() |
64e2c673 | 30 | #else |
abbbbc83 | 31 | #define __io_ar(v) barrier() |
64e2c673 SK |
32 | #endif |
33 | #endif | |
34 | ||
35 | /* flush writes to coherent DMA data before possibly triggering a DMA read */ | |
36 | #ifndef __io_bw | |
37 | #ifdef wmb | |
38 | #define __io_bw() wmb() | |
39 | #else | |
40 | #define __io_bw() barrier() | |
41 | #endif | |
42 | #endif | |
43 | ||
44 | /* serialize device access against a spin_unlock, usually handled there. */ | |
45 | #ifndef __io_aw | |
60ca1e5a | 46 | #define __io_aw() mmiowb_set_pending() |
64e2c673 SK |
47 | #endif |
48 | ||
49 | #ifndef __io_pbw | |
50 | #define __io_pbw() __io_bw() | |
51 | #endif | |
52 | ||
53 | #ifndef __io_paw | |
54 | #define __io_paw() __io_aw() | |
55 | #endif | |
56 | ||
57 | #ifndef __io_pbr | |
58 | #define __io_pbr() __io_br() | |
59 | #endif | |
60 | ||
61 | #ifndef __io_par | |
abbbbc83 | 62 | #define __io_par(v) __io_ar(v) |
64e2c673 SK |
63 | #endif |
64 | ||
21003197 SPR |
65 | /* |
66 | * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for | |
67 | * specific kernel drivers in case of excessive/unwanted logging. | |
68 | * | |
69 | * Usage: Add a #define flag at the beginning of the driver file. | |
70 | * Ex: #define __DISABLE_TRACE_MMIO__ | |
71 | * #include <...> | |
72 | * ... | |
73 | */ | |
74 | #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__)) | |
75 | #include <linux/tracepoint-defs.h> | |
76 | ||
77 | DECLARE_TRACEPOINT(rwmmio_write); | |
78 | DECLARE_TRACEPOINT(rwmmio_post_write); | |
79 | DECLARE_TRACEPOINT(rwmmio_read); | |
80 | DECLARE_TRACEPOINT(rwmmio_post_read); | |
81 | ||
82 | void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, | |
5e5ff73c | 83 | unsigned long caller_addr, unsigned long caller_addr0); |
21003197 | 84 | void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, |
5e5ff73c | 85 | unsigned long caller_addr, unsigned long caller_addr0); |
21003197 | 86 | void log_read_mmio(u8 width, const volatile void __iomem *addr, |
5e5ff73c | 87 | unsigned long caller_addr, unsigned long caller_addr0); |
21003197 | 88 | void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, |
5e5ff73c | 89 | unsigned long caller_addr, unsigned long caller_addr0); |
21003197 SPR |
90 | |
91 | #else | |
92 | ||
93 | static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, | |
5e5ff73c | 94 | unsigned long caller_addr, unsigned long caller_addr0) {} |
21003197 | 95 | static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, |
5e5ff73c | 96 | unsigned long caller_addr, unsigned long caller_addr0) {} |
21003197 | 97 | static inline void log_read_mmio(u8 width, const volatile void __iomem *addr, |
5e5ff73c | 98 | unsigned long caller_addr, unsigned long caller_addr0) {} |
21003197 | 99 | static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, |
5e5ff73c | 100 | unsigned long caller_addr, unsigned long caller_addr0) {} |
21003197 SPR |
101 | |
102 | #endif /* CONFIG_TRACE_MMIO_ACCESS */ | |
64e2c673 | 103 | |
3f7e212d | 104 | /* |
9216efaf TR |
105 | * __raw_{read,write}{b,w,l,q}() access memory in native endianness. |
106 | * | |
107 | * On some architectures memory mapped IO needs to be accessed differently. | |
108 | * On the simple architectures, we just read/write the memory location | |
109 | * directly. | |
3f7e212d | 110 | */ |
9216efaf | 111 | |
35dbc0e0 | 112 | #ifndef __raw_readb |
9216efaf | 113 | #define __raw_readb __raw_readb |
3f7e212d AB |
114 | static inline u8 __raw_readb(const volatile void __iomem *addr) |
115 | { | |
9216efaf | 116 | return *(const volatile u8 __force *)addr; |
3f7e212d | 117 | } |
35dbc0e0 | 118 | #endif |
3f7e212d | 119 | |
35dbc0e0 | 120 | #ifndef __raw_readw |
9216efaf | 121 | #define __raw_readw __raw_readw |
3f7e212d AB |
122 | static inline u16 __raw_readw(const volatile void __iomem *addr) |
123 | { | |
9216efaf | 124 | return *(const volatile u16 __force *)addr; |
3f7e212d | 125 | } |
35dbc0e0 | 126 | #endif |
3f7e212d | 127 | |
35dbc0e0 | 128 | #ifndef __raw_readl |
9216efaf | 129 | #define __raw_readl __raw_readl |
3f7e212d AB |
130 | static inline u32 __raw_readl(const volatile void __iomem *addr) |
131 | { | |
9216efaf | 132 | return *(const volatile u32 __force *)addr; |
3f7e212d | 133 | } |
35dbc0e0 | 134 | #endif |
3f7e212d | 135 | |
9216efaf TR |
136 | #ifdef CONFIG_64BIT |
137 | #ifndef __raw_readq | |
138 | #define __raw_readq __raw_readq | |
139 | static inline u64 __raw_readq(const volatile void __iomem *addr) | |
7292e7e0 | 140 | { |
9216efaf | 141 | return *(const volatile u64 __force *)addr; |
7292e7e0 | 142 | } |
9216efaf TR |
143 | #endif |
144 | #endif /* CONFIG_64BIT */ | |
3f7e212d | 145 | |
35dbc0e0 | 146 | #ifndef __raw_writeb |
9216efaf TR |
147 | #define __raw_writeb __raw_writeb |
148 | static inline void __raw_writeb(u8 value, volatile void __iomem *addr) | |
3f7e212d | 149 | { |
9216efaf | 150 | *(volatile u8 __force *)addr = value; |
3f7e212d | 151 | } |
35dbc0e0 | 152 | #endif |
3f7e212d | 153 | |
35dbc0e0 | 154 | #ifndef __raw_writew |
9216efaf TR |
155 | #define __raw_writew __raw_writew |
156 | static inline void __raw_writew(u16 value, volatile void __iomem *addr) | |
3f7e212d | 157 | { |
9216efaf | 158 | *(volatile u16 __force *)addr = value; |
3f7e212d | 159 | } |
35dbc0e0 | 160 | #endif |
3f7e212d | 161 | |
35dbc0e0 | 162 | #ifndef __raw_writel |
9216efaf TR |
163 | #define __raw_writel __raw_writel |
164 | static inline void __raw_writel(u32 value, volatile void __iomem *addr) | |
3f7e212d | 165 | { |
9216efaf | 166 | *(volatile u32 __force *)addr = value; |
3f7e212d | 167 | } |
35dbc0e0 | 168 | #endif |
3f7e212d | 169 | |
3f7e212d | 170 | #ifdef CONFIG_64BIT |
9216efaf TR |
171 | #ifndef __raw_writeq |
172 | #define __raw_writeq __raw_writeq | |
173 | static inline void __raw_writeq(u64 value, volatile void __iomem *addr) | |
3f7e212d | 174 | { |
9216efaf | 175 | *(volatile u64 __force *)addr = value; |
3f7e212d | 176 | } |
cd248341 | 177 | #endif |
9216efaf | 178 | #endif /* CONFIG_64BIT */ |
cd248341 | 179 | |
9216efaf TR |
180 | /* |
181 | * {read,write}{b,w,l,q}() access little endian memory and return result in | |
182 | * native endianness. | |
183 | */ | |
3f7e212d | 184 | |
9216efaf TR |
185 | #ifndef readb |
186 | #define readb readb | |
187 | static inline u8 readb(const volatile void __iomem *addr) | |
3f7e212d | 188 | { |
032d59e1 SK |
189 | u8 val; |
190 | ||
5e5ff73c | 191 | log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); |
032d59e1 SK |
192 | __io_br(); |
193 | val = __raw_readb(addr); | |
abbbbc83 | 194 | __io_ar(val); |
5e5ff73c | 195 | log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); |
032d59e1 | 196 | return val; |
3f7e212d | 197 | } |
3f7e212d AB |
198 | #endif |
199 | ||
9216efaf TR |
200 | #ifndef readw |
201 | #define readw readw | |
202 | static inline u16 readw(const volatile void __iomem *addr) | |
203 | { | |
032d59e1 SK |
204 | u16 val; |
205 | ||
5e5ff73c | 206 | log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); |
032d59e1 | 207 | __io_br(); |
c1d55d50 | 208 | val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); |
abbbbc83 | 209 | __io_ar(val); |
5e5ff73c | 210 | log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); |
032d59e1 | 211 | return val; |
9216efaf | 212 | } |
7dc59bdd G |
213 | #endif |
214 | ||
9216efaf TR |
215 | #ifndef readl |
216 | #define readl readl | |
217 | static inline u32 readl(const volatile void __iomem *addr) | |
3f7e212d | 218 | { |
032d59e1 SK |
219 | u32 val; |
220 | ||
5e5ff73c | 221 | log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); |
032d59e1 | 222 | __io_br(); |
c1d55d50 | 223 | val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); |
abbbbc83 | 224 | __io_ar(val); |
5e5ff73c | 225 | log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); |
032d59e1 | 226 | return val; |
3f7e212d | 227 | } |
9216efaf | 228 | #endif |
3f7e212d | 229 | |
9216efaf TR |
230 | #ifdef CONFIG_64BIT |
231 | #ifndef readq | |
232 | #define readq readq | |
233 | static inline u64 readq(const volatile void __iomem *addr) | |
3f7e212d | 234 | { |
032d59e1 SK |
235 | u64 val; |
236 | ||
5e5ff73c | 237 | log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); |
032d59e1 | 238 | __io_br(); |
d564fa1f | 239 | val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); |
abbbbc83 | 240 | __io_ar(val); |
5e5ff73c | 241 | log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); |
032d59e1 | 242 | return val; |
3f7e212d | 243 | } |
9216efaf TR |
244 | #endif |
245 | #endif /* CONFIG_64BIT */ | |
3f7e212d | 246 | |
9216efaf TR |
247 | #ifndef writeb |
248 | #define writeb writeb | |
249 | static inline void writeb(u8 value, volatile void __iomem *addr) | |
3f7e212d | 250 | { |
5e5ff73c | 251 | log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); |
755bd04a | 252 | __io_bw(); |
9216efaf | 253 | __raw_writeb(value, addr); |
755bd04a | 254 | __io_aw(); |
5e5ff73c | 255 | log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); |
3f7e212d | 256 | } |
9216efaf | 257 | #endif |
3f7e212d | 258 | |
9216efaf TR |
259 | #ifndef writew |
260 | #define writew writew | |
261 | static inline void writew(u16 value, volatile void __iomem *addr) | |
3f7e212d | 262 | { |
5e5ff73c | 263 | log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); |
755bd04a | 264 | __io_bw(); |
c1d55d50 | 265 | __raw_writew((u16 __force)cpu_to_le16(value), addr); |
755bd04a | 266 | __io_aw(); |
5e5ff73c | 267 | log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); |
3f7e212d | 268 | } |
9216efaf | 269 | #endif |
3f7e212d | 270 | |
9216efaf TR |
271 | #ifndef writel |
272 | #define writel writel | |
273 | static inline void writel(u32 value, volatile void __iomem *addr) | |
3f7e212d | 274 | { |
5e5ff73c | 275 | log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); |
755bd04a | 276 | __io_bw(); |
c1d55d50 | 277 | __raw_writel((u32 __force)__cpu_to_le32(value), addr); |
755bd04a | 278 | __io_aw(); |
5e5ff73c | 279 | log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); |
3f7e212d | 280 | } |
9216efaf | 281 | #endif |
3f7e212d | 282 | |
9216efaf TR |
283 | #ifdef CONFIG_64BIT |
284 | #ifndef writeq | |
285 | #define writeq writeq | |
286 | static inline void writeq(u64 value, volatile void __iomem *addr) | |
3f7e212d | 287 | { |
5e5ff73c | 288 | log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); |
755bd04a | 289 | __io_bw(); |
d564fa1f | 290 | __raw_writeq((u64 __force)__cpu_to_le64(value), addr); |
755bd04a | 291 | __io_aw(); |
5e5ff73c | 292 | log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); |
3f7e212d | 293 | } |
9216efaf TR |
294 | #endif |
295 | #endif /* CONFIG_64BIT */ | |
3f7e212d | 296 | |
1c8d2969 AB |
297 | /* |
298 | * {read,write}{b,w,l,q}_relaxed() are like the regular version, but | |
299 | * are not guaranteed to provide ordering against spinlocks or memory | |
300 | * accesses. | |
301 | */ | |
302 | #ifndef readb_relaxed | |
8875c554 SK |
303 | #define readb_relaxed readb_relaxed |
304 | static inline u8 readb_relaxed(const volatile void __iomem *addr) | |
305 | { | |
21003197 SPR |
306 | u8 val; |
307 | ||
5e5ff73c | 308 | log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); |
21003197 | 309 | val = __raw_readb(addr); |
5e5ff73c | 310 | log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); |
21003197 | 311 | return val; |
8875c554 | 312 | } |
1c8d2969 AB |
313 | #endif |
314 | ||
315 | #ifndef readw_relaxed | |
8875c554 SK |
316 | #define readw_relaxed readw_relaxed |
317 | static inline u16 readw_relaxed(const volatile void __iomem *addr) | |
318 | { | |
21003197 SPR |
319 | u16 val; |
320 | ||
5e5ff73c | 321 | log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); |
05d3855b | 322 | val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); |
5e5ff73c | 323 | log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); |
21003197 | 324 | return val; |
8875c554 | 325 | } |
1c8d2969 AB |
326 | #endif |
327 | ||
328 | #ifndef readl_relaxed | |
8875c554 SK |
329 | #define readl_relaxed readl_relaxed |
330 | static inline u32 readl_relaxed(const volatile void __iomem *addr) | |
331 | { | |
21003197 SPR |
332 | u32 val; |
333 | ||
5e5ff73c | 334 | log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); |
05d3855b | 335 | val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); |
5e5ff73c | 336 | log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); |
21003197 | 337 | return val; |
8875c554 | 338 | } |
1c8d2969 AB |
339 | #endif |
340 | ||
e511267b | 341 | #if defined(readq) && !defined(readq_relaxed) |
8875c554 SK |
342 | #define readq_relaxed readq_relaxed |
343 | static inline u64 readq_relaxed(const volatile void __iomem *addr) | |
344 | { | |
21003197 SPR |
345 | u64 val; |
346 | ||
5e5ff73c | 347 | log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); |
05d3855b | 348 | val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); |
5e5ff73c | 349 | log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); |
21003197 | 350 | return val; |
8875c554 | 351 | } |
1c8d2969 AB |
352 | #endif |
353 | ||
9439eb3a | 354 | #ifndef writeb_relaxed |
a71e7c44 SK |
355 | #define writeb_relaxed writeb_relaxed |
356 | static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) | |
357 | { | |
5e5ff73c | 358 | log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); |
a71e7c44 | 359 | __raw_writeb(value, addr); |
5e5ff73c | 360 | log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); |
a71e7c44 | 361 | } |
9439eb3a WD |
362 | #endif |
363 | ||
9439eb3a | 364 | #ifndef writew_relaxed |
a71e7c44 SK |
365 | #define writew_relaxed writew_relaxed |
366 | static inline void writew_relaxed(u16 value, volatile void __iomem *addr) | |
367 | { | |
5e5ff73c | 368 | log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); |
05d3855b | 369 | __raw_writew((u16 __force)cpu_to_le16(value), addr); |
5e5ff73c | 370 | log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); |
a71e7c44 | 371 | } |
9439eb3a WD |
372 | #endif |
373 | ||
9439eb3a | 374 | #ifndef writel_relaxed |
a71e7c44 SK |
375 | #define writel_relaxed writel_relaxed |
376 | static inline void writel_relaxed(u32 value, volatile void __iomem *addr) | |
377 | { | |
5e5ff73c | 378 | log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); |
05d3855b | 379 | __raw_writel((u32 __force)__cpu_to_le32(value), addr); |
5e5ff73c | 380 | log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); |
a71e7c44 | 381 | } |
9439eb3a | 382 | #endif |
3f7e212d | 383 | |
e511267b | 384 | #if defined(writeq) && !defined(writeq_relaxed) |
a71e7c44 SK |
385 | #define writeq_relaxed writeq_relaxed |
386 | static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) | |
387 | { | |
5e5ff73c | 388 | log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); |
05d3855b | 389 | __raw_writeq((u64 __force)__cpu_to_le64(value), addr); |
5e5ff73c | 390 | log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); |
a71e7c44 | 391 | } |
1c8d2969 AB |
392 | #endif |
393 | ||
9ab3a7a0 TR |
394 | /* |
395 | * {read,write}s{b,w,l,q}() repeatedly access the same memory address in | |
396 | * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). | |
397 | */ | |
398 | #ifndef readsb | |
399 | #define readsb readsb | |
400 | static inline void readsb(const volatile void __iomem *addr, void *buffer, | |
401 | unsigned int count) | |
3f7e212d AB |
402 | { |
403 | if (count) { | |
404 | u8 *buf = buffer; | |
9ab3a7a0 | 405 | |
3f7e212d | 406 | do { |
9ab3a7a0 | 407 | u8 x = __raw_readb(addr); |
3f7e212d AB |
408 | *buf++ = x; |
409 | } while (--count); | |
410 | } | |
411 | } | |
35dbc0e0 | 412 | #endif |
3f7e212d | 413 | |
9ab3a7a0 TR |
414 | #ifndef readsw |
415 | #define readsw readsw | |
416 | static inline void readsw(const volatile void __iomem *addr, void *buffer, | |
417 | unsigned int count) | |
3f7e212d AB |
418 | { |
419 | if (count) { | |
420 | u16 *buf = buffer; | |
9ab3a7a0 | 421 | |
3f7e212d | 422 | do { |
9ab3a7a0 | 423 | u16 x = __raw_readw(addr); |
3f7e212d AB |
424 | *buf++ = x; |
425 | } while (--count); | |
426 | } | |
427 | } | |
35dbc0e0 | 428 | #endif |
3f7e212d | 429 | |
9ab3a7a0 TR |
430 | #ifndef readsl |
431 | #define readsl readsl | |
432 | static inline void readsl(const volatile void __iomem *addr, void *buffer, | |
433 | unsigned int count) | |
3f7e212d AB |
434 | { |
435 | if (count) { | |
436 | u32 *buf = buffer; | |
9ab3a7a0 | 437 | |
3f7e212d | 438 | do { |
9ab3a7a0 | 439 | u32 x = __raw_readl(addr); |
3f7e212d AB |
440 | *buf++ = x; |
441 | } while (--count); | |
442 | } | |
443 | } | |
35dbc0e0 | 444 | #endif |
3f7e212d | 445 | |
9ab3a7a0 TR |
446 | #ifdef CONFIG_64BIT |
447 | #ifndef readsq | |
448 | #define readsq readsq | |
449 | static inline void readsq(const volatile void __iomem *addr, void *buffer, | |
450 | unsigned int count) | |
451 | { | |
452 | if (count) { | |
453 | u64 *buf = buffer; | |
454 | ||
455 | do { | |
456 | u64 x = __raw_readq(addr); | |
457 | *buf++ = x; | |
458 | } while (--count); | |
459 | } | |
460 | } | |
461 | #endif | |
462 | #endif /* CONFIG_64BIT */ | |
463 | ||
464 | #ifndef writesb | |
465 | #define writesb writesb | |
466 | static inline void writesb(volatile void __iomem *addr, const void *buffer, | |
467 | unsigned int count) | |
3f7e212d AB |
468 | { |
469 | if (count) { | |
470 | const u8 *buf = buffer; | |
9ab3a7a0 | 471 | |
3f7e212d | 472 | do { |
9ab3a7a0 | 473 | __raw_writeb(*buf++, addr); |
3f7e212d AB |
474 | } while (--count); |
475 | } | |
476 | } | |
35dbc0e0 | 477 | #endif |
3f7e212d | 478 | |
9ab3a7a0 TR |
479 | #ifndef writesw |
480 | #define writesw writesw | |
481 | static inline void writesw(volatile void __iomem *addr, const void *buffer, | |
482 | unsigned int count) | |
3f7e212d AB |
483 | { |
484 | if (count) { | |
485 | const u16 *buf = buffer; | |
9ab3a7a0 | 486 | |
3f7e212d | 487 | do { |
9ab3a7a0 | 488 | __raw_writew(*buf++, addr); |
3f7e212d AB |
489 | } while (--count); |
490 | } | |
491 | } | |
35dbc0e0 | 492 | #endif |
3f7e212d | 493 | |
9ab3a7a0 TR |
494 | #ifndef writesl |
495 | #define writesl writesl | |
496 | static inline void writesl(volatile void __iomem *addr, const void *buffer, | |
497 | unsigned int count) | |
3f7e212d AB |
498 | { |
499 | if (count) { | |
500 | const u32 *buf = buffer; | |
9ab3a7a0 | 501 | |
3f7e212d | 502 | do { |
9ab3a7a0 | 503 | __raw_writel(*buf++, addr); |
3f7e212d AB |
504 | } while (--count); |
505 | } | |
506 | } | |
35dbc0e0 | 507 | #endif |
3f7e212d | 508 | |
9ab3a7a0 TR |
509 | #ifdef CONFIG_64BIT |
510 | #ifndef writesq | |
511 | #define writesq writesq | |
512 | static inline void writesq(volatile void __iomem *addr, const void *buffer, | |
513 | unsigned int count) | |
514 | { | |
515 | if (count) { | |
516 | const u64 *buf = buffer; | |
517 | ||
518 | do { | |
519 | __raw_writeq(*buf++, addr); | |
520 | } while (--count); | |
521 | } | |
522 | } | |
523 | #endif | |
524 | #endif /* CONFIG_64BIT */ | |
3f7e212d | 525 | |
/* Base of the memory window that port-I/O accessors are translated into. */
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

/* Highest valid port number for the generic {in,out}* helpers. */
#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif
3f7e212d | 533 | |
9216efaf TR |
534 | /* |
535 | * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be | |
536 | * implemented on hardware that needs an additional delay for I/O accesses to | |
537 | * take effect. | |
538 | */ | |
539 | ||
f009c89d JG |
540 | #if !defined(inb) && !defined(_inb) |
541 | #define _inb _inb | |
214ba358 | 542 | static inline u8 _inb(unsigned long addr) |
9216efaf | 543 | { |
87fe2d54 SK |
544 | u8 val; |
545 | ||
546 | __io_pbr(); | |
547 | val = __raw_readb(PCI_IOBASE + addr); | |
abbbbc83 | 548 | __io_par(val); |
87fe2d54 | 549 | return val; |
9216efaf TR |
550 | } |
551 | #endif | |
552 | ||
f009c89d JG |
553 | #if !defined(inw) && !defined(_inw) |
554 | #define _inw _inw | |
555 | static inline u16 _inw(unsigned long addr) | |
9216efaf | 556 | { |
87fe2d54 SK |
557 | u16 val; |
558 | ||
559 | __io_pbr(); | |
c1d55d50 | 560 | val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr)); |
abbbbc83 | 561 | __io_par(val); |
87fe2d54 | 562 | return val; |
9216efaf TR |
563 | } |
564 | #endif | |
565 | ||
f009c89d JG |
566 | #if !defined(inl) && !defined(_inl) |
567 | #define _inl _inl | |
214ba358 | 568 | static inline u32 _inl(unsigned long addr) |
9216efaf | 569 | { |
87fe2d54 SK |
570 | u32 val; |
571 | ||
572 | __io_pbr(); | |
c1d55d50 | 573 | val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr)); |
abbbbc83 | 574 | __io_par(val); |
87fe2d54 | 575 | return val; |
9216efaf TR |
576 | } |
577 | #endif | |
578 | ||
f009c89d JG |
579 | #if !defined(outb) && !defined(_outb) |
580 | #define _outb _outb | |
581 | static inline void _outb(u8 value, unsigned long addr) | |
9216efaf | 582 | { |
a7851aa5 SK |
583 | __io_pbw(); |
584 | __raw_writeb(value, PCI_IOBASE + addr); | |
585 | __io_paw(); | |
9216efaf TR |
586 | } |
587 | #endif | |
588 | ||
f009c89d JG |
589 | #if !defined(outw) && !defined(_outw) |
590 | #define _outw _outw | |
591 | static inline void _outw(u16 value, unsigned long addr) | |
9216efaf | 592 | { |
a7851aa5 | 593 | __io_pbw(); |
c1d55d50 | 594 | __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr); |
a7851aa5 | 595 | __io_paw(); |
9216efaf TR |
596 | } |
597 | #endif | |
598 | ||
f009c89d JG |
599 | #if !defined(outl) && !defined(_outl) |
600 | #define _outl _outl | |
601 | static inline void _outl(u32 value, unsigned long addr) | |
9216efaf | 602 | { |
a7851aa5 | 603 | __io_pbw(); |
c1d55d50 | 604 | __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr); |
a7851aa5 | 605 | __io_paw(); |
9216efaf TR |
606 | } |
607 | #endif | |
608 | ||
f009c89d JG |
609 | #include <linux/logic_pio.h> |
610 | ||
611 | #ifndef inb | |
612 | #define inb _inb | |
613 | #endif | |
614 | ||
615 | #ifndef inw | |
616 | #define inw _inw | |
617 | #endif | |
618 | ||
619 | #ifndef inl | |
620 | #define inl _inl | |
621 | #endif | |
622 | ||
623 | #ifndef outb | |
624 | #define outb _outb | |
625 | #endif | |
626 | ||
627 | #ifndef outw | |
628 | #define outw _outw | |
629 | #endif | |
630 | ||
631 | #ifndef outl | |
632 | #define outl _outl | |
633 | #endif | |
634 | ||
9216efaf TR |
635 | #ifndef inb_p |
636 | #define inb_p inb_p | |
637 | static inline u8 inb_p(unsigned long addr) | |
638 | { | |
639 | return inb(addr); | |
640 | } | |
641 | #endif | |
642 | ||
643 | #ifndef inw_p | |
644 | #define inw_p inw_p | |
645 | static inline u16 inw_p(unsigned long addr) | |
646 | { | |
647 | return inw(addr); | |
648 | } | |
649 | #endif | |
650 | ||
651 | #ifndef inl_p | |
652 | #define inl_p inl_p | |
653 | static inline u32 inl_p(unsigned long addr) | |
654 | { | |
655 | return inl(addr); | |
656 | } | |
657 | #endif | |
658 | ||
659 | #ifndef outb_p | |
660 | #define outb_p outb_p | |
661 | static inline void outb_p(u8 value, unsigned long addr) | |
662 | { | |
663 | outb(value, addr); | |
664 | } | |
665 | #endif | |
666 | ||
667 | #ifndef outw_p | |
668 | #define outw_p outw_p | |
669 | static inline void outw_p(u16 value, unsigned long addr) | |
670 | { | |
671 | outw(value, addr); | |
672 | } | |
673 | #endif | |
674 | ||
675 | #ifndef outl_p | |
676 | #define outl_p outl_p | |
677 | static inline void outl_p(u32 value, unsigned long addr) | |
678 | { | |
679 | outl(value, addr); | |
680 | } | |
681 | #endif | |
682 | ||
9ab3a7a0 TR |
683 | /* |
684 | * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a | |
685 | * single I/O port multiple times. | |
686 | */ | |
687 | ||
688 | #ifndef insb | |
689 | #define insb insb | |
690 | static inline void insb(unsigned long addr, void *buffer, unsigned int count) | |
691 | { | |
692 | readsb(PCI_IOBASE + addr, buffer, count); | |
693 | } | |
694 | #endif | |
695 | ||
696 | #ifndef insw | |
697 | #define insw insw | |
698 | static inline void insw(unsigned long addr, void *buffer, unsigned int count) | |
699 | { | |
700 | readsw(PCI_IOBASE + addr, buffer, count); | |
701 | } | |
702 | #endif | |
703 | ||
704 | #ifndef insl | |
705 | #define insl insl | |
706 | static inline void insl(unsigned long addr, void *buffer, unsigned int count) | |
707 | { | |
708 | readsl(PCI_IOBASE + addr, buffer, count); | |
709 | } | |
710 | #endif | |
711 | ||
712 | #ifndef outsb | |
713 | #define outsb outsb | |
714 | static inline void outsb(unsigned long addr, const void *buffer, | |
715 | unsigned int count) | |
716 | { | |
717 | writesb(PCI_IOBASE + addr, buffer, count); | |
718 | } | |
719 | #endif | |
720 | ||
721 | #ifndef outsw | |
722 | #define outsw outsw | |
723 | static inline void outsw(unsigned long addr, const void *buffer, | |
724 | unsigned int count) | |
725 | { | |
726 | writesw(PCI_IOBASE + addr, buffer, count); | |
727 | } | |
728 | #endif | |
729 | ||
730 | #ifndef outsl | |
731 | #define outsl outsl | |
732 | static inline void outsl(unsigned long addr, const void *buffer, | |
733 | unsigned int count) | |
734 | { | |
735 | writesl(PCI_IOBASE + addr, buffer, count); | |
736 | } | |
737 | #endif | |
738 | ||
739 | #ifndef insb_p | |
740 | #define insb_p insb_p | |
741 | static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) | |
742 | { | |
743 | insb(addr, buffer, count); | |
744 | } | |
745 | #endif | |
746 | ||
747 | #ifndef insw_p | |
748 | #define insw_p insw_p | |
749 | static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) | |
750 | { | |
751 | insw(addr, buffer, count); | |
752 | } | |
753 | #endif | |
754 | ||
755 | #ifndef insl_p | |
756 | #define insl_p insl_p | |
757 | static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) | |
758 | { | |
759 | insl(addr, buffer, count); | |
760 | } | |
761 | #endif | |
762 | ||
763 | #ifndef outsb_p | |
764 | #define outsb_p outsb_p | |
765 | static inline void outsb_p(unsigned long addr, const void *buffer, | |
766 | unsigned int count) | |
767 | { | |
768 | outsb(addr, buffer, count); | |
769 | } | |
770 | #endif | |
771 | ||
772 | #ifndef outsw_p | |
773 | #define outsw_p outsw_p | |
774 | static inline void outsw_p(unsigned long addr, const void *buffer, | |
775 | unsigned int count) | |
776 | { | |
777 | outsw(addr, buffer, count); | |
778 | } | |
779 | #endif | |
780 | ||
781 | #ifndef outsl_p | |
782 | #define outsl_p outsl_p | |
783 | static inline void outsl_p(unsigned long addr, const void *buffer, | |
784 | unsigned int count) | |
785 | { | |
786 | outsl(addr, buffer, count); | |
787 | } | |
788 | #endif | |
789 | ||
9216efaf TR |
790 | #ifndef CONFIG_GENERIC_IOMAP |
791 | #ifndef ioread8 | |
792 | #define ioread8 ioread8 | |
793 | static inline u8 ioread8(const volatile void __iomem *addr) | |
794 | { | |
795 | return readb(addr); | |
796 | } | |
797 | #endif | |
798 | ||
799 | #ifndef ioread16 | |
800 | #define ioread16 ioread16 | |
801 | static inline u16 ioread16(const volatile void __iomem *addr) | |
802 | { | |
803 | return readw(addr); | |
804 | } | |
805 | #endif | |
806 | ||
807 | #ifndef ioread32 | |
808 | #define ioread32 ioread32 | |
809 | static inline u32 ioread32(const volatile void __iomem *addr) | |
810 | { | |
811 | return readl(addr); | |
812 | } | |
813 | #endif | |
814 | ||
9e44fb18 HG |
815 | #ifdef CONFIG_64BIT |
816 | #ifndef ioread64 | |
817 | #define ioread64 ioread64 | |
818 | static inline u64 ioread64(const volatile void __iomem *addr) | |
819 | { | |
820 | return readq(addr); | |
821 | } | |
822 | #endif | |
823 | #endif /* CONFIG_64BIT */ | |
824 | ||
9216efaf TR |
825 | #ifndef iowrite8 |
826 | #define iowrite8 iowrite8 | |
827 | static inline void iowrite8(u8 value, volatile void __iomem *addr) | |
828 | { | |
829 | writeb(value, addr); | |
830 | } | |
831 | #endif | |
832 | ||
833 | #ifndef iowrite16 | |
834 | #define iowrite16 iowrite16 | |
835 | static inline void iowrite16(u16 value, volatile void __iomem *addr) | |
836 | { | |
837 | writew(value, addr); | |
838 | } | |
839 | #endif | |
840 | ||
841 | #ifndef iowrite32 | |
842 | #define iowrite32 iowrite32 | |
843 | static inline void iowrite32(u32 value, volatile void __iomem *addr) | |
844 | { | |
845 | writel(value, addr); | |
846 | } | |
847 | #endif | |
848 | ||
9e44fb18 HG |
849 | #ifdef CONFIG_64BIT |
850 | #ifndef iowrite64 | |
851 | #define iowrite64 iowrite64 | |
852 | static inline void iowrite64(u64 value, volatile void __iomem *addr) | |
853 | { | |
854 | writeq(value, addr); | |
855 | } | |
856 | #endif | |
857 | #endif /* CONFIG_64BIT */ | |
858 | ||
9216efaf TR |
859 | #ifndef ioread16be |
860 | #define ioread16be ioread16be | |
861 | static inline u16 ioread16be(const volatile void __iomem *addr) | |
862 | { | |
7a1aedba | 863 | return swab16(readw(addr)); |
9216efaf TR |
864 | } |
865 | #endif | |
866 | ||
867 | #ifndef ioread32be | |
868 | #define ioread32be ioread32be | |
869 | static inline u32 ioread32be(const volatile void __iomem *addr) | |
870 | { | |
7a1aedba | 871 | return swab32(readl(addr)); |
9216efaf TR |
872 | } |
873 | #endif | |
874 | ||
9e44fb18 HG |
875 | #ifdef CONFIG_64BIT |
876 | #ifndef ioread64be | |
877 | #define ioread64be ioread64be | |
878 | static inline u64 ioread64be(const volatile void __iomem *addr) | |
879 | { | |
880 | return swab64(readq(addr)); | |
881 | } | |
882 | #endif | |
883 | #endif /* CONFIG_64BIT */ | |
884 | ||
9216efaf TR |
885 | #ifndef iowrite16be |
886 | #define iowrite16be iowrite16be | |
887 | static inline void iowrite16be(u16 value, void volatile __iomem *addr) | |
888 | { | |
7a1aedba | 889 | writew(swab16(value), addr); |
9216efaf TR |
890 | } |
891 | #endif | |
892 | ||
893 | #ifndef iowrite32be | |
894 | #define iowrite32be iowrite32be | |
895 | static inline void iowrite32be(u32 value, volatile void __iomem *addr) | |
896 | { | |
7a1aedba | 897 | writel(swab32(value), addr); |
9216efaf TR |
898 | } |
899 | #endif | |
9ab3a7a0 | 900 | |
9e44fb18 HG |
901 | #ifdef CONFIG_64BIT |
902 | #ifndef iowrite64be | |
903 | #define iowrite64be iowrite64be | |
904 | static inline void iowrite64be(u64 value, volatile void __iomem *addr) | |
905 | { | |
906 | writeq(swab64(value), addr); | |
907 | } | |
908 | #endif | |
909 | #endif /* CONFIG_64BIT */ | |
910 | ||
9ab3a7a0 TR |
911 | #ifndef ioread8_rep |
912 | #define ioread8_rep ioread8_rep | |
913 | static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, | |
914 | unsigned int count) | |
915 | { | |
916 | readsb(addr, buffer, count); | |
917 | } | |
918 | #endif | |
919 | ||
920 | #ifndef ioread16_rep | |
921 | #define ioread16_rep ioread16_rep | |
922 | static inline void ioread16_rep(const volatile void __iomem *addr, | |
923 | void *buffer, unsigned int count) | |
924 | { | |
925 | readsw(addr, buffer, count); | |
926 | } | |
927 | #endif | |
928 | ||
#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
/* Repeated 32-bit MMIO read: delegate to readsl(), transferring @count
 * 32-bit units from @addr into @buffer. */
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif
937 | ||
9e44fb18 HG |
#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
/* Repeated 64-bit MMIO read: delegate to readsq(), transferring @count
 * 64-bit units from @addr into @buffer.  64-bit kernels only. */
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
948 | ||
9ab3a7a0 TR |
#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
/* Repeated 8-bit MMIO write: delegate to writesb(), transferring @count
 * byte-sized units from @buffer to @addr. */
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif
958 | ||
#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
/* Repeated 16-bit MMIO write: delegate to writesw(), transferring @count
 * 16-bit units from @buffer to @addr. */
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif
968 | ||
#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
/* Repeated 32-bit MMIO write: delegate to writesl(), transferring @count
 * 32-bit units from @buffer to @addr. */
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif
9e44fb18 HG |
978 | |
#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
/* Repeated 64-bit MMIO write: delegate to writesq(), transferring @count
 * 64-bit units from @buffer to @addr.  64-bit kernels only. */
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
9216efaf TR |
990 | #endif /* CONFIG_GENERIC_IOMAP */ |
991 | ||
3f7e212d AB |
992 | #ifdef __KERNEL__ |
993 | ||
994 | #include <linux/vmalloc.h> | |
9216efaf | 995 | #define __io_virt(x) ((void __force *)(x)) |
3f7e212d | 996 | |
3f7e212d AB |
997 | /* |
998 | * Change virtual addresses to physical addresses and vv. | |
999 | * These are pretty trivial | |
1000 | */ | |
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
/* Translate a kernel virtual address to its physical address via __pa().
 * Only valid for addresses __pa() can handle (the kernel's direct map). */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif
3f7e212d | 1008 | |
9216efaf TR |
#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
/* Translate a physical address back to a kernel virtual address via __va();
 * the inverse of virt_to_phys() above. */
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
3f7e212d | 1016 | |
8c7ea50c LR |
1017 | /** |
1018 | * DOC: ioremap() and ioremap_*() variants | |
1019 | * | |
97c9801a | 1020 | * Architectures with an MMU are expected to provide ioremap() and iounmap() |
80b0ca98 CH |
1021 | * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide |
 * a default no-op implementation that expects that the physical addresses
 * used for MMIO are already marked as uncached, and can be used as kernel
 * virtual addresses.
8c7ea50c | 1025 | * |
97c9801a CH |
1026 | * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes |
 * for specific drivers if the architecture chooses to implement them. If they
7c566bb5 HM |
1028 | * are not implemented we fall back to plain ioremap. Conversely, ioremap_np() |
1029 | * can provide stricter non-posted write semantics if the architecture | |
1030 | * implements them. | |
8c7ea50c | 1031 | */ |
e9713395 | 1032 | #ifndef CONFIG_MMU |
9216efaf TR |
#ifndef ioremap
#define ioremap ioremap
/*
 * NOMMU: the "mapping" is the identity — the physical address is returned
 * directly as the I/O cookie (see the DOC comment above for the
 * assumptions this places on the platform).
 */
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif
3f7e212d | 1040 | |
b3ada9d0 GH |
#ifndef iounmap
#define iounmap iounmap
/* NOMMU: ioremap() created no mapping, so there is nothing to tear down. */
static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
80b0ca98 | 1047 | #elif defined(CONFIG_GENERIC_IOREMAP) |
ca5999fd | 1048 | #include <linux/pgtable.h> |
80b0ca98 | 1049 | |
18e780b4 KW |
1050 | /* |
1051 | * Arch code can implement the following two hooks when using GENERIC_IOREMAP | |
 * ioremap_allowed() returns a bool,
1053 | * - true means continue to remap | |
1054 | * - false means skip remap and return directly | |
 * iounmap_allowed() returns a bool,
1056 | * - true means continue to vunmap | |
1057 | * - false means skip vunmap and return directly | |
1058 | */ | |
#ifndef ioremap_allowed
#define ioremap_allowed ioremap_allowed
/* Default arch hook for GENERIC_IOREMAP: always permit the remap. */
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	return true;
}
#endif
1067 | ||
#ifndef iounmap_allowed
#define iounmap_allowed iounmap_allowed
/* Default arch hook for GENERIC_IOREMAP: always permit the vunmap. */
static inline bool iounmap_allowed(void *addr)
{
	return true;
}
#endif
1075 | ||
abc5992b KW |
1076 | void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, |
1077 | unsigned long prot); | |
80b0ca98 CH |
1078 | void iounmap(volatile void __iomem *addr); |
1079 | ||
/* Generic ioremap() expressed via ioremap_prot(); the page-protection
 * value comes from the architecture. */
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
1085 | #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ | |
97c9801a | 1086 | |
3f7e212d | 1087 | #ifndef ioremap_wc |
d092a870 | 1088 | #define ioremap_wc ioremap |
3f7e212d AB |
1089 | #endif |
1090 | ||
d838270e | 1091 | #ifndef ioremap_wt |
d092a870 | 1092 | #define ioremap_wt ioremap |
d838270e TK |
1093 | #endif |
1094 | ||
e9713395 CH |
1095 | /* |
1096 | * ioremap_uc is special in that we do require an explicit architecture | |
1097 | * implementation. In general you do not want to use this function in a | |
1098 | * driver and use plain ioremap, which is uncached by default. Similarly | |
1099 | * architectures should not implement it unless they have a very good | |
1100 | * reason. | |
1101 | */ | |
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
/* Default: no arch implementation — return NULL so callers can detect
 * the absence of support (see the comment above). */
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
7c566bb5 HM |
1109 | |
1110 | /* | |
1111 | * ioremap_np needs an explicit architecture implementation, as it | |
1112 | * requests stronger semantics than regular ioremap(). Portable drivers | |
1113 | * should instead use one of the higher-level abstractions, like | |
1114 | * devm_ioremap_resource(), to choose the correct variant for any given | |
1115 | * device and bus. Portable drivers with a good reason to want non-posted | |
1116 | * write semantics should always provide an ioremap() fallback in case | |
1117 | * ioremap_np() is not available. | |
1118 | */ | |
#ifndef ioremap_np
#define ioremap_np ioremap_np
/* Default: no arch implementation — return NULL so callers can fall back
 * to plain ioremap() (see the comment above). */
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
1126 | ||
ce816fa8 | 1127 | #ifdef CONFIG_HAS_IOPORT_MAP |
3f7e212d | 1128 | #ifndef CONFIG_GENERIC_IOMAP |
9216efaf TR |
#ifndef ioport_map
#define ioport_map ioport_map
/*
 * Map a legacy I/O port range into the virtual window at PCI_IOBASE.
 * The port is masked to IO_SPACE_LIMIT; ports above MMIO_UPPER_LIMIT
 * cannot be mapped and yield NULL.  Note: @nr (the range length) is
 * not validated here.
 */
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
3f7e212d | 1138 | |
9216efaf TR |
#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
/* The generic ioport_map() allocates nothing, so unmapping is a no-op. */
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
3f7e212d AB |
1145 | #else /* CONFIG_GENERIC_IOMAP */ |
1146 | extern void __iomem *ioport_map(unsigned long port, unsigned int nr); | |
1147 | extern void ioport_unmap(void __iomem *p); | |
1148 | #endif /* CONFIG_GENERIC_IOMAP */ | |
ce816fa8 | 1149 | #endif /* CONFIG_HAS_IOPORT_MAP */ |
3f7e212d | 1150 | |
f5810e5c | 1151 | #ifndef CONFIG_GENERIC_IOMAP |
f5810e5c | 1152 | #ifndef pci_iounmap |
316e8d79 LT |
1153 | #define ARCH_WANTS_GENERIC_PCI_IOUNMAP |
1154 | #endif | |
f5810e5c | 1155 | #endif |
f5810e5c | 1156 | |
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
/* Default translation of a physical address to a kernel virtual address
 * via __va(); architectures override this when __va() is not valid for
 * the address in question. */
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif
1164 | ||
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
/* Nothing to release for the default __va()-based xlate_dev_mem_ptr(). */
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
3f7e212d | 1171 | |
#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif
1188 | ||
#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif
9216efaf | 1206 | |
#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
3f7e212d | 1223 | |
527701ed | 1224 | extern int devmem_is_allowed(unsigned long pfn); |
527701ed | 1225 | |
3f7e212d AB |
1226 | #endif /* __KERNEL__ */ |
1227 | ||
1228 | #endif /* __ASM_GENERIC_IO_H */ |