include/asm-x86/atomic_32.h: checkpatch cleanups - formatting only
[linux-2.6-block.git] / include / asm-x86 / io_64.h
CommitLineData
1da177e4
LT
1#ifndef _ASM_IO_H
2#define _ASM_IO_H
3
1da177e4
LT
4
5/*
6 * This file contains the definitions for the x86 IO instructions
7 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
8 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
9 * versions of the single-IO instructions (inb_p/inw_p/..).
10 *
11 * This file is not meant to be obfuscating: it's just complicated
12 * to (a) handle it all in a way that makes gcc able to optimize it
13 * as well as possible and (b) trying to avoid writing the same thing
14 * over and over again with slight variations and possibly making a
15 * mistake somewhere.
16 */
17
18/*
19 * Thanks to James van Artsdalen for a better timing-fix than
20 * the two short jumps: using outb's to a nonexistent port seems
21 * to guarantee better timings even on fast machines.
22 *
23 * On the other hand, I'd like to be sure of a non-existent port:
24 * I feel a bit unsafe about using 0x80 (should be safe, though)
25 *
26 * Linus
27 */
28
29 /*
30 * Bit simplified and optimized by Jan Hubicka
31 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
32 *
33 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
34 * isa_read[wl] and isa_write[wl] fixed
35 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
36 */
37
/*
 * Out-of-line port-I/O delay; used by slow_down_io() and hence by the
 * delaying *_p accessors generated below.
 */
extern void native_io_delay(void);

/*
 * Selected I/O-delay strategy and its early-boot initializer.
 * NOTE(review): presumably chosen from the kernel command line -- confirm
 * against arch/x86 io_delay code.
 */
extern int io_delay_type;
extern void io_delay_init(void);
ba082427
GOC
#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else

/*
 * Delay between slow port accesses: one native_io_delay() normally,
 * four when REALLY_SLOW_IO is defined.  Paravirt kernels provide their
 * own slow_down_io() via <asm/paravirt.h> instead.
 */
static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}
#endif
1da177e4
LT
57
/*
 * Talk about misusing macros..
 *
 * __OUT1() opens the function definition, __OUT2() emits the "out"
 * instruction; __OUT() pastes them together twice to generate both
 * out{b,w,l}() and the delaying out{b,w,l}_p() variants.
 */
#define __OUT1(s, x) \
static inline void out##s(unsigned x value, unsigned short port) {

#define __OUT2(s, s1, s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"

/*
 * Historical trick: force REALLY_SLOW_IO on around the accessor
 * expansions and undo it after __IN() below.  NOTE(review):
 * slow_down_io() above is preprocessed before this point, so the
 * define/undef dance looks vestigial here -- confirm before removing.
 */
#ifndef REALLY_SLOW_IO
#define REALLY_SLOW_IO
#define UNSET_REALLY_SLOW_IO
#endif

#define __OUT(s, s1, x) \
__OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); } \
__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
	slow_down_io(); }
1da177e4
LT
76
/*
 * __IN1() opens the function (return type comes from the RETURN_TYPE
 * macro in force at expansion time), __IN2() emits the "in" instruction;
 * __IN() generates both in{b,w,l}() and the delaying in{b,w,l}_p()
 * variants.
 */
#define __IN1(s) \
static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;

#define __IN2(s, s1, s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

#define __IN(s, s1, i...) \
__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \
__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
	slow_down_io(); return _v; }

/* Undo the temporary REALLY_SLOW_IO definition made before __OUT(). */
#ifdef UNSET_REALLY_SLOW_IO
#undef REALLY_SLOW_IO
#endif
1da177e4
LT
91
/* Generate ins{b,w,l}(): repeated string input from a port into memory. */
#define __INS(s) \
static inline void ins##s(unsigned short port, void *addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port), "0" (addr), "1" (count)); }

/* Generate outs{b,w,l}(): repeated string output from memory to a port. */
#define __OUTS(s) \
static inline void outs##s(unsigned short port, const void *addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port), "0" (addr), "1" (count)); }
101
102#define RETURN_TYPE unsigned char
103__IN(b,"")
104#undef RETURN_TYPE
105#define RETURN_TYPE unsigned short
106__IN(w,"")
107#undef RETURN_TYPE
108#define RETURN_TYPE unsigned int
109__IN(l,"")
110#undef RETURN_TYPE
111
112__OUT(b,"b",char)
113__OUT(w,"w",short)
114__OUT(l,,int)
115
116__INS(b)
117__INS(w)
118__INS(l)
119
120__OUTS(b)
121__OUTS(w)
122__OUTS(l)
123
124#define IO_SPACE_LIMIT 0xffff
125
b0957f1a 126#if defined(__KERNEL__) && defined(__x86_64__)
1da177e4
LT
127
128#include <linux/vmalloc.h>
129
#ifndef __i386__
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial: both are thin wrappers around the
 * __pa()/__va() linear-mapping conversions.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
145
146/*
147 * Change "struct page" to physical address.
148 */
1da177e4 149#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
1da177e4
LT
150
151#include <asm-generic/iomap.h>
152
f2d3efed
AK
153extern void *early_ioremap(unsigned long addr, unsigned long size);
154extern void early_iounmap(void *addr, unsigned long size);
155
1da177e4
LT
156/*
157 * This one maps high address device memory and turns off caching for that area.
158 * it's useful if some control registers are in such an area and write combining
159 * or read caching is not desirable:
160 */
b9e76a00
LT
161extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
162extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
6371b495
IM
163
164/*
165 * The default ioremap() behavior is non-cached:
166 */
b9e76a00 167static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
6371b495 168{
9af993a9 169 return ioremap_nocache(offset, size);
6371b495
IM
170}
171
1da177e4 172extern void iounmap(volatile void __iomem *addr);
6371b495 173
18a8bd94 174extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
1da177e4
LT
175
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
191
192/*
193 * readX/writeX() are used to access memory mapped devices. On some
194 * architectures the memory mapped IO stuff needs to be accessed
195 * differently. On the x86 architecture, we just read/write the
196 * memory location directly.
197 */
198
199static inline __u8 __readb(const volatile void __iomem *addr)
200{
201 return *(__force volatile __u8 *)addr;
202}
203static inline __u16 __readw(const volatile void __iomem *addr)
204{
205 return *(__force volatile __u16 *)addr;
206}
cde227af 207static __always_inline __u32 __readl(const volatile void __iomem *addr)
1da177e4
LT
208{
209 return *(__force volatile __u32 *)addr;
210}
211static inline __u64 __readq(const volatile void __iomem *addr)
212{
213 return *(__force volatile __u64 *)addr;
214}
215#define readb(x) __readb(x)
216#define readw(x) __readw(x)
217#define readl(x) __readl(x)
218#define readq(x) __readq(x)
219#define readb_relaxed(a) readb(a)
220#define readw_relaxed(a) readw(a)
221#define readl_relaxed(a) readl(a)
222#define readq_relaxed(a) readq(a)
223#define __raw_readb readb
224#define __raw_readw readw
225#define __raw_readl readl
226#define __raw_readq readq
227
228#define mmiowb()
229
1da177e4
LT
230static inline void __writel(__u32 b, volatile void __iomem *addr)
231{
232 *(__force volatile __u32 *)addr = b;
233}
234static inline void __writeq(__u64 b, volatile void __iomem *addr)
235{
236 *(__force volatile __u64 *)addr = b;
237}
1da177e4
LT
238static inline void __writeb(__u8 b, volatile void __iomem *addr)
239{
240 *(__force volatile __u8 *)addr = b;
241}
242static inline void __writew(__u16 b, volatile void __iomem *addr)
243{
244 *(__force volatile __u16 *)addr = b;
245}
246#define writeq(val,addr) __writeq((val),(addr))
247#define writel(val,addr) __writel((val),(addr))
248#define writew(val,addr) __writew((val),(addr))
249#define writeb(val,addr) __writeb((val),(addr))
250#define __raw_writeb writeb
251#define __raw_writew writew
252#define __raw_writel writel
253#define __raw_writeq writeq
254
255void __memcpy_fromio(void*,unsigned long,unsigned);
256void __memcpy_toio(unsigned long,const void*,unsigned);
257
258static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
259{
260 __memcpy_fromio(to,(unsigned long)from,len);
261}
262static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
263{
264 __memcpy_toio((unsigned long)to,from,len);
265}
266
267void memset_io(volatile void __iomem *a, int b, size_t c);
268
/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

/* flush_write_buffers() expands to nothing on this architecture. */
#define flush_write_buffers()

extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY	iommu_bio_merge

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p
294
295#endif /* __KERNEL__ */
296
297#endif