/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H
#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * MTD mapped flash.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff
/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;
/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
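
/*
 * A minimal usage sketch (not part of this header): platform setup code
 * typically points the port base at an uncached window once, early during
 * boot.  The physical base 0x18000000 below is a made-up example value;
 * real platforms use their own I/O window, but note the lower 16 bits of
 * the resulting virtual address must be zero, per the comment above.
 *
 *	static void __init example_plat_io_init(void)
 *	{
 *		set_io_port_base(CKSEG1ADDR(0x18000000));
 *	}
 */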
/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses
 * for use with I/O ports.
 */
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL
/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
/*
 * virt_to_phys	-	map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}
/*
 * phys_to_virt	-	map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
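
/*
 * Illustrative sketch (not part of this header, error handling elided):
 * for directly mapped kernel memory the two helpers above are inverses
 * of each other.
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *
 *	WARN_ON(phys_to_virt(phys) != buf);
 *	kfree(buf);
 */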
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}
/*
 * ioremap_prot     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset,
		unsigned long size, unsigned long prot_val)
{
	return __ioremap_mode(offset, size, prot_val & _CACHE_MASK);
}
/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
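
/*
 * Typical driver usage, as a hedged sketch of a probe-path fragment
 * (DEV_PHYS and REG_CTRL are hypothetical example constants, not defined
 * by this header):
 *
 *	void __iomem *regs = ioremap(DEV_PHYS, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(readl(regs + REG_CTRL) | 0x1, regs + REG_CTRL);
 *	iounmap(regs);
 */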
/*
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache
/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cacheable by
 * the CPU.  Also enables full write-combining.	 Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
#define ioremap_cache ioremap_cachable
/*
 * ioremap_wc     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, which may vastly
 * improve communication performance.  If it is determined at boot that
 * the CPU CCA doesn't support UCA, this method falls back to
 * _CACHE_UNCACHED (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
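
/*
 * A hedged sketch of where write combining pays off: streaming into a
 * prefetchable PCIe BAR such as a framebuffer.  FB_PHYS and FB_SIZE are
 * hypothetical example constants.
 *
 *	void __iomem *fb = ioremap_wc(FB_PHYS, FB_SIZE);
 *
 *	if (fb) {
 *		memset_io(fb, 0, FB_SIZE);
 *		iounmap(fb);
 *	}
 */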
static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __writeq""\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	pop"				"\n\t"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __readq" "\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	pop"				"\n\t"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}
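
/*
 * For orientation, a hedged sketch: expanding, e.g.,
 * __BUILD_MEMORY_SINGLE(, l, u32, 1, 0, 1) (via __BUILD_MEMORY_PFX below)
 * yields the ordinary readl()/writel() accessors, so generic driver code
 * like the following works unchanged on MIPS.  Here regs is assumed to be
 * an ioremap()ed cookie and REG_STAT a hypothetical register offset:
 *
 *	u32 status = readl(regs + REG_STAT);
 *
 *	writel(status & ~0x1, regs + REG_STAT);
 */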
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}
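
/*
 * Sketch of the resulting port API (hedged; 0x3f8, the PC-style UART base,
 * is only an example port number, and the LCR/LSR offsets below are 16550
 * conventions, not anything this header defines):
 *
 *	outb(0x03, 0x3f8 + 3);
 *	while (!(inb(0x3f8 + 5) & 0x20))
 *		cpu_relax();
 */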
#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)			\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)				\
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(, bwlq, type, 0)
BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif
#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)
#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed			__relaxed_readq
#endif

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed			__relaxed_writeq
#endif
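
/*
 * The _relaxed read accessors skip the trailing rmb() that orders MMIO
 * reads against subsequent accesses to coherent DMA memory, so they are
 * cheaper when that ordering is not needed.  A hedged sketch, polling a
 * hypothetical REG_BUSY register:
 *
 *	while (readl_relaxed(regs + REG_BUSY) & 0x1)
 *		cpu_relax();
 */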
#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols
 */
#ifdef CONFIG_64BIT
#define readq				readq
#define writeq				writeq
#endif
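
/*
 * Hedged usage sketch: the _be accessors read and write registers that are
 * big-endian regardless of CPU endianness, e.g. a hypothetical REG_VERSION
 * register in an ioremap()ed window:
 *
 *	u32 ver = readl_be(regs + REG_VERSION);
 */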
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}
#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
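
/*
 * Hedged sketch: draining a 16-bit data FIFO with the generated string
 * accessors.  DATA_PORT and FIFO_WORDS are hypothetical example constants:
 *
 *	u16 buf[FIFO_WORDS];
 *
 *	insw(DATA_PORT, buf, FIFO_WORDS);
 */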
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
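
/*
 * Hedged sketch: copying a firmware image into device-local memory that
 * was previously ioremap()ed.  fw_data and fw_size are hypothetical; the
 * wmb() orders the copy before a subsequent doorbell write.
 *
 *	memcpy_toio(dev_mem, fw_data, fw_size);
 *	wmb();
 *	writel(1, regs + DOORBELL);
 */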
/*
 * The caches on some architectures aren't dma-coherent and have need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    from a device to memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
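
/*
 * Hedged sketch of the arch-internal usage pattern (buf and len are
 * hypothetical): write dirty lines back before a memory-to-device
 * transfer, and invalidate before reading data a device has written.
 *
 *	dma_cache_wback((unsigned long)buf, len);
 *	(start the memory-to-device transfer here)
 *
 *	dma_cache_inv((unsigned long)buf, len);
 *	(now it is safe to read the device-written buffer)
 */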
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
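
/*
 * Hedged sketch: accessing a 32-bit control/status register that lives in
 * a 64-bit bus slot.  CSR_BASE is a hypothetical example address; the
 * adjust macro selects the correct word on either endianness.
 *
 *	u32 v = csr_in32(CSR_BASE);
 *
 *	csr_out32(v | 0x1, CSR_BASE);
 */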
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */