From: Michael Ellerman
Date: Wed, 18 Dec 2024 10:55:06 +0000 (+1100)
Subject: powerpc/io: Use standard barrier macros in io.c
X-Git-Tag: io_uring-6.15-20250403~55^2~17
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=1b52e091e7f1422a5b285c68606c307cfdf2b674;p=linux-block.git

powerpc/io: Use standard barrier macros in io.c

io.c uses open-coded barriers. Update it to use the equivalent
macros instead.

Signed-off-by: Michael Ellerman
Signed-off-by: Madhavan Srinivasan
Link: https://patch.msgid.link/20241218105523.416573-18-mpe@ellerman.id.au
---

diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 6e7b49a676d9..bcc201c01514 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -31,13 +31,14 @@ void _insb(const volatile u8 __iomem *port, void *buf, long count)
 
 	if (unlikely(count <= 0))
 		return;
-	asm volatile("sync");
+
+	mb();
 	do {
 		tmp = *(const volatile u8 __force *)port;
 		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
-	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+	data_barrier(tmp);
 }
 EXPORT_SYMBOL(_insb);
 
@@ -47,11 +48,12 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
 
 	if (unlikely(count <= 0))
 		return;
-	asm volatile("sync");
+
+	mb();
 	do {
 		*(volatile u8 __force *)port = *tbuf++;
 	} while (--count != 0);
-	asm volatile("sync");
+	mb();
 }
 EXPORT_SYMBOL(_outsb);
 
@@ -62,13 +64,14 @@ void _insw(const volatile u16 __iomem *port, void *buf, long count)
 
 	if (unlikely(count <= 0))
 		return;
-	asm volatile("sync");
+
+	mb();
 	do {
 		tmp = *(const volatile u16 __force *)port;
 		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
-	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+	data_barrier(tmp);
 }
 EXPORT_SYMBOL(_insw);
 
@@ -78,11 +81,12 @@ void _outsw(volatile u16 __iomem *port, const void *buf, long count)
 
 	if (unlikely(count <= 0))
 		return;
-	asm volatile("sync");
+
+	mb();
 	do {
 		*(volatile u16 __force *)port = *tbuf++;
 	} while (--count != 0);
-	asm volatile("sync");
+	mb();
 }
 EXPORT_SYMBOL(_outsw);
 
@@ -93,13 +97,14 @@ void _insl(const volatile u32 __iomem *port, void *buf, long count)
 
 	if (unlikely(count <= 0))
 		return;
-	asm volatile("sync");
+
+	mb();
 	do {
 		tmp = *(const volatile u32 __force *)port;
 		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
-	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+	data_barrier(tmp);
 }
 EXPORT_SYMBOL(_insl);
 
@@ -109,11 +114,12 @@ void _outsl(volatile u32 __iomem *port, const void *buf, long count)
 
 	if (unlikely(count <= 0))
 		return;
-	asm volatile("sync");
+
+	mb();
 	do {
 		*(volatile u32 __force *)port = *tbuf++;
 	} while (--count != 0);
-	asm volatile("sync");
+	mb();
 }
 EXPORT_SYMBOL(_outsl);
 
@@ -127,7 +133,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n)
 	lc |= lc << 8;
 	lc |= lc << 16;
 
-	__asm__ __volatile__ ("sync" : : : "memory");
+	mb();
 	while(n && !IO_CHECK_ALIGN(p, 4)) {
 		*((volatile u8 *)p) = c;
 		p++;
@@ -143,7 +149,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n)
 		p++;
 		n--;
 	}
-	__asm__ __volatile__ ("sync" : : : "memory");
+	mb();
 }
 EXPORT_SYMBOL(_memset_io);
 
@@ -152,7 +158,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
 {
 	void *vsrc = (void __force *) src;
 
-	__asm__ __volatile__ ("sync" : : : "memory");
+	mb();
 	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
 		eieio();
@@ -174,7 +180,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
 		dest++;
 		n--;
 	}
-	__asm__ __volatile__ ("sync" : : : "memory");
+	mb();
 }
 EXPORT_SYMBOL(_memcpy_fromio);
 
@@ -182,7 +188,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
 {
 	void *vdest = (void __force *) dest;
 
-	__asm__ __volatile__ ("sync" : : : "memory");
+	mb();
 	while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
 		*((volatile u8 *)vdest) = *((u8 *)src);
 		src++;
@@ -201,6 +207,6 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
 		vdest++;
 		n--;
 	}
-	__asm__ __volatile__ ("sync" : : : "memory");
+	mb();
 }
 EXPORT_SYMBOL(_memcpy_toio);
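
For reference, the replacement macros are the standard powerpc barrier helpers,
which emit the same instruction sequences as the open-coded asm removed above.
Below is a simplified sketch of their definitions, approximating (not quoting
verbatim) arch/powerpc/include/asm/barrier.h and asm/io.h:

  /* Simplified sketch of the powerpc barrier macros (approximate, not verbatim). */

  /* mb(): full memory barrier -- the "sync" the open-coded asm used. */
  #define mb()		__asm__ __volatile__ ("sync" : : : "memory")

  /* eieio(): orders accesses to cache-inhibited (I/O) storage. */
  #define eieio()	__asm__ __volatile__ ("eieio" : : : "memory")

  /*
   * data_barrier(x): make sure the load that produced x has completed before
   * any later instruction runs; twi 0,%0,0 never traps but consumes the
   * loaded value, and isync discards speculatively fetched instructions.
   */
  #define data_barrier(x) \
  	__asm__ __volatile__ ("twi 0,%0,0; isync" : : "r" (x) : "memory")

Using the macros keeps the barrier semantics defined in one place (the asm/
headers) rather than duplicating the inline asm at every call site in io.c.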