mm: delete checks for xor_unlock_is_negative_byte()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 4 Oct 2023 16:53:14 +0000 (17:53 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
Wed, 18 Oct 2023 21:34:17 +0000 (14:34 -0700)
Architectures which don't define their own xor_unlock_is_negative_byte()
use the one in asm-generic/bitops/lock.h, so the function is now always
available.  Get rid of all the ifdefs around "maybe we don't have it".

Link: https://lkml.kernel.org/r/20231004165317.1061855-15-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/alpha/include/asm/bitops.h
arch/m68k/include/asm/bitops.h
arch/mips/include/asm/bitops.h
arch/riscv/include/asm/bitops.h
include/asm-generic/bitops/instrumented-lock.h
include/asm-generic/bitops/lock.h
kernel/kcsan/kcsan_test.c
kernel/kcsan/selftest.c
mm/filemap.c
mm/kasan/kasan_test.c

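The operation every caller can now rely on unconditionally is the generic
fallback visible in the include/asm-generic/bitops/lock.h hunk below: an
atomic fetch-XOR with release ordering followed by a test of bit 7, which
is what folio_unlock() in mm/filemap.c now calls directly.  The sketch
below is a minimal userspace model of that semantics, not kernel code:
GCC/Clang __atomic builtins stand in for raw_atomic_long_fetch_xor_release(),
bit 0 stands in for PG_locked, and the model_* names are made up for
illustration.

/* Standalone model of the generic xor_unlock_is_negative_byte() fallback. */
#include <stdbool.h>
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))

static bool model_xor_unlock_is_negative_byte(unsigned long mask,
					      volatile unsigned long *p)
{
	/* Atomically XOR 'mask' in with release ordering; get the old word back. */
	unsigned long old = __atomic_fetch_xor((unsigned long *)p, mask,
					       __ATOMIC_RELEASE);

	/* Report whether bit 7 (the PG_waiters position) was set before the unlock. */
	return (old & BIT(7)) != 0;
}

int main(void)
{
	/* Model folio_unlock(): clear the lock bit, then check for a waiter. */
	unsigned long flags = BIT(0) | BIT(7);	/* "locked", with a waiter queued */

	if (model_xor_unlock_is_negative_byte(BIT(0), &flags))
		printf("waiter present: would call folio_wake_bit()\n");

	printf("flags after unlock: %#lx\n", flags);	/* 0x80: waiter bit remains */
	return 0;
}
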
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index b50ad6b83e85486d26305bd7b6a34dfaa041bd3d..3e33621922c31bea419e27ed763bb7fba00c95ac 100644
@@ -305,7 +305,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
 
        return (old & BIT(7)) != 0;
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
 
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 80ee3609590556a9f43b80f226a719736e656f41..14c64a6f1217624cb68c86f7faee2ac9e5a54b4b 100644
@@ -339,7 +339,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
        return result;
 #endif
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
 
 /*
  *     The true 68020 and more advanced processors support the "bfffo"
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index d98a05c478f427614bbd3afad6fb9bc0db5c93d4..89f73d1a4ea4e71191ec34221ac22fe5b4cbdc4a 100644
@@ -301,7 +301,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
 
        return res;
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
 
 #undef __bit_op
 #undef __test_bit_op
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 15e3044298a2723ba1578d6ef68b1d17e1acf9d9..65f6eee4ab8d7751d412c04b9a3f2d6ec078858e 100644
@@ -202,7 +202,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
                : "memory");
        return (res & BIT(7)) != 0;
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
 
 #undef __test_and_op_bit
 #undef __op_bit
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index e8ea3aeda9a9ae232ba32acd1d140cb7bbf23205..542d3727ee4e3aaebb8e0742588c684ad071abf3 100644
@@ -58,7 +58,6 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
        return arch_test_and_set_bit_lock(nr, addr);
 }
 
-#if defined(arch_xor_unlock_is_negative_byte)
 /**
  * xor_unlock_is_negative_byte - XOR a single byte in memory and test if
  * it is negative, for unlock.
@@ -80,8 +79,4 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
        instrument_atomic_write(addr, sizeof(long));
        return arch_xor_unlock_is_negative_byte(mask, addr);
 }
-/* Let everybody know we have it. */
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
-#endif
-
 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 6a638e89d130da2a67c2762880cc89f008397939..14d4ec8c5152d63da4062c146018a2022ccf0db0 100644
@@ -75,7 +75,6 @@ static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
        old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
        return !!(old & BIT(7));
 }
-#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
 #endif
 
 #include <asm-generic/bitops/instrumented-lock.h>
diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
index 1333d23ac4efe2dc6cf29af954ec830c1a848198..015586217875319dec892e73ff13832600ff5646 100644
@@ -699,12 +699,9 @@ static void test_barrier_nothreads(struct kunit *test)
        KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
        KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
        KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);
-
-#ifdef xor_unlock_is_negative_byte
        KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
        KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
        KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
-#endif
        kcsan_nestable_atomic_end();
 }
 
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index 619be7417420f6a428f3092ad11003df7fedc032..84a1200271affaa0c5152c168d1990c04fef86d0 100644
@@ -227,12 +227,9 @@ static bool __init test_barrier(void)
        KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock));
        spin_lock(&test_spinlock);
        KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));
-
-#ifdef xor_unlock_is_negative_byte
        KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
        KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
        KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
-#endif
        kcsan_nestable_atomic_end();
 
        return ret;
diff --git a/mm/filemap.c b/mm/filemap.c
index c637863f4643c154b98c57a97a2b4c650ce792f3..458377e9a1843c0f06401d73553c5ecc70f2f94c 100644
@@ -1482,34 +1482,6 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
 }
 EXPORT_SYMBOL_GPL(folio_add_wait_queue);
 
-#ifdef xor_unlock_is_negative_byte
-#define clear_bit_unlock_is_negative_byte(nr, p)       \
-       xor_unlock_is_negative_byte(1 << nr, p)
-#endif
-
-#ifndef clear_bit_unlock_is_negative_byte
-
-/*
- * PG_waiters is the high bit in the same byte as PG_lock.
- *
- * On x86 (and on many other architectures), we can clear PG_lock and
- * test the sign bit at the same time. But if the architecture does
- * not support that special operation, we just do this all by hand
- * instead.
- *
- * The read of PG_waiters has to be after (or concurrently with) PG_locked
- * being cleared, but a memory barrier should be unnecessary since it is
- * in the same byte as PG_locked.
- */
-static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
-{
-       clear_bit_unlock(nr, mem);
-       /* smp_mb__after_atomic(); */
-       return test_bit(PG_waiters, mem);
-}
-
-#endif
-
 /**
  * folio_unlock - Unlock a locked folio.
  * @folio: The folio.
@@ -1525,7 +1497,7 @@ void folio_unlock(struct folio *folio)
        BUILD_BUG_ON(PG_waiters != 7);
        BUILD_BUG_ON(PG_locked > 7);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
-       if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
+       if (xor_unlock_is_negative_byte(1 << PG_locked, folio_flags(folio, 0)))
                folio_wake_bit(folio, PG_locked);
 }
 EXPORT_SYMBOL(folio_unlock);
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 821c9ea0473a3fa2f8a42fc57c801c3188066d75..8281eb42464be15af4188c115f3878f4ebeea374 100644
@@ -1098,12 +1098,9 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
        KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
        KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
        KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
-
-#if defined(xor_unlock_is_negative_byte)
        if (nr < 7)
                KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
                                xor_unlock_is_negative_byte(1 << nr, addr));
-#endif
 }
 
 static void kasan_bitops_generic(struct kunit *test)