alpha: Replace smp_read_barrier_depends() usage with smp_[r]mb()
author     Will Deacon <will@kernel.org>
           Wed, 30 Oct 2019 17:15:01 +0000 (17:15 +0000)
committer  Will Deacon <will@kernel.org>
           Tue, 21 Jul 2020 09:50:36 +0000 (10:50 +0100)
In preparation for removing smp_read_barrier_depends() altogether,
move the Alpha code over to using smp_rmb() and smp_mb() directly.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
arch/alpha/include/asm/atomic.h
arch/alpha/include/asm/pgtable.h
mm/memory.c
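
For context (not part of the patch): Alpha is the one architecture where even a load whose address depends on a prior load can observe stale data, so the classic pointer-publication pattern needs an explicit read barrier on the consumer side. A minimal kernel-style sketch, with hypothetical names (gbl_data, gbl_ptr, publish, consume):

	#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
	#include <asm/barrier.h>	/* smp_wmb(), smp_rmb() */

	static int gbl_data;
	static int *gbl_ptr;

	static void publish(void)
	{
		gbl_data = 42;
		smp_wmb();		/* order the data store before the pointer store */
		WRITE_ONCE(gbl_ptr, &gbl_data);
	}

	static int consume(void)
	{
		int *p = READ_ONCE(gbl_ptr);

		if (!p)
			return -1;
		/*
		 * The address dependency from p to *p orders these two
		 * loads on every architecture except Alpha. This is the
		 * spot where Alpha code used smp_read_barrier_depends();
		 * this patch switches such sites to smp_rmb().
		 */
		smp_rmb();
		return *p;
	}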

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 2144530d1428ca618a599ae3488fef7b5e90ed3b..2f8f7e54792f4449e589ea4cb80cbd3cc0ce65e8 100644
 
 /*
  * To ensure dependency ordering is preserved for the _relaxed and
- * _release atomics, an smp_read_barrier_depends() is unconditionally
- * inserted into the _relaxed variants, which are used to build the
- * barriered versions. Avoid redundant back-to-back fences in the
- * _acquire and _fence versions.
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
  */
 #define __atomic_acquire_fence()
 #define __atomic_post_full_fence()
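
For context (not part of the patch): the generic atomic layer builds the fully-ordered and _acquire forms out of the _relaxed ones by wrapping them in these fence hooks. A simplified sketch of the shape produced by include/linux/atomic-fallback.h (paraphrased, not the exact generated code):

	static __always_inline int
	atomic_add_return(int i, atomic_t *v)
	{
		int ret;
		__atomic_pre_full_fence();
		ret = atomic_add_return_relaxed(i, v);
		__atomic_post_full_fence();	/* empty on Alpha */
		return ret;
	}

	static __always_inline int
	atomic_add_return_acquire(int i, atomic_t *v)
	{
		int ret = atomic_add_return_relaxed(i, v);
		__atomic_acquire_fence();	/* empty on Alpha */
		return ret;
	}

Because Alpha's _relaxed variants now end in smp_mb(), defining __atomic_acquire_fence() and __atomic_post_full_fence() as empty avoids emitting a second, back-to-back barrier.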
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)    \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
-       smp_read_barrier_depends();                                     \
+       smp_mb();                                                       \
        return result;                                                  \
 }
 
@@ -88,7 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)     \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
-       smp_read_barrier_depends();                                     \
+       smp_mb();                                                       \
        return result;                                                  \
 }
 
@@ -123,7 +123,7 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
-       smp_read_barrier_depends();                                     \
+       smp_mb();                                                       \
        return result;                                                  \
 }
 
@@ -141,7 +141,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)  \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
-       smp_read_barrier_depends();                                     \
+       smp_mb();                                                       \
        return result;                                                  \
 }
 
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 162c17b2631fb00f26590667d5eb9594996966d7..660b14ce13179d4d5c0e7220490bf38ec735340e 100644
@@ -277,9 +277,9 @@ extern inline pte_t pte_mkdirty(pte_t pte)  { pte_val(pte) |= __DIRTY_BITS; retur
 extern inline pte_t pte_mkyoung(pte_t pte)     { pte_val(pte) |= __ACCESS_BITS; return pte; }
 
 /*
- * The smp_read_barrier_depends() in the following functions are required to
- * order the load of *dir (the pointer in the top level page table) with any
- * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ * The smp_rmb() calls in the following functions are required to order the
+ * load of *dir (the pointer in the top level page table) with any subsequent
+ * load of the returned pmd_t *ret (ret is data dependent on *dir).
  *
  * If this ordering is not enforced, the CPU might load an older value of
  * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
@@ -293,7 +293,7 @@ extern inline pte_t pte_mkyoung(pte_t pte)  { pte_val(pte) |= __ACCESS_BITS; retu
 extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
 {
        pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
-       smp_read_barrier_depends(); /* see above */
+       smp_rmb(); /* see above */
        return ret;
 }
 #define pmd_offset pmd_offset
@@ -303,7 +303,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
        pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
                + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
-       smp_read_barrier_depends(); /* see above */
+       smp_rmb(); /* see above */
        return ret;
 }
 #define pte_offset_kernel pte_offset_kernel
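
For context (not part of the patch): a walker that chains these accessors performs a sequence of address-dependent loads, each of which must observe up-to-date page-table contents. A hypothetical sketch (read_pte() is illustrative, not a kernel function):

	static pte_t read_pte(pud_t *pudp, unsigned long addr)
	{
		pmd_t *pmdp = pmd_offset(pudp, addr);		/* loads *pudp, then smp_rmb() */
		pte_t *ptep = pte_offset_kernel(pmdp, addr);	/* loads *pmdp, then smp_rmb() */

		/*
		 * Without the smp_rmb() inside the accessors, Alpha could
		 * satisfy these dependent loads from a stale cache line
		 * and hand back uninitialized page-table entries.
		 */
		return *ptep;
	}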
diff --git a/mm/memory.c b/mm/memory.c
index 87ec87cdc1ff1a096243fd7c9beec1c431dbd556..e1f2c730d8bb60d3acef1d170f935f69fc507ccb 100644
@@ -437,7 +437,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
         * of a chain of data-dependent loads, meaning most CPUs (alpha
         * being the notable exception) will already guarantee loads are
         * seen in-order. See the alpha page table accessors for the
-        * smp_read_barrier_depends() barriers in page table walking code.
+        * smp_rmb() barriers in page table walking code.
         */
        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
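
For context (not part of the patch): the smp_wmb() above is the publish side that pairs with the smp_rmb() in the Alpha page-table accessors. A condensed, hypothetical rendering of the surrounding logic in __pte_alloc() (locking and error handling elided):

	pgtable_t new = pte_alloc_one(mm);	/* allocate and zero the new pte page */
	if (!new)
		return -ENOMEM;

	smp_wmb();				/* order the zeroing of the page ... */

	if (pmd_none(*pmd))
		pmd_populate(mm, pmd, new);	/* ... before walkers can see it */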