locking/barriers: Convert users of lockless_dereference() to READ_ONCE()
author    Will Deacon <will.deacon@arm.com>
          Tue, 24 Oct 2017 10:22:48 +0000 (11:22 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Tue, 24 Oct 2017 11:17:33 +0000 (13:17 +0200)
READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.
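
For context, lockless_dereference() was essentially a READ_ONCE() followed by
the dependency barrier; a rough sketch of the old helper (see
include/linux/compiler.h for the exact definition):

	/* Sketch of the pre-conversion helper, not the verbatim header text. */
	#define lockless_dereference(p) \
	({ \
		typeof(p) _________p1 = READ_ONCE(p); \
		smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
		(_________p1); \
	})

With that barrier now implied by READ_ONCE() itself (it is a no-op everywhere
except Alpha), the wrapper no longer buys anything, so the call sites below
are switched over directly.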

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
13 files changed:
arch/x86/events/core.c
arch/x86/include/asm/mmu_context.h
arch/x86/kernel/ldt.c
drivers/md/dm-mpath.c
fs/dcache.c
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
include/linux/rculist.h
include/linux/rcupdate.h
kernel/events/core.c
kernel/seccomp.c
kernel/task_work.c
mm/slab.h

index 80534d3c2480013caa8b170c09c0803b7fef55b9..589af1eec7c1cbf0f6e47fb9a28109820fae2ce4 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment)
                struct ldt_struct *ldt;
 
                /* IRQs are off, so this synchronizes with smp_store_release */
-               ldt = lockless_dereference(current->active_mm->context.ldt);
+               ldt = READ_ONCE(current->active_mm->context.ldt);
                if (!ldt || idx >= ldt->nr_entries)
                        return 0;
 
index 3c856a15b98e8edda98cc5ba7e40fc67f49be230..efc530642f7d5d88d481fc507a7ff213e4581ec5 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -72,8 +72,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;
 
-       /* lockless_dereference synchronizes with smp_store_release */
-       ldt = lockless_dereference(mm->context.ldt);
+       /* READ_ONCE synchronizes with smp_store_release */
+       ldt = READ_ONCE(mm->context.ldt);
 
        /*
         * Any change to mm->context.ldt is followed by an IPI to all
index f0e64db18ac83db5f45e6bb7f531e1536d11522a..0a21390642c4c079805edf3b911c9916f01797e5 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -101,7 +101,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 static void install_ldt(struct mm_struct *current_mm,
                        struct ldt_struct *ldt)
 {
-       /* Synchronizes with lockless_dereference in load_mm_ldt. */
+       /* Synchronizes with READ_ONCE in load_mm_ldt. */
        smp_store_release(&current_mm->context.ldt, ldt);
 
        /* Activate the LDT for all CPUs using current_mm. */
index 11f273d2f018e722b1e9480c5a5eb0e59b0f0048..3f88c9d32f7ecc9a66613d6eb0a3df26cf1dc788 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
 
        pgpath = path_to_pgpath(path);
 
-       if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+       if (unlikely(READ_ONCE(m->current_pg) != pg)) {
                /* Only update current_pgpath if pg changed */
                spin_lock_irqsave(&m->lock, flags);
                m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
        }
 
        /* Were we instructed to switch PG? */
-       if (lockless_dereference(m->next_pg)) {
+       if (READ_ONCE(m->next_pg)) {
                spin_lock_irqsave(&m->lock, flags);
                pg = m->next_pg;
                if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 
        /* Don't change PG until it has no remaining paths */
 check_current_pg:
-       pg = lockless_dereference(m->current_pg);
+       pg = READ_ONCE(m->current_pg);
        if (pg) {
                pgpath = choose_path_in_pg(m, pg, nr_bytes);
                if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
        struct request *clone;
 
        /* Do we need to select a new pgpath? */
-       pgpath = lockless_dereference(m->current_pgpath);
+       pgpath = READ_ONCE(m->current_pgpath);
        if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
                pgpath = choose_pgpath(m, nr_bytes);
 
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
        bool queue_io;
 
        /* Do we need to select a new pgpath? */
-       pgpath = lockless_dereference(m->current_pgpath);
+       pgpath = READ_ONCE(m->current_pgpath);
        queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
        if (!pgpath || !queue_io)
                pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
        struct pgpath *current_pgpath;
        int r;
 
-       current_pgpath = lockless_dereference(m->current_pgpath);
+       current_pgpath = READ_ONCE(m->current_pgpath);
        if (!current_pgpath)
                current_pgpath = choose_pgpath(m, 0);
 
@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
        }
 
        if (r == -ENOTCONN) {
-               if (!lockless_dereference(m->current_pg)) {
+               if (!READ_ONCE(m->current_pg)) {
                        /* Path status changed, redo selection */
                        (void) choose_pgpath(m, 0);
                }
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
                return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
 
        /* Guess which priority_group will be used at next mapping time */
-       pg = lockless_dereference(m->current_pg);
-       next_pg = lockless_dereference(m->next_pg);
-       if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+       pg = READ_ONCE(m->current_pg);
+       next_pg = READ_ONCE(m->next_pg);
+       if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
                pg = next_pg;
 
        if (!pg) {
index f90141387f01ea4ed61bae215e137042527182c5..34c852af215c0f1ff2d73800ed0bc02c9a2f0794 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 {
        /*
         * Be careful about RCU walk racing with rename:
-        * use 'lockless_dereference' to fetch the name pointer.
+        * use 'READ_ONCE' to fetch the name pointer.
         *
         * NOTE! Even if a rename will mean that the length
         * was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
         * early because the data cannot match (there can
         * be no NUL in the ct/tcount data)
         */
-       const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+       const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 
        return dentry_string_cmp(cs, ct, tcount);
 }
index 25d9b5adcd429071537c5edf3185b58a6ab9cd60..36b49bd09264a5bd92df902a1de7d10771d4b770 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
 
 static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
 {
-       return lockless_dereference(oi->__upperdentry);
+       return READ_ONCE(oi->__upperdentry);
 }
index 0f85ee9c3268adb320dcc6c4d37e88ae3ea92013..c67a7703296b924b04e42e2ae1240f9a2cb055f6 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -754,7 +754,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
        if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                struct inode *inode = file_inode(file);
 
-               realfile = lockless_dereference(od->upperfile);
+               realfile = READ_ONCE(od->upperfile);
                if (!realfile) {
                        struct path upperpath;
 
index 2bea1d5e99302bd1b440d595f9df7ab30531e228..5ed091c064b21850c139557c8e5e2662354e3791 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -274,7 +274,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-       container_of(lockless_dereference(ptr), type, member)
+       container_of(READ_ONCE(ptr), type, member)
 
 /*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -367,7 +367,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * example is when items are added to the list, but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
-       container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+       container_of((typeof(ptr))READ_ONCE(ptr), type, member)
 
 /**
  * list_for_each_entry_lockless - iterate over rcu list of given type
index 1a9f70d44af954ffe790dcb75872704beab152b2..a6ddc42f87a5786d08dda84f023f1ed879a74f22 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define __rcu_dereference_check(p, c, space) \
 ({ \
        /* Dependency order vs. p above. */ \
-       typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+       typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
        RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
        rcu_dereference_sparse(p, space); \
        ((typeof(*p) __force __kernel *)(________p1)); \
@@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_dereference_raw(p) \
 ({ \
        /* Dependency order vs. p above. */ \
-       typeof(p) ________p1 = lockless_dereference(p); \
+       typeof(p) ________p1 = READ_ONCE(p); \
        ((typeof(*p) __force __kernel *)(________p1)); \
 })
 
index 9d93db81fa36e683724a50762b1afd0b4057bb7b..824a583079a15a4d7cc6709ca3a90d68f06c7400 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4231,7 +4231,7 @@ static void perf_remove_from_owner(struct perf_event *event)
         * indeed free this event, otherwise we need to serialize on
         * owner->perf_event_mutex.
         */
-       owner = lockless_dereference(event->owner);
+       owner = READ_ONCE(event->owner);
        if (owner) {
                /*
                 * Since delayed_put_task_struct() also drops the last
@@ -4328,7 +4328,7 @@ again:
                 * Cannot change, child events are not migrated, see the
                 * comment with perf_event_ctx_lock_nested().
                 */
-               ctx = lockless_dereference(child->ctx);
+               ctx = READ_ONCE(child->ctx);
                /*
                 * Since child_mutex nests inside ctx::mutex, we must jump
                 * through hoops. We start by grabbing a reference on the ctx.
index 0ae832e13b974041002c508f03a7c9cc8d6d7a6d..8ac79355915b3d544d3ef29ff96dd6b258d597ea 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -189,7 +189,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
        u32 ret = SECCOMP_RET_ALLOW;
        /* Make sure cross-thread synced filter points somewhere sane. */
        struct seccomp_filter *f =
-                       lockless_dereference(current->seccomp.filter);
+                       READ_ONCE(current->seccomp.filter);
 
        /* Ensure unexpected behavior doesn't result in failing open. */
        if (unlikely(WARN_ON(f == NULL)))
index 836a72a66fba14ec1b66f941c50f81ea58444b3a..9a9f262fc53d097b391fc53c2f1abd46817d8fe8 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -67,7 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
         * we raced with task_work_run(), *pprev == NULL/exited.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
-       while ((work = lockless_dereference(*pprev))) {
+       while ((work = READ_ONCE(*pprev))) {
                if (work->func != func)
                        pprev = &work->next;
                else if (cmpxchg(pprev, work, work->next) == work)
index 073362816acc8dd4914e610e6a56e2fdb3295f2a..8894f811a89dedf1df25852592bf97340d59c9f5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -258,7 +258,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
         * memcg_caches issues a write barrier to match this (see
         * memcg_create_kmem_cache()).
         */
-       cachep = lockless_dereference(arr->entries[idx]);
+       cachep = READ_ONCE(arr->entries[idx]);
        rcu_read_unlock();
 
        return cachep;