lazy tlb: introduce lazy tlb mm refcount helper functions
Author:     Nicholas Piggin <npiggin@gmail.com>
AuthorDate: Fri, 3 Feb 2023 07:18:34 +0000 (17:18 +1000)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Tue, 28 Mar 2023 23:20:08 +0000 (16:20 -0700)
Add explicit _lazy_tlb annotated functions for lazy tlb mm refcounting.
This makes the lazy tlb mm references more obvious, and allows the
refcounting scheme to be modified in later changes.  There is no
functional change with this patch.
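
As a rough illustration of the pattern the helpers annotate, this is the
context-switch flow, condensed from the kernel/sched/core.c hunk below
(sketch only, not the complete scheduler code):

	/* context_switch(): prev's mm may be kept on as the lazy tlb mm */
	if (!next->mm) {				// to kernel
		next->active_mm = prev->active_mm;
		if (prev->mm)				// from user
			mmgrab_lazy_tlb(prev->active_mm);
		else
			prev->active_mm = NULL;
	} else {					// to user
		if (!prev->mm) {			// from kernel
			/* will mmdrop_lazy_tlb() in finish_task_switch() */
			rq->prev_mm = prev->active_mm;
			prev->active_mm = NULL;
		}
	}

	/* finish_task_switch(): the lazy tlb reference is finally dropped */
	if (mm)
		mmdrop_lazy_tlb_sched(mm);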

Link: https://lkml.kernel.org/r/20230203071837.1136453-3-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm/mach-rpc/ecard.c
arch/powerpc/kernel/smp.c
arch/powerpc/mm/book3s64/radix_tlb.c
fs/exec.c
include/linux/sched/mm.h
kernel/cpu.c
kernel/exit.c
kernel/kthread.c
kernel/sched/core.c

index 53813f9464a2426494b844ba545f18fb89862863..c30df1097c524b4c701834e093775662802d9341 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -253,7 +253,7 @@ static int ecard_init_mm(void)
        current->mm = mm;
        current->active_mm = mm;
        activate_mm(active_mm, mm);
-       mmdrop(active_mm);
+       mmdrop_lazy_tlb(active_mm);
        ecard_init_pgtables(mm);
        return 0;
 }
index 6b90f10a6c819b2cf95accf64950e23ce92cc538..7db6b3faea657e54a1fbf6f55afb7140ce574c54 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1611,7 +1611,7 @@ void start_secondary(void *unused)
        if (IS_ENABLED(CONFIG_PPC32))
                setup_kup();
 
-       mmgrab(&init_mm);
+       mmgrab_lazy_tlb(&init_mm);
        current->active_mm = &init_mm;
 
        smp_store_cpu_info(cpu);
index e50bc5fc7ddff8084756651b3db4d0162dd85393..ce804b7bf84e42c131634e7639c93096f4acfe9d 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -797,10 +797,10 @@ void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush)
        if (current->active_mm == mm) {
                WARN_ON_ONCE(current->mm != NULL);
                /* Is a kernel thread and is using mm as the lazy tlb */
-               mmgrab(&init_mm);
+               mmgrab_lazy_tlb(&init_mm);
                current->active_mm = &init_mm;
                switch_mm_irqs_off(mm, &init_mm, current);
-               mmdrop(mm);
+               mmdrop_lazy_tlb(mm);
        }
 
        /*
index 7c44d0c65b1b4c7bcb91905110afc330d52a2bc8..87cf3a2f0e9a1bdb2bd1eb0ae9caf67cddcbd291 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1034,7 +1034,7 @@ static int exec_mmap(struct mm_struct *mm)
                mmput(old_mm);
                return 0;
        }
-       mmdrop(active_mm);
+       mmdrop_lazy_tlb(active_mm);
        return 0;
 }
 
index 2a243616f222d85b12f2a799d605c90ccea40fbd..5376caf6fcf3adf1d32156970affd2d4d9c195c6 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -79,6 +79,22 @@ static inline void mmdrop_sched(struct mm_struct *mm)
 }
 #endif
 
+/* Helpers for lazy TLB mm refcounting */
+static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
+{
+       mmgrab(mm);
+}
+
+static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
+{
+       mmdrop(mm);
+}
+
+static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
+{
+       mmdrop_sched(mm);
+}
+
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
  * @mm: The address space to pin.
index 6c0a92ca6bb59c2cb9f2069de409d51f0d359b40..189895288d9db9c8b2c91a95aba80f3b8be26618 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -623,7 +623,7 @@ static int finish_cpu(unsigned int cpu)
         */
        if (mm != &init_mm)
                idle->active_mm = &init_mm;
-       mmdrop(mm);
+       mmdrop_lazy_tlb(mm);
        return 0;
 }
 
index f2afdb0add7c5173956940c2a49342807c6407e7..86902cb5ab78c4e43e7fbb625656572ab9a19eca 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -537,7 +537,7 @@ static void exit_mm(void)
                return;
        sync_mm_rss(mm);
        mmap_read_lock(mm);
-       mmgrab(mm);
+       mmgrab_lazy_tlb(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
index 1f1b60f1a7466d2eb5c5ed5c56d169dce702c964..470708c205e8982030966e41c5f3d4d8c4a9113b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1415,6 +1415,11 @@ void kthread_use_mm(struct mm_struct *mm)
        WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
        WARN_ON_ONCE(tsk->mm);
 
+       /*
+        * It is possible for mm to be the same as tsk->active_mm, but
+        * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
+        * because these references are not equivalent.
+        */
        mmgrab(mm);
 
        task_lock(tsk);
@@ -1438,9 +1443,9 @@ void kthread_use_mm(struct mm_struct *mm)
         * memory barrier after storing to tsk->mm, before accessing
         * user-space memory. A full memory barrier for membarrier
         * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
-        * mmdrop().
+        * mmdrop_lazy_tlb().
         */
-       mmdrop(active_mm);
+       mmdrop_lazy_tlb(active_mm);
 }
 EXPORT_SYMBOL_GPL(kthread_use_mm);
 
@@ -1468,10 +1473,13 @@ void kthread_unuse_mm(struct mm_struct *mm)
        local_irq_disable();
        tsk->mm = NULL;
        membarrier_update_current_mm(NULL);
+       mmgrab_lazy_tlb(mm);
        /* active_mm is still 'mm' */
        enter_lazy_tlb(mm, tsk);
        local_irq_enable();
        task_unlock(tsk);
+
+       mmdrop(mm);
 }
 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
 
index 0d18c3969f90400e5c91e1e0132268dcff5feb65..143e46bd2a68ee48fc1c8c95ca7e4e0a03b7bdf6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5203,13 +5203,14 @@ static struct rq *finish_task_switch(struct task_struct *prev)
         * rq->curr, before returning to userspace, so provide them here:
         *
         * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
-        *   provided by mmdrop(),
+        *   provided by mmdrop_lazy_tlb(),
         * - a sync_core for SYNC_CORE.
         */
        if (mm) {
                membarrier_mm_sync_core_before_usermode(mm);
-               mmdrop_sched(mm);
+               mmdrop_lazy_tlb_sched(mm);
        }
+
        if (unlikely(prev_state == TASK_DEAD)) {
                if (prev->sched_class->task_dead)
                        prev->sched_class->task_dead(prev);
@@ -5266,9 +5267,9 @@ context_switch(struct rq *rq, struct task_struct *prev,
 
        /*
         * kernel -> kernel   lazy + transfer active
-        *   user -> kernel   lazy + mmgrab() active
+        *   user -> kernel   lazy + mmgrab_lazy_tlb() active
         *
-        * kernel ->   user   switch + mmdrop() active
+        * kernel ->   user   switch + mmdrop_lazy_tlb() active
         *   user ->   user   switch
         */
        if (!next->mm) {                                // to kernel
@@ -5276,7 +5277,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 
                next->active_mm = prev->active_mm;
                if (prev->mm)                           // from user
-                       mmgrab(prev->active_mm);
+                       mmgrab_lazy_tlb(prev->active_mm);
                else
                        prev->active_mm = NULL;
        } else {                                        // to user
@@ -5293,7 +5294,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
                lru_gen_use_mm(next->mm);
 
                if (!prev->mm) {                        // from kernel
-                       /* will mmdrop() in finish_task_switch(). */
+                       /* will mmdrop_lazy_tlb() in finish_task_switch(). */
                        rq->prev_mm = prev->active_mm;
                        prev->active_mm = NULL;
                }
@@ -9935,7 +9936,7 @@ void __init sched_init(void)
        /*
         * The boot idle thread does lazy MMU switching as well:
         */
-       mmgrab(&init_mm);
+       mmgrab_lazy_tlb(&init_mm);
        enter_lazy_tlb(&init_mm, current);
 
        /*