kernel: better document the use_mm/unuse_mm API contract
author     Christoph Hellwig <hch@lst.de>
           Thu, 11 Jun 2020 01:42:06 +0000 (18:42 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 11 Jun 2020 02:14:18 +0000 (19:14 -0700)
Switch the function documentation to kerneldoc comments, and add
WARN_ON_ONCE asserts that the calling thread is a kernel thread and does
not have ->mm set (or has ->mm set in the case of unuse_mm).

Also give the functions a kthread_ prefix to better document the use case.
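To illustrate the contract the new WARN_ON_ONCE checks enforce, here is a minimal,
hypothetical sketch (not part of this patch; the helper name and its caller are made
up) of a kernel thread briefly adopting a user mm to copy data out:

    /*
     * Hypothetical helper, for illustration only: a kernel thread
     * (PF_KTHREAD, ->mm == NULL) temporarily adopts a user mm to
     * write a result into a user buffer, then drops it again.
     */
    #include <linux/errno.h>
    #include <linux/kthread.h>
    #include <linux/sched/mm.h>
    #include <linux/uaccess.h>

    static int example_copy_to_user_mm(struct mm_struct *mm,
                                       void __user *dst,
                                       const void *src, size_t len)
    {
            int ret = 0;

            /* Pin the mm so it cannot go away while we use it. */
            if (!mmget_not_zero(mm))
                    return -ESRCH;

            kthread_use_mm(mm);     /* warns if caller is not a kthread or already has ->mm */
            if (copy_to_user(dst, src, len))
                    ret = -EFAULT;
            kthread_unuse_mm(mm);   /* must pair with kthread_use_mm() */

            mmput(mm);
            return ret;
    }

Callers that can also run in process context (such as the vfio chunk below) keep
checking current->mm and only call kthread_use_mm() when actually running in a
kernel thread.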

[hch@lst.de: fix a comment typo, cover the newly merged use_mm/unuse_mm caller in vfio]
Link: http://lkml.kernel.org/r/20200416053158.586887-3-hch@lst.de
[sfr@canb.auug.org.au: powerpc/vas: fix up for {un}use_mm() rename]
Link: http://lkml.kernel.org/r/20200422163935.5aa93ba5@canb.auug.org.au
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> [usb]
Acked-by: Haren Myneni <haren@linux.ibm.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Jason Wang <jasowang@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Link: http://lkml.kernel.org/r/20200404094101.672954-6-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
12 files changed:
arch/powerpc/platforms/powernv/vas-fault.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/legacy/inode.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
fs/io-wq.c
fs/io_uring.c
include/linux/kthread.h
kernel/kthread.c
mm/oom_kill.c
mm/vmacache.c

diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 25db70be4c9ce8285685c91ae5d871b6b28abc4a..266a6ca5e15e678e8f042f3d30787fdb0e2aead1 100644
@@ -127,7 +127,7 @@ static void update_csb(struct vas_window *window,
                return;
        }
 
-       use_mm(window->mm);
+       kthread_use_mm(window->mm);
        rc = copy_to_user(csb_addr, &csb, sizeof(csb));
        /*
         * User space polls on csb.flags (first byte). So add barrier
@@ -139,7 +139,7 @@ static void update_csb(struct vas_window *window,
                smp_mb();
                rc = copy_to_user(csb_addr, &csb, sizeof(u8));
        }
-       unuse_mm(window->mm);
+       kthread_unuse_mm(window->mm);
        put_task_struct(tsk);
 
        /* Success */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b94bbb8e7bb4c96fd22c55f21a5466ba74bcf8fa..142746836838c465302d50fdec7600d9f7d977e5 100644
@@ -197,9 +197,9 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
                        if ((mmptr) == current->mm) {                   \
                                valid = !get_user((dst), (wptr));       \
                        } else if (current->mm == NULL) {               \
-                               use_mm(mmptr);                          \
+                               kthread_use_mm(mmptr);                  \
                                valid = !get_user((dst), (wptr));       \
-                               unuse_mm(mmptr);                        \
+                               kthread_unuse_mm(mmptr);                \
                        }                                               \
                        pagefault_enable();                             \
                }                                                       \
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 7ae54b7b637ba8236b02dbe2b8dcac2d74ec7327..f80b2747d7c57ecd2da4ba906ae3743173522d3c 100644
@@ -827,9 +827,9 @@ static void ffs_user_copy_worker(struct work_struct *work)
                mm_segment_t oldfs = get_fs();
 
                set_fs(USER_DS);
-               use_mm(io_data->mm);
+               kthread_use_mm(io_data->mm);
                ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
-               unuse_mm(io_data->mm);
+               kthread_unuse_mm(io_data->mm);
                set_fs(oldfs);
        }
 
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 20fba95ed0a65faaa20acffc0f36e7fdaf00058c..9ee0bfe7bcdaeac2b6377894adf05b64813b7c2d 100644
@@ -462,9 +462,9 @@ static void ep_user_copy_worker(struct work_struct *work)
        struct kiocb *iocb = priv->iocb;
        size_t ret;
 
-       use_mm(mm);
+       kthread_use_mm(mm);
        ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
-       unuse_mm(mm);
+       kthread_unuse_mm(mm);
        if (!ret)
                ret = -EFAULT;
 
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index d5c08a750441fd038c473b59ef115d8dc9782ca5..5e556ac9102a51f6aa0bf55e09b60c88556af4ba 100644
@@ -2817,7 +2817,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
                return -EPERM;
 
        if (kthread)
-               use_mm(mm);
+               kthread_use_mm(mm);
        else if (current->mm != mm)
                goto out;
 
@@ -2844,7 +2844,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
                *copied = copy_from_user(data, (void __user *)vaddr,
                                           count) ? 0 : count;
        if (kthread)
-               unuse_mm(mm);
+               kthread_unuse_mm(mm);
 out:
        mmput(mm);
        return *copied ? 0 : -EFAULT;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ffc7cc31d7eba006cabcb94bb2c8efc84254a676..1ad3d10c121a582ae23fe7d561b0534abaaeed31 100644
@@ -332,7 +332,7 @@ static int vhost_worker(void *data)
        mm_segment_t oldfs = get_fs();
 
        set_fs(USER_DS);
-       use_mm(dev->mm);
+       kthread_use_mm(dev->mm);
 
        for (;;) {
                /* mb paired w/ kthread_stop */
@@ -360,7 +360,7 @@ static int vhost_worker(void *data)
                                schedule();
                }
        }
-       unuse_mm(dev->mm);
+       kthread_unuse_mm(dev->mm);
        set_fs(oldfs);
        return 0;
 }
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 5f590bf27bffd97e2e95c93b9a5981132f46ddbf..748621f7391ec6a814c43a1b8b44163a2165be7e 100644
@@ -170,7 +170,7 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
                }
                __set_current_state(TASK_RUNNING);
                set_fs(KERNEL_DS);
-               unuse_mm(worker->mm);
+               kthread_unuse_mm(worker->mm);
                mmput(worker->mm);
                worker->mm = NULL;
        }
@@ -417,7 +417,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
 {
        if (worker->mm) {
-               unuse_mm(worker->mm);
+               kthread_unuse_mm(worker->mm);
                mmput(worker->mm);
                worker->mm = NULL;
        }
@@ -426,7 +426,7 @@ static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
                return;
        }
        if (mmget_not_zero(work->mm)) {
-               use_mm(work->mm);
+               kthread_use_mm(work->mm);
                if (!worker->mm)
                        set_fs(USER_DS);
                worker->mm = work->mm;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9842443dde203acaee8bec528dbfce78313e0e4a..ec4e9d36210beab0cc6dca23974a1c5750390189 100644
@@ -5866,7 +5866,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (io_op_defs[req->opcode].needs_mm && !current->mm) {
                if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
                        return -EFAULT;
-               use_mm(ctx->sqo_mm);
+               kthread_use_mm(ctx->sqo_mm);
        }
 
        sqe_flags = READ_ONCE(sqe->flags);
@@ -5980,7 +5980,7 @@ static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
        struct mm_struct *mm = current->mm;
 
        if (mm) {
-               unuse_mm(mm);
+               kthread_unuse_mm(mm);
                mmput(mm);
        }
 }
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c2d40c9672d67f1c7dd4424b58908ad3d90f385b..12258ea077cf41a33e4c9508346202d375ab00b2 100644
@@ -200,8 +200,8 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
 
 void kthread_destroy_worker(struct kthread_worker *worker);
 
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
 
 struct cgroup_subsys_state;
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ce4610316377f5c6c07280e5b49f7b85f027dee3..8ed4b4fbec7c50713f9328434b85af685ae31e2c 100644
@@ -1208,18 +1208,18 @@ void kthread_destroy_worker(struct kthread_worker *worker)
 }
 EXPORT_SYMBOL(kthread_destroy_worker);
 
-/*
- * use_mm
- *     Makes the calling kernel thread take on the specified
- *     mm context.
- *     (Note: this routine is intended to be called only
- *     from a kernel thread context)
+/**
+ * kthread_use_mm - make the calling kthread operate on an address space
+ * @mm: address space to operate on
  */
-void use_mm(struct mm_struct *mm)
+void kthread_use_mm(struct mm_struct *mm)
 {
        struct mm_struct *active_mm;
        struct task_struct *tsk = current;
 
+       WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+       WARN_ON_ONCE(tsk->mm);
+
        task_lock(tsk);
        active_mm = tsk->active_mm;
        if (active_mm != mm) {
@@ -1236,20 +1236,19 @@ void use_mm(struct mm_struct *mm)
        if (active_mm != mm)
                mmdrop(active_mm);
 }
-EXPORT_SYMBOL_GPL(use_mm);
+EXPORT_SYMBOL_GPL(kthread_use_mm);
 
-/*
- * unuse_mm
- *     Reverses the effect of use_mm, i.e. releases the
- *     specified mm context which was earlier taken on
- *     by the calling kernel thread
- *     (Note: this routine is intended to be called only
- *     from a kernel thread context)
+/**
+ * kthread_unuse_mm - reverse the effect of kthread_use_mm()
+ * @mm: address space to operate on
  */
-void unuse_mm(struct mm_struct *mm)
+void kthread_unuse_mm(struct mm_struct *mm)
 {
        struct task_struct *tsk = current;
 
+       WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+       WARN_ON_ONCE(!tsk->mm);
+
        task_lock(tsk);
        sync_mm_rss(mm);
        tsk->mm = NULL;
@@ -1257,7 +1256,7 @@ void unuse_mm(struct mm_struct *mm)
        enter_lazy_tlb(mm, tsk);
        task_unlock(tsk);
 }
-EXPORT_SYMBOL_GPL(unuse_mm);
+EXPORT_SYMBOL_GPL(kthread_unuse_mm);
 
 #ifdef CONFIG_BLK_CGROUP
 /**
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b4e9491cb320e13347e97bf9732bae5dbcb3737b..6e94962893ee8432a65945a497fab1596c1a1895 100644
@@ -126,7 +126,7 @@ static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
 
 /*
  * The process p may have detached its own ->mm while exiting or through
- * use_mm(), but one or more of its subthreads may still have a valid
+ * kthread_use_mm(), but one or more of its subthreads may still have a valid
  * pointer.  Return p, or any of its subthreads with a valid ->mm, with
  * task_lock() held.
  */
@@ -919,8 +919,8 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
                        continue;
                }
                /*
-                * No use_mm() user needs to read from the userspace so we are
-                * ok to reap it.
+                * No kthread_use_mm() user needs to read from the userspace so
+                * we are ok to reap it.
                 */
                if (unlikely(p->flags & PF_KTHREAD))
                        continue;
diff --git a/mm/vmacache.c b/mm/vmacache.c
index d9092814c772f82b4a8ce23303b078c2571f569f..01a6e6688ec1fbb8b30e0025e4fab1374664ed9d 100644
@@ -24,8 +24,8 @@
  * task's vmacache pertains to a different mm (ie, its own).  There is
  * nothing we can do here.
  *
- * Also handle the case where a kernel thread has adopted this mm via use_mm().
- * That kernel thread's vmacache is not applicable to this mm.
+ * Also handle the case where a kernel thread has adopted this mm via
+ * kthread_use_mm(). That kernel thread's vmacache is not applicable to this mm.
  */
 static inline bool vmacache_valid_mm(struct mm_struct *mm)
 {