mm, oom_adj: don't loop through tasks in __set_oom_adj when not necessary
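
The kernel/fork.c side below only records state: copy_oom_score_adj() marks the shared mm with MMF_MULTIPROCESS whenever a CLONE_VM child that is neither a thread nor a vfork child is created, and re-syncs the child's oom_score_adj under oom_adj_mutex. The payoff is on the reader side in __set_oom_adj() (fs/proc/base.c, not part of this diff), which can skip the for_each_process() walk unless the mm is actually shared between processes. The sketch below is illustrative only, assuming the flag is the sole gate for the walk; the hypothetical set_oom_adj_sketch() stands in for the real __set_oom_adj(), which also handles the legacy /proc/<pid>/oom_adj scaling and CAP_SYS_RESOURCE handling omitted here.

    /*
     * Rough sketch, not the upstream code; header placement is approximate.
     * process_shares_mm() is a local helper in fs/proc/base.c.
     */
    #include <linux/oom.h>
    #include <linux/rcupdate.h>
    #include <linux/sched/coredump.h>
    #include <linux/sched/mm.h>
    #include <linux/sched/signal.h>
    #include <linux/sched/task.h>

    static void set_oom_adj_sketch(struct task_struct *task, short oom_adj)
    {
            struct mm_struct *mm = NULL;

            mutex_lock(&oom_adj_mutex);

            /*
             * Only pin the mm when MMF_MULTIPROCESS says another process may
             * share it; a plain fork()+exec() workload never takes the slow path.
             */
            if (!task->vfork_done) {
                    struct task_struct *p = find_lock_task_mm(task);

                    if (p) {
                            if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
                                    mm = p->mm;
                                    mmgrab(mm);
                            }
                            task_unlock(p);
                    }
            }

            task->signal->oom_score_adj = oom_adj;

            if (mm) {
                    struct task_struct *p;

                    /* Slow path: keep every process sharing this mm in sync. */
                    rcu_read_lock();
                    for_each_process(p) {
                            if (same_thread_group(task, p))
                                    continue;
                            task_lock(p);
                            if (!p->vfork_done && process_shares_mm(p, mm))
                                    p->signal->oom_score_adj = oom_adj;
                            task_unlock(p);
                    }
                    rcu_read_unlock();
                    mmdrop(mm);
            }

            mutex_unlock(&oom_adj_mutex);
    }

MMF_MULTIPROCESS is set here and, as far as this patch goes, never cleared, so an mm with a single user process stays on the fast path for its whole lifetime.
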
diff --git a/kernel/fork.c b/kernel/fork.c
index da8d360fb0326e18a059313c8b21fe93220b5eb5..50c90d3681179125aa89665d9375a5c2dfc20ac2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -95,6 +95,7 @@
 #include <linux/stackleak.h>
 #include <linux/kasan.h>
 #include <linux/scs.h>
+#include <linux/io_uring.h>
 
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
@@ -558,7 +559,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                                atomic_dec(&inode->i_writecount);
                        i_mmap_lock_write(mapping);
                        if (tmp->vm_flags & VM_SHARED)
-                               atomic_inc(&mapping->i_mmap_writable);
+                               mapping_allow_writable(mapping);
                        flush_dcache_mmap_lock(mapping);
                        /* insert tmp into the share list, just after mpnt */
                        vma_interval_tree_insert_after(tmp, mpnt,
@@ -589,7 +590,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 
                mm->map_count++;
                if (!(tmp->vm_flags & VM_WIPEONFORK))
-                       retval = copy_page_range(mm, oldmm, mpnt, tmp);
+                       retval = copy_page_range(tmp, mpnt);
 
                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);
@@ -728,6 +729,7 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(refcount_read(&tsk->usage));
        WARN_ON(tsk == current);
 
+       io_uring_free(tsk);
        cgroup_free(tsk);
        task_numa_free(tsk, true);
        security_task_free(tsk);
@@ -1810,6 +1812,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
                free_task(tsk);
 }
 
+static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
+{
+       /* Skip if kernel thread */
+       if (!tsk->mm)
+               return;
+
+       /* Skip if spawning a thread or using vfork */
+       if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
+               return;
+
+       /* We need to synchronize with __set_oom_adj */
+       mutex_lock(&oom_adj_mutex);
+       set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
+       /* Update the values in case they were changed after copy_signal */
+       tsk->signal->oom_score_adj = current->signal->oom_score_adj;
+       tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
+       mutex_unlock(&oom_adj_mutex);
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1983,6 +2004,10 @@ static __latent_entropy struct task_struct *copy_process(
        p->vtime.state = VTIME_INACTIVE;
 #endif
 
+#ifdef CONFIG_IO_URING
+       p->io_uring = NULL;
+#endif
+
 #if defined(SPLIT_RSS_COUNTING)
        memset(&p->rss_stat, 0, sizeof(p->rss_stat));
 #endif
@@ -2282,6 +2307,8 @@ static __latent_entropy struct task_struct *copy_process(
        trace_task_newtask(p, clone_flags);
        uprobe_copy_process(p, clone_flags);
 
+       copy_oom_score_adj(clone_flags, p);
+
        return p;
 
 bad_fork_cancel_cgroup: