{
struct mmu_gather tlb;
struct vm_area_struct *vma;
- struct mm_struct *mm;
+ struct mm_struct *mm = NULL;
struct task_struct *p;
struct zap_details details = {.check_swap_entries = true,
.ignore_dirty = true};
bool ret = true;
+ /*
+ * We have to make sure to not race with the victim exit path
+ * and cause premature new oom victim selection:
+ * __oom_reap_task              exit_mm
+ *   atomic_inc_not_zero
+ *                                mmput
+ *                                  atomic_dec_and_test
+ *                                exit_oom_victim
+ *                              [...]
+ *                              out_of_memory
+ *                                select_bad_process
+ *                                  # no TIF_MEMDIE task selects new victim
+ *  unmap_page_range # frees some memory
+ */
+ mutex_lock(&oom_lock);
+
/*
* Make sure we find the associated mm_struct even when the particular
* thread has already terminated and cleared its mm.
*/
p = find_lock_task_mm(tsk);
if (!p)
- return true;
+ goto unlock_oom;
mm = p->mm;
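/*
 * Pin mm_users so the address space cannot be torn down under us. If
 * the count has already dropped to zero the victim is past exit_mm
 * and there is nothing left to reap.
 */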
if (!atomic_inc_not_zero(&mm->mm_users)) {
task_unlock(p);
- return true;
+ goto unlock_oom;
}
task_unlock(p);
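/*
 * Only trylock mmap_sem; the reaper must never block on it. On
 * failure bail out with ret = false so the caller can retry later.
 */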
if (!down_read_trylock(&mm->mmap_sem)) {
ret = false;
- goto out;
+ goto unlock_oom;
}
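/*
 * Set up a local mmu_gather for the unmap; the 0 .. -1 range covers
 * the whole address space.
 */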
tlb_gather_mmu(&tlb, mm, 0, -1);
/*
 * This task can be safely ignored because we cannot do much more
 * to release its memory.
 */
set_bit(MMF_OOM_REAPED, &mm->flags);
-out:
+unlock_oom:
+ mutex_unlock(&oom_lock);
/*
* Drop our reference but make sure the mmput slow path is called from a
* different context because we shouldn't risk getting stuck there and
* putting the oom_reaper out of the way.
*/
- mmput_async(mm);
+ if (mm)
+ mmput_async(mm);
return ret;
}
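/*
 * Some other task still holds a reference to this mm, so make sure
 * everything sharing it is on its way out as well.
 */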
if (atomic_read(&mm->mm_users) > 1) {
rcu_read_lock();
for_each_process(p) {
- bool exiting;
-
if (!process_shares_mm(p, mm))
continue;
- if (same_thread_group(p, tsk))
- continue;
if (fatal_signal_pending(p))
continue;
/*
 * If the task is exiting make sure the whole thread group
 * is exiting and cannot access mm anymore.
 */
- spin_lock_irq(&p->sighand->siglock);
- exiting = signal_group_exit(p->signal);
- spin_unlock_irq(&p->sighand->siglock);
- if (exiting)
+ if (signal_group_exit(p->signal))
continue;
/* Give up */