oom_reaper: close race with exiting task
author Michal Hocko <mhocko@suse.com>
Fri, 27 May 2016 21:27:35 +0000 (14:27 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 May 2016 21:49:37 +0000 (14:49 -0700)
Tetsuo has reported:
  Out of memory: Kill process 443 (oleg's-test) score 855 or sacrifice child
  Killed process 443 (oleg's-test) total-vm:493248kB, anon-rss:423880kB, file-rss:4kB, shmem-rss:0kB
  sh invoked oom-killer: gfp_mask=0x24201ca(GFP_HIGHUSER_MOVABLE|__GFP_COLD), order=0, oom_score_adj=0
  sh cpuset=/ mems_allowed=0
  CPU: 2 PID: 1 Comm: sh Not tainted 4.6.0-rc7+ #51
  Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/31/2013
  Call Trace:
    dump_stack+0x85/0xc8
    dump_header+0x5b/0x394
  oom_reaper: reaped process 443 (oleg's-test), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB

In other words:

  __oom_reap_task                 exit_mm
    atomic_inc_not_zero
                                    tsk->mm = NULL
                                    mmput
                                      atomic_dec_and_test # > 0
                                    exit_oom_victim # New victim will be
                                                    # selected
                                  <OOM killer invoked>
                                    # no TIF_MEMDIE task so we can select a new one
    unmap_page_range # to release the memory

The race exists even without the oom_reaper, because anybody who pins the
address space and gets preempted might race with exit_mm in the same way,
but the oom_reaper made this race more probable.
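
For illustration, here is a minimal userspace sketch of that window.  It is
not kernel code; fake_mm, pin_mm, exiter, reaper and victim_selected are
invented stand-ins for the mm_users pinning, the exit_mm/mmput path and
exit_oom_victim:

  /*
   * Userspace model of the race: "reaper" pins a refcounted object the way
   * __oom_reap_task pins mm_users, while "exiter" drops its reference and
   * clears the victim flag (like exit_oom_victim) before any memory has
   * actually been reaped.  All names here are invented for illustration.
   */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct fake_mm {
          atomic_int users;                       /* models mm->mm_users */
  };

  static struct fake_mm mm = { .users = 1 };
  static atomic_bool victim_selected = true;      /* models TIF_MEMDIE */

  /* models atomic_inc_not_zero(&mm->mm_users) */
  static bool pin_mm(struct fake_mm *m)
  {
          int old = atomic_load(&m->users);

          while (old > 0)
                  if (atomic_compare_exchange_weak(&m->users, &old, old + 1))
                          return true;
          return false;
  }

  static void *exiter(void *arg)
  {
          /* exit_mm(): drop our reference (mmput)... */
          atomic_fetch_sub(&mm.users, 1);
          /* ...then exit_oom_victim(): no TIF_MEMDIE task is left, so the
           * OOM killer is free to pick a new victim even though nothing
           * has been freed yet. */
          atomic_store(&victim_selected, false);
          return NULL;
  }

  static void *reaper(void *arg)
  {
          if (!pin_mm(&mm))
                  return NULL;
          /* window: exiter can run to completion right here */
          if (!atomic_load(&victim_selected))
                  puts("race: new victim selectable before memory is reaped");
          /* unmap_page_range() would only happen now */
          atomic_fetch_sub(&mm.users, 1);
          return NULL;
  }

  int main(void)
  {
          pthread_t a, b;

          pthread_create(&a, NULL, reaper, NULL);
          pthread_create(&b, NULL, exiter, NULL);
          pthread_join(a, NULL);
          pthread_join(b, NULL);
          return 0;
  }

Compile with gcc -pthread; whether the message prints depends on which
thread wins the window, which is exactly the point of the race.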

We can address the oom_reaper part by taking oom_lock in __oom_reap_task,
because that guarantees a new oom victim will not be selected while the oom
reaper might be racing with the exit path.  This doesn't solve the original
issue, though, because somebody else might still be pinning mm_users, in
which case __mmput won't be called to release the memory.  That part is not
really reliably solvable, because the task gets out of the OOM killer's
sight as soon as it is unhashed from the task list, and so we cannot
guarantee that a new victim won't be selected.
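
Reduced to the same userspace model (again with invented names; the real
change is in the diff below), the shape of the fix is simply to hold one
lock across both the reap and the victim-selection decision:

  /*
   * Userspace sketch of the serialization the patch below adds: the reaper
   * holds the lock for the whole reap, and victim selection takes the same
   * lock, so it cannot observe the "no TIF_MEMDIE task" state while a reap
   * is still in flight.  oom_lock, reap and select_new_victim here are
   * stand-ins, not the kernel symbols themselves.
   */
  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;
  static bool memory_reaped;      /* models MMF_OOM_REAPED */

  static void reap(void)
  {
          pthread_mutex_lock(&oom_lock);
          /* pin mm_users, unmap_page_range(), set MMF_OOM_REAPED... */
          memory_reaped = true;
          pthread_mutex_unlock(&oom_lock);
  }

  static void select_new_victim(void)
  {
          pthread_mutex_lock(&oom_lock);
          if (memory_reaped)
                  puts("memory already released, no new victim needed yet");
          else
                  puts("selecting a new victim");
          pthread_mutex_unlock(&oom_lock);
  }

  int main(void)
  {
          reap();
          select_new_victim();
          return 0;
  }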

[akpm@linux-foundation.org: fix use of unused `mm', Per Stephen]
[akpm@linux-foundation.org: coding-style fixes]
Fixes: aac453635549 ("mm, oom: introduce oom reaper")
Link: http://lkml.kernel.org/r/1464271493-20008-1-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/oom_kill.c

index 326dd14938f08d8278d6b536fee28e9c7bf05356..dfb1ab61fb2374379ca014ee4f764366dc20c081 100644
@@ -443,12 +443,28 @@ static bool __oom_reap_task(struct task_struct *tsk)
 {
        struct mmu_gather tlb;
        struct vm_area_struct *vma;
-       struct mm_struct *mm;
+       struct mm_struct *mm = NULL;
        struct task_struct *p;
        struct zap_details details = {.check_swap_entries = true,
                                      .ignore_dirty = true};
        bool ret = true;
 
+       /*
+        * We have to make sure to not race with the victim exit path
+        * and cause premature new oom victim selection:
+        * __oom_reap_task              exit_mm
+        *   atomic_inc_not_zero
+        *                                mmput
+        *                                  atomic_dec_and_test
+        *                                exit_oom_victim
+        *                              [...]
+        *                              out_of_memory
+        *                                select_bad_process
+        *                                  # no TIF_MEMDIE task selects new victim
+        *  unmap_page_range # frees some memory
+        */
+       mutex_lock(&oom_lock);
+
        /*
         * Make sure we find the associated mm_struct even when the particular
         * thread has already terminated and cleared its mm.
@@ -457,19 +473,19 @@ static bool __oom_reap_task(struct task_struct *tsk)
         */
        p = find_lock_task_mm(tsk);
        if (!p)
-               return true;
+               goto unlock_oom;
 
        mm = p->mm;
        if (!atomic_inc_not_zero(&mm->mm_users)) {
                task_unlock(p);
-               return true;
+               goto unlock_oom;
        }
 
        task_unlock(p);
 
        if (!down_read_trylock(&mm->mmap_sem)) {
                ret = false;
-               goto out;
+               goto unlock_oom;
        }
 
        tlb_gather_mmu(&tlb, mm, 0, -1);
@@ -511,13 +527,15 @@ static bool __oom_reap_task(struct task_struct *tsk)
         * to release its memory.
         */
        set_bit(MMF_OOM_REAPED, &mm->flags);
-out:
+unlock_oom:
+       mutex_unlock(&oom_lock);
        /*
         * Drop our reference but make sure the mmput slow path is called from a
         * different context because we shouldn't risk we get stuck there and
         * put the oom_reaper out of the way.
         */
-       mmput_async(mm);
+       if (mm)
+               mmput_async(mm);
        return ret;
 }