/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory.  This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

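/*
 * Illustrative sketch (not part of the original file): a typical caller
 * uses the returned task to read ->mm fields under task_lock() and must
 * drop the lock itself, e.g.:
 *
 *	struct task_struct *t = find_lock_task_mm(tsk);
 *
 *	if (t) {
 *		unsigned long rss = get_mm_rss(t->mm);
 *
 *		task_unlock(t);
 *		...
 *	}
 */
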
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When called from mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get a 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}

224 | enum oom_constraint { |
225 | CONSTRAINT_NONE, | |
226 | CONSTRAINT_CPUSET, | |
227 | CONSTRAINT_MEMORY_POLICY, | |
228 | CONSTRAINT_MEMCG, | |
229 | }; | |
230 | ||
9b0f8b04 CL |
231 | /* |
232 | * Determine the type of allocation constraint. | |
233 | */ | |
7c5f64f8 | 234 | static enum oom_constraint constrained_alloc(struct oom_control *oc) |
4365a567 | 235 | { |
54a6eb5c | 236 | struct zone *zone; |
dd1a239f | 237 | struct zoneref *z; |
6e0fc46d | 238 | enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); |
a63d83f4 DR |
239 | bool cpuset_limited = false; |
240 | int nid; | |
9b0f8b04 | 241 | |
7c5f64f8 VD |
242 | if (is_memcg_oom(oc)) { |
243 | oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1; | |
244 | return CONSTRAINT_MEMCG; | |
245 | } | |
246 | ||
a63d83f4 | 247 | /* Default to all available memory */ |
7c5f64f8 VD |
248 | oc->totalpages = totalram_pages + total_swap_pages; |
249 | ||
250 | if (!IS_ENABLED(CONFIG_NUMA)) | |
251 | return CONSTRAINT_NONE; | |
a63d83f4 | 252 | |
6e0fc46d | 253 | if (!oc->zonelist) |
a63d83f4 | 254 | return CONSTRAINT_NONE; |
4365a567 KH |
255 | /* |
256 | * Reach here only when __GFP_NOFAIL is used. So, we should avoid | |
257 | * to kill current.We have to random task kill in this case. | |
258 | * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now. | |
259 | */ | |
6e0fc46d | 260 | if (oc->gfp_mask & __GFP_THISNODE) |
4365a567 | 261 | return CONSTRAINT_NONE; |
9b0f8b04 | 262 | |
4365a567 | 263 | /* |
a63d83f4 DR |
264 | * This is not a __GFP_THISNODE allocation, so a truncated nodemask in |
265 | * the page allocator means a mempolicy is in effect. Cpuset policy | |
266 | * is enforced in get_page_from_freelist(). | |
4365a567 | 267 | */ |
6e0fc46d DR |
268 | if (oc->nodemask && |
269 | !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { | |
7c5f64f8 | 270 | oc->totalpages = total_swap_pages; |
6e0fc46d | 271 | for_each_node_mask(nid, *oc->nodemask) |
7c5f64f8 | 272 | oc->totalpages += node_spanned_pages(nid); |
9b0f8b04 | 273 | return CONSTRAINT_MEMORY_POLICY; |
a63d83f4 | 274 | } |
4365a567 KH |
275 | |
276 | /* Check this allocation failure is caused by cpuset's wall function */ | |
6e0fc46d DR |
277 | for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, |
278 | high_zoneidx, oc->nodemask) | |
279 | if (!cpuset_zone_allowed(zone, oc->gfp_mask)) | |
a63d83f4 | 280 | cpuset_limited = true; |
9b0f8b04 | 281 | |
a63d83f4 | 282 | if (cpuset_limited) { |
7c5f64f8 | 283 | oc->totalpages = total_swap_pages; |
a63d83f4 | 284 | for_each_node_mask(nid, cpuset_current_mems_allowed) |
7c5f64f8 | 285 | oc->totalpages += node_spanned_pages(nid); |
a63d83f4 DR |
286 | return CONSTRAINT_CPUSET; |
287 | } | |
9b0f8b04 CL |
288 | return CONSTRAINT_NONE; |
289 | } | |
290 | ||
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because the chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. If the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}

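/*
 * Note (added for clarity): the final division rescales the raw badness
 * value to per-mille of totalpages, i.e. a value between 0 and 1000; the
 * /proc/<pid>/oom_score interface reports the same scaling. For the worked
 * example above, 350000 * 1000 / 1000000 = 350.
 */
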
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * nr_pmds, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

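/*
 * Illustrative output with made-up values (not from the original file):
 * each eligible task produces one row under the header printed above, e.g.
 *
 *	[ 1234]  1000  1234   123456     7890      42       3     1024           200 mysqld
 */
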
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
		current->comm, oc->gfp_mask, &oc->gfp_mask);
	if (oc->nodemask)
		pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
	else
		pr_cont("(null)");
	pr_cont(",  order=%d, oom_score_adj=%hd\n",
		oc->order, current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

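/* K() converts a page count to kilobytes: with 4 KiB pages (PAGE_SHIFT == 12), K(x) is x << 2. */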
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 *
	 * __oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	/*
	 * Increase mm_users only after we know we will reap something so
	 * that mmput_async is called only when we have reaped something
	 * and the delayed __mmput doesn't matter that much.
	 */
	if (!mmget_not_zero(mm)) {
		up_read(&mm->mmap_sem);
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	trace_start_task_reaping(tsk->pid);

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 NULL);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * Drop our reference but make sure the mmput slow path is called from
	 * a different context because we shouldn't risk getting stuck there
	 * and putting the oom_reaper out of the way.
	 */
	mmput_async(mm);
	trace_finish_task_reaping(tsk->pid);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
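/* With schedule_timeout_idle(HZ/10) between attempts, ten failed tries span roughly one second. */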
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has been either reaped
	 * or somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		mmgrab(tsk->signal->oom_mm);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

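/*
 * Illustrative sketch (assumed caller, modeled on the suspend/freezer path;
 * not part of the original file):
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(timeout_msecs)))
 *		return -EBUSY;		(victims did not exit in time)
 *	...critical section with the OOM killer off...
 *	oom_killer_enable();
 */
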
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without an mm because they might have passed exit_mm and
	 * exit_oom_victim already. oom_reaper could have rescued that but do
	 * not rely on it for now. We can consider find_lock_task_mm in the
	 * future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly.
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	count_memcg_event_mm(mm, OOM_KILL);

	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from userspace, so it is
		 * OK to reap the mm.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

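/*
 * Illustrative sketch (hypothetical driver code, not part of the original
 * file): an OOM notifier receives a pointer to an unsigned long via its
 * last argument and adds the number of pages it managed to free, which
 * out_of_memory() below checks after blocking_notifier_call_chain():
 *
 *	static int example_oom_notify(struct notifier_block *nb,
 *				      unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += example_release_pages();	(hypothetical helper)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_oom_nb = {
 *		.notifier_call = example_oom_notify,
 *	};
 *
 *	register_oom_notifier(&example_oom_nb);
 */
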
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have
	 * at least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}