blob: d514eebad905e21ce0c9793031b9584f1f8328bd
1 | /* |
2 | * linux/mm/oom_kill.c |
3 | * |
4 | * Copyright (C) 1998,2000 Rik van Riel |
5 | * Thanks go out to Claus Fischer for some serious inspiration and |
6 | * for goading me into coding this file... |
7 | * Copyright (C) 2010 Google, Inc. |
8 | * Rewritten by David Rientjes |
9 | * |
10 | * The routines in this file are used to kill a process when |
11 | * we're seriously out of memory. This gets called from __alloc_pages() |
12 | * in mm/page_alloc.c when we really run out of memory. |
13 | * |
14 | * Since we won't call these routines often (on a well-configured |
15 | * machine) this file will double as a 'coding guide' and a signpost |
16 | * for newbie kernel hackers. It features several pointers to major |
17 | * kernel subsystems and hints as to where to find out what things do. |
18 | */ |
19 | |
20 | #include <linux/oom.h> |
21 | #include <linux/mm.h> |
22 | #include <linux/err.h> |
23 | #include <linux/gfp.h> |
24 | #include <linux/sched.h> |
25 | #include <linux/swap.h> |
26 | #include <linux/timex.h> |
27 | #include <linux/jiffies.h> |
28 | #include <linux/cpuset.h> |
29 | #include <linux/export.h> |
30 | #include <linux/notifier.h> |
31 | #include <linux/memcontrol.h> |
32 | #include <linux/mempolicy.h> |
33 | #include <linux/security.h> |
34 | #include <linux/ptrace.h> |
35 | #include <linux/freezer.h> |
36 | #include <linux/ftrace.h> |
37 | #include <linux/ratelimit.h> |
38 | #include <linux/kthread.h> |
39 | #include <linux/init.h> |
40 | #include <linux/mmu_notifier.h> |
41 | |
42 | #include <asm/tlb.h> |
43 | #include "internal.h" |
44 | |
45 | #define CREATE_TRACE_POINTS |
46 | #include <trace/events/oom.h> |
47 | |
48 | int sysctl_panic_on_oom; |
49 | int sysctl_oom_kill_allocating_task; |
50 | int sysctl_oom_dump_tasks = 1; |
51 | |
52 | DEFINE_MUTEX(oom_lock); |
53 | |
54 | #ifdef CONFIG_NUMA |
55 | /** |
56 | * has_intersects_mems_allowed() - check task eligiblity for kill |
57 | * @start: task struct of which task to consider |
58 | * @mask: nodemask passed to page allocator for mempolicy ooms |
59 | * |
60 | * Task eligibility is determined by whether or not a candidate task, @tsk, |
61 | * shares the same mempolicy nodes as current if it is bound by such a policy |
62 | * and whether or not it has the same set of allowed cpuset nodes. |
63 | */ |
64 | static bool has_intersects_mems_allowed(struct task_struct *start, |
65 | const nodemask_t *mask) |
66 | { |
67 | struct task_struct *tsk; |
68 | bool ret = false; |
69 | |
70 | rcu_read_lock(); |
71 | for_each_thread(start, tsk) { |
72 | if (mask) { |
73 | /* |
74 | * If this is a mempolicy constrained oom, tsk's |
75 | * cpuset is irrelevant. Only return true if its |
76 | * mempolicy intersects current, otherwise it may be |
77 | * needlessly killed. |
78 | */ |
79 | ret = mempolicy_nodemask_intersects(tsk, mask); |
80 | } else { |
81 | /* |
82 | * This is not a mempolicy constrained oom, so only |
83 | * check the mems of tsk's cpuset. |
84 | */ |
85 | ret = cpuset_mems_allowed_intersects(current, tsk); |
86 | } |
87 | if (ret) |
88 | break; |
89 | } |
90 | rcu_read_unlock(); |
91 | |
92 | return ret; |
93 | } |
94 | #else |
95 | static bool has_intersects_mems_allowed(struct task_struct *tsk, |
96 | const nodemask_t *mask) |
97 | { |
98 | return true; |
99 | } |
100 | #endif /* CONFIG_NUMA */ |
101 | |
102 | /* |
103 | * The process p may have detached its own ->mm while exiting or through |
104 | * use_mm(), but one or more of its subthreads may still have a valid |
105 | * pointer. Return p, or any of its subthreads with a valid ->mm, with |
106 | * task_lock() held. |
107 | */ |
108 | struct task_struct *find_lock_task_mm(struct task_struct *p) |
109 | { |
110 | struct task_struct *t; |
111 | |
112 | rcu_read_lock(); |
113 | |
114 | for_each_thread(p, t) { |
115 | task_lock(t); |
116 | if (likely(t->mm)) |
117 | goto found; |
118 | task_unlock(t); |
119 | } |
120 | t = NULL; |
121 | found: |
122 | rcu_read_unlock(); |
123 | |
124 | return t; |
125 | } |
126 | |
127 | /* |
128 | * order == -1 means the oom kill is required by sysrq, otherwise only |
129 | * for display purposes. |
130 | */ |
131 | static inline bool is_sysrq_oom(struct oom_control *oc) |
132 | { |
133 | return oc->order == -1; |
134 | } |
135 | |
136 | static inline bool is_memcg_oom(struct oom_control *oc) |
137 | { |
138 | return oc->memcg != NULL; |
139 | } |
140 | |
141 | /* return true if the task is not adequate as candidate victim task. */ |
142 | static bool oom_unkillable_task(struct task_struct *p, |
143 | struct mem_cgroup *memcg, const nodemask_t *nodemask) |
144 | { |
145 | if (is_global_init(p)) |
146 | return true; |
147 | if (p->flags & PF_KTHREAD) |
148 | return true; |
149 | |
150 | /* When mem_cgroup_out_of_memory() and p is not member of the group */ |
151 | if (memcg && !task_in_mem_cgroup(p, memcg)) |
152 | return true; |
153 | |
154 | /* p may not have freeable memory in nodemask */ |
155 | if (!has_intersects_mems_allowed(p, nodemask)) |
156 | return true; |
157 | |
158 | return false; |
159 | } |
160 | |
161 | /** |
162 | * oom_badness - heuristic function to determine which candidate task to kill |
163 | * @p: task struct of which task we should calculate |
164 | * @totalpages: total present RAM allowed for page allocation |
165 | * |
166 | * The heuristic for determining which task to kill is made to be as simple and |
167 | * predictable as possible. The goal is to return the highest value for the |
168 | * task consuming the most memory to avoid subsequent oom failures. |
169 | */ |
170 | unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, |
171 | const nodemask_t *nodemask, unsigned long totalpages) |
172 | { |
173 | long points; |
174 | long adj; |
175 | |
176 | if (oom_unkillable_task(p, memcg, nodemask)) |
177 | return 0; |
178 | |
179 | p = find_lock_task_mm(p); |
180 | if (!p) |
181 | return 0; |
182 | |
183 | /* |
184 | * Do not even consider tasks which are explicitly marked oom |
185 | * unkillable or have been already oom reaped or the are in |
186 | * the middle of vfork |
187 | */ |
188 | adj = (long)p->signal->oom_score_adj; |
189 | if (adj == OOM_SCORE_ADJ_MIN || |
190 | test_bit(MMF_OOM_SKIP, &p->mm->flags) || |
191 | in_vfork(p)) { |
192 | task_unlock(p); |
193 | return 0; |
194 | } |
195 | |
196 | /* |
197 | * The baseline for the badness score is the proportion of RAM that each |
198 | * task's rss, pagetable and swap space use. |
199 | */ |
200 | points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + |
201 | atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm); |
202 | task_unlock(p); |
203 | |
204 | /* |
205 | * Root processes get 3% bonus, just like the __vm_enough_memory() |
206 | * implementation used by LSMs. |
207 | */ |
208 | if (has_capability_noaudit(p, CAP_SYS_ADMIN)) |
209 | points -= (points * 3) / 100; |
210 | |
211 | /* Normalize to oom_score_adj units */ |
212 | adj *= totalpages / 1000; |
213 | points += adj; |
214 | |
215 | /* |
216 | * Never return 0 for an eligible task regardless of the root bonus and |
217 | * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). |
218 | */ |
219 | return points > 0 ? points : 1; |
220 | } |
221 | |
222 | enum oom_constraint { |
223 | CONSTRAINT_NONE, |
224 | CONSTRAINT_CPUSET, |
225 | CONSTRAINT_MEMORY_POLICY, |
226 | CONSTRAINT_MEMCG, |
227 | }; |
228 | |
229 | /* |
230 | * Determine the type of allocation constraint. |
231 | */ |
232 | static enum oom_constraint constrained_alloc(struct oom_control *oc) |
233 | { |
234 | struct zone *zone; |
235 | struct zoneref *z; |
236 | enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); |
237 | bool cpuset_limited = false; |
238 | int nid; |
239 | |
240 | if (is_memcg_oom(oc)) { |
241 | oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1; |
242 | return CONSTRAINT_MEMCG; |
243 | } |
244 | |
245 | /* Default to all available memory */ |
246 | oc->totalpages = totalram_pages + total_swap_pages; |
247 | |
248 | if (!IS_ENABLED(CONFIG_NUMA)) |
249 | return CONSTRAINT_NONE; |
250 | |
251 | if (!oc->zonelist) |
252 | return CONSTRAINT_NONE; |
253 | /* |
254 | * Reach here only when __GFP_NOFAIL is used. So, we should avoid |
255 | * to kill current.We have to random task kill in this case. |
256 | * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now. |
257 | */ |
258 | if (oc->gfp_mask & __GFP_THISNODE) |
259 | return CONSTRAINT_NONE; |
260 | |
261 | /* |
262 | * This is not a __GFP_THISNODE allocation, so a truncated nodemask in |
263 | * the page allocator means a mempolicy is in effect. Cpuset policy |
264 | * is enforced in get_page_from_freelist(). |
265 | */ |
266 | if (oc->nodemask && |
267 | !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { |
268 | oc->totalpages = total_swap_pages; |
269 | for_each_node_mask(nid, *oc->nodemask) |
270 | oc->totalpages += node_spanned_pages(nid); |
271 | return CONSTRAINT_MEMORY_POLICY; |
272 | } |
273 | |
274 | /* Check this allocation failure is caused by cpuset's wall function */ |
275 | for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, |
276 | high_zoneidx, oc->nodemask) |
277 | if (!cpuset_zone_allowed(zone, oc->gfp_mask)) |
278 | cpuset_limited = true; |
279 | |
280 | if (cpuset_limited) { |
281 | oc->totalpages = total_swap_pages; |
282 | for_each_node_mask(nid, cpuset_current_mems_allowed) |
283 | oc->totalpages += node_spanned_pages(nid); |
284 | return CONSTRAINT_CPUSET; |
285 | } |
286 | return CONSTRAINT_NONE; |
287 | } |
288 | |
289 | static int oom_evaluate_task(struct task_struct *task, void *arg) |
290 | { |
291 | struct oom_control *oc = arg; |
292 | unsigned long points; |
293 | |
294 | if (oom_unkillable_task(task, NULL, oc->nodemask)) |
295 | goto next; |
296 | |
297 | /* |
298 | * This task already has access to memory reserves and is being killed. |
299 | * Don't allow any other task to have access to the reserves unless |
300 | * the task has MMF_OOM_SKIP because chances that it would release |
301 | * any memory is quite low. |
302 | */ |
303 | if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { |
304 | if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) |
305 | goto next; |
306 | goto abort; |
307 | } |
308 | |
309 | /* |
310 | * If task is allocating a lot of memory and has been marked to be |
311 | * killed first if it triggers an oom, then select it. |
312 | */ |
313 | if (oom_task_origin(task)) { |
314 | points = ULONG_MAX; |
315 | goto select; |
316 | } |
317 | |
318 | points = oom_badness(task, NULL, oc->nodemask, oc->totalpages); |
319 | if (!points || points < oc->chosen_points) |
320 | goto next; |
321 | |
322 | /* Prefer thread group leaders for display purposes */ |
323 | if (points == oc->chosen_points && thread_group_leader(oc->chosen)) |
324 | goto next; |
325 | select: |
326 | if (oc->chosen) |
327 | put_task_struct(oc->chosen); |
328 | get_task_struct(task); |
329 | oc->chosen = task; |
330 | oc->chosen_points = points; |
331 | next: |
332 | return 0; |
333 | abort: |
334 | if (oc->chosen) |
335 | put_task_struct(oc->chosen); |
336 | oc->chosen = (void *)-1UL; |
337 | return 1; |
338 | } |
339 | |
340 | /* |
341 | * Simple selection loop. We choose the process with the highest number of |
342 | * 'points'. In case scan was aborted, oc->chosen is set to -1. |
343 | */ |
344 | static void select_bad_process(struct oom_control *oc) |
345 | { |
346 | if (is_memcg_oom(oc)) |
347 | mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); |
348 | else { |
349 | struct task_struct *p; |
350 | |
351 | rcu_read_lock(); |
352 | for_each_process(p) |
353 | if (oom_evaluate_task(p, oc)) |
354 | break; |
355 | rcu_read_unlock(); |
356 | } |
357 | |
358 | oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages; |
359 | } |
360 | |
361 | /** |
362 | * dump_tasks - dump current memory state of all system tasks |
363 | * @memcg: current's memory controller, if constrained |
364 | * @nodemask: nodemask passed to page allocator for mempolicy ooms |
365 | * |
366 | * Dumps the current memory state of all eligible tasks. Tasks not in the same |
367 | * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes |
368 | * are not shown. |
369 | * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes, |
370 | * swapents, oom_score_adj value, and name. |
371 | */ |
372 | static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) |
373 | { |
374 | struct task_struct *p; |
375 | struct task_struct *task; |
376 | |
377 | pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n"); |
378 | rcu_read_lock(); |
379 | for_each_process(p) { |
380 | if (oom_unkillable_task(p, memcg, nodemask)) |
381 | continue; |
382 | |
383 | task = find_lock_task_mm(p); |
384 | if (!task) { |
385 | /* |
386 | * This is a kthread or all of p's threads have already |
387 | * detached their mm's. There's no need to report |
388 | * them; they can't be oom killed anyway. |
389 | */ |
390 | continue; |
391 | } |
392 | |
393 | pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n", |
394 | task->pid, from_kuid(&init_user_ns, task_uid(task)), |
395 | task->tgid, task->mm->total_vm, get_mm_rss(task->mm), |
396 | atomic_long_read(&task->mm->nr_ptes), |
397 | mm_nr_pmds(task->mm), |
398 | get_mm_counter(task->mm, MM_SWAPENTS), |
399 | task->signal->oom_score_adj, task->comm); |
400 | task_unlock(task); |
401 | } |
402 | rcu_read_unlock(); |
403 | } |
404 | |
405 | static void dump_header(struct oom_control *oc, struct task_struct *p) |
406 | { |
407 | nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed; |
408 | |
409 | pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n", |
410 | current->comm, oc->gfp_mask, &oc->gfp_mask, |
411 | nodemask_pr_args(nm), oc->order, |
412 | current->signal->oom_score_adj); |
413 | if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) |
414 | pr_warn("COMPACTION is disabled!!!\n"); |
415 | |
416 | cpuset_print_current_mems_allowed(); |
417 | dump_stack(); |
418 | if (oc->memcg) |
419 | mem_cgroup_print_oom_info(oc->memcg, p); |
420 | else |
421 | show_mem(SHOW_MEM_FILTER_NODES); |
422 | if (sysctl_oom_dump_tasks) |
423 | dump_tasks(oc->memcg, oc->nodemask); |
424 | } |
425 | |
426 | /* |
427 | * Number of OOM victims in flight |
428 | */ |
429 | static atomic_t oom_victims = ATOMIC_INIT(0); |
430 | static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait); |
431 | |
432 | static bool oom_killer_disabled __read_mostly; |
433 | |
434 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
435 | |
436 | /* |
437 | * task->mm can be NULL if the task is the exited group leader. So to |
438 | * determine whether the task is using a particular mm, we examine all the |
439 | * task's threads: if one of those is using this mm then this task was also |
440 | * using it. |
441 | */ |
442 | bool process_shares_mm(struct task_struct *p, struct mm_struct *mm) |
443 | { |
444 | struct task_struct *t; |
445 | |
446 | for_each_thread(p, t) { |
447 | struct mm_struct *t_mm = READ_ONCE(t->mm); |
448 | if (t_mm) |
449 | return t_mm == mm; |
450 | } |
451 | return false; |
452 | } |
453 | |
454 | |
455 | #ifdef CONFIG_MMU |
456 | /* |
457 | * OOM Reaper kernel thread which tries to reap the memory used by the OOM |
458 | * victim (if that is possible) to help the OOM killer to move on. |
459 | */ |
460 | static struct task_struct *oom_reaper_th; |
461 | static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); |
462 | static struct task_struct *oom_reaper_list; |
463 | static DEFINE_SPINLOCK(oom_reaper_lock); |
464 | |
465 | static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) |
466 | { |
467 | struct mmu_gather tlb; |
468 | struct vm_area_struct *vma; |
469 | struct zap_details details = {.check_swap_entries = true, |
470 | .ignore_dirty = true}; |
471 | bool ret = true; |
472 | |
473 | /* |
474 | * We have to make sure to not race with the victim exit path |
475 | * and cause premature new oom victim selection: |
476 | * __oom_reap_task_mm exit_mm |
477 | * mmget_not_zero |
478 | * mmput |
479 | * atomic_dec_and_test |
480 | * exit_oom_victim |
481 | * [...] |
482 | * out_of_memory |
483 | * select_bad_process |
484 | * # no TIF_MEMDIE task selects new victim |
485 | * unmap_page_range # frees some memory |
486 | */ |
487 | mutex_lock(&oom_lock); |
488 | |
489 | if (!down_read_trylock(&mm->mmap_sem)) { |
490 | ret = false; |
491 | goto unlock_oom; |
492 | } |
493 | |
494 | /* |
495 | * If the mm has notifiers then we would need to invalidate them around |
496 | * unmap_page_range and that is risky because notifiers can sleep and |
497 | * what they do is basically undeterministic. So let's have a short |
498 | * sleep to give the oom victim some more time. |
499 | * TODO: we really want to get rid of this ugly hack and make sure that |
500 | * notifiers cannot block for unbounded amount of time and add |
501 | * mmu_notifier_invalidate_range_{start,end} around unmap_page_range |
502 | */ |
503 | if (mm_has_notifiers(mm)) { |
504 | up_read(&mm->mmap_sem); |
505 | schedule_timeout_idle(HZ); |
506 | goto unlock_oom; |
507 | } |
508 | |
509 | /* |
510 | * increase mm_users only after we know we will reap something so |
511 | * that the mmput_async is called only when we have reaped something |
512 | * and delayed __mmput doesn't matter that much |
513 | */ |
514 | if (!mmget_not_zero(mm)) { |
515 | up_read(&mm->mmap_sem); |
516 | goto unlock_oom; |
517 | } |
518 | |
519 | /* |
520 | * Tell all users of get_user/copy_from_user etc... that the content |
521 | * is no longer stable. No barriers really needed because unmapping |
522 | * should imply barriers already and the reader would hit a page fault |
523 | * if it stumbled over a reaped memory. |
524 | */ |
525 | set_bit(MMF_UNSTABLE, &mm->flags); |
526 | |
527 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { |
528 | if (is_vm_hugetlb_page(vma)) |
529 | continue; |
530 | |
531 | /* |
532 | * mlocked VMAs require explicit munlocking before unmap. |
533 | * Let's keep it simple here and skip such VMAs. |
534 | */ |
535 | if (vma->vm_flags & VM_LOCKED) |
536 | continue; |
537 | |
538 | /* |
539 | * Only anonymous pages have a good chance to be dropped |
540 | * without additional steps which we cannot afford as we |
541 | * are OOM already. |
542 | * |
543 | * We do not even care about fs backed pages because all |
544 | * which are reclaimable have already been reclaimed and |
545 | * we do not want to block exit_mmap by keeping mm ref |
546 | * count elevated without a good reason. |
547 | */ |
548 | if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { |
549 | tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end); |
550 | unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, |
551 | &details); |
552 | tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end); |
553 | } |
554 | } |
555 | pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", |
556 | task_pid_nr(tsk), tsk->comm, |
557 | K(get_mm_counter(mm, MM_ANONPAGES)), |
558 | K(get_mm_counter(mm, MM_FILEPAGES)), |
559 | K(get_mm_counter(mm, MM_SHMEMPAGES))); |
560 | up_read(&mm->mmap_sem); |
561 | |
562 | /* |
563 | * Drop our reference but make sure the mmput slow path is called from a |
564 | * different context because we shouldn't risk we get stuck there and |
565 | * put the oom_reaper out of the way. |
566 | */ |
567 | mmput_async(mm); |
568 | unlock_oom: |
569 | mutex_unlock(&oom_lock); |
570 | return ret; |
571 | } |
572 | |
573 | #define MAX_OOM_REAP_RETRIES 10 |
574 | static void oom_reap_task(struct task_struct *tsk) |
575 | { |
576 | int attempts = 0; |
577 | struct mm_struct *mm = tsk->signal->oom_mm; |
578 | |
579 | /* Retry the down_read_trylock(mmap_sem) a few times */ |
580 | while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm)) |
581 | schedule_timeout_idle(HZ/10); |
582 | |
583 | if (attempts <= MAX_OOM_REAP_RETRIES) |
584 | goto done; |
585 | |
586 | |
587 | pr_info("oom_reaper: unable to reap pid:%d (%s)\n", |
588 | task_pid_nr(tsk), tsk->comm); |
589 | debug_show_all_locks(); |
590 | |
591 | done: |
592 | tsk->oom_reaper_list = NULL; |
593 | |
594 | /* |
595 | * Hide this mm from OOM killer because it has been either reaped or |
596 | * somebody can't call up_write(mmap_sem). |
597 | */ |
598 | set_bit(MMF_OOM_SKIP, &mm->flags); |
599 | |
600 | /* Drop a reference taken by wake_oom_reaper */ |
601 | put_task_struct(tsk); |
602 | } |
603 | |
604 | static int oom_reaper(void *unused) |
605 | { |
606 | while (true) { |
607 | struct task_struct *tsk = NULL; |
608 | |
609 | wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL); |
610 | spin_lock(&oom_reaper_lock); |
611 | if (oom_reaper_list != NULL) { |
612 | tsk = oom_reaper_list; |
613 | oom_reaper_list = tsk->oom_reaper_list; |
614 | } |
615 | spin_unlock(&oom_reaper_lock); |
616 | |
617 | if (tsk) |
618 | oom_reap_task(tsk); |
619 | } |
620 | |
621 | return 0; |
622 | } |
623 | |
624 | static void wake_oom_reaper(struct task_struct *tsk) |
625 | { |
626 | if (!oom_reaper_th) |
627 | return; |
628 | |
629 | /* mm is already queued? */ |
630 | if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) |
631 | return; |
632 | |
633 | get_task_struct(tsk); |
634 | |
635 | spin_lock(&oom_reaper_lock); |
636 | tsk->oom_reaper_list = oom_reaper_list; |
637 | oom_reaper_list = tsk; |
638 | spin_unlock(&oom_reaper_lock); |
639 | wake_up(&oom_reaper_wait); |
640 | } |
641 | |
642 | static int __init oom_init(void) |
643 | { |
644 | oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper"); |
645 | if (IS_ERR(oom_reaper_th)) { |
646 | pr_err("Unable to start OOM reaper %ld. Continuing regardless\n", |
647 | PTR_ERR(oom_reaper_th)); |
648 | oom_reaper_th = NULL; |
649 | } |
650 | return 0; |
651 | } |
652 | subsys_initcall(oom_init) |
653 | #else |
654 | static inline void wake_oom_reaper(struct task_struct *tsk) |
655 | { |
656 | } |
657 | #endif /* CONFIG_MMU */ |
658 | |
659 | /** |
660 | * mark_oom_victim - mark the given task as OOM victim |
661 | * @tsk: task to mark |
662 | * |
663 | * Has to be called with oom_lock held and never after |
664 | * oom has been disabled already. |
665 | * |
666 | * tsk->mm has to be non NULL and caller has to guarantee it is stable (either |
667 | * under task_lock or operate on the current). |
668 | */ |
669 | static void mark_oom_victim(struct task_struct *tsk) |
670 | { |
671 | struct mm_struct *mm = tsk->mm; |
672 | |
673 | WARN_ON(oom_killer_disabled); |
674 | /* OOM killer might race with memcg OOM */ |
675 | if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) |
676 | return; |
677 | |
678 | /* oom_mm is bound to the signal struct life time. */ |
679 | if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) |
680 | atomic_inc(&tsk->signal->oom_mm->mm_count); |
681 | |
682 | /* |
683 | * Make sure that the task is woken up from uninterruptible sleep |
684 | * if it is frozen because OOM killer wouldn't be able to free |
685 | * any memory and livelock. freezing_slow_path will tell the freezer |
686 | * that TIF_MEMDIE tasks should be ignored. |
687 | */ |
688 | __thaw_task(tsk); |
689 | atomic_inc(&oom_victims); |
690 | } |
691 | |
692 | /** |
693 | * exit_oom_victim - note the exit of an OOM victim |
694 | */ |
695 | void exit_oom_victim(void) |
696 | { |
697 | clear_thread_flag(TIF_MEMDIE); |
698 | |
699 | if (!atomic_dec_return(&oom_victims)) |
700 | wake_up_all(&oom_victims_wait); |
701 | } |
702 | |
703 | /** |
704 | * oom_killer_enable - enable OOM killer |
705 | */ |
706 | void oom_killer_enable(void) |
707 | { |
708 | oom_killer_disabled = false; |
709 | } |
710 | |
711 | /** |
712 | * oom_killer_disable - disable OOM killer |
713 | * @timeout: maximum timeout to wait for oom victims in jiffies |
714 | * |
715 | * Forces all page allocations to fail rather than trigger OOM killer. |
716 | * Will block and wait until all OOM victims are killed or the given |
717 | * timeout expires. |
718 | * |
719 | * The function cannot be called when there are runnable user tasks because |
720 | * the userspace would see unexpected allocation failures as a result. Any |
721 | * new usage of this function should be consulted with MM people. |
722 | * |
723 | * Returns true if successful and false if the OOM killer cannot be |
724 | * disabled. |
725 | */ |
726 | bool oom_killer_disable(signed long timeout) |
727 | { |
728 | signed long ret; |
729 | |
730 | /* |
731 | * Make sure to not race with an ongoing OOM killer. Check that the |
732 | * current is not killed (possibly due to sharing the victim's memory). |
733 | */ |
734 | if (mutex_lock_killable(&oom_lock)) |
735 | return false; |
736 | oom_killer_disabled = true; |
737 | mutex_unlock(&oom_lock); |
738 | |
739 | ret = wait_event_interruptible_timeout(oom_victims_wait, |
740 | !atomic_read(&oom_victims), timeout); |
741 | if (ret <= 0) { |
742 | oom_killer_enable(); |
743 | return false; |
744 | } |
745 | |
746 | return true; |
747 | } |
748 | |
749 | static inline bool __task_will_free_mem(struct task_struct *task) |
750 | { |
751 | struct signal_struct *sig = task->signal; |
752 | |
753 | /* |
754 | * A coredumping process may sleep for an extended period in exit_mm(), |
755 | * so the oom killer cannot assume that the process will promptly exit |
756 | * and release memory. |
757 | */ |
758 | if (sig->flags & SIGNAL_GROUP_COREDUMP) |
759 | return false; |
760 | |
761 | if (sig->flags & SIGNAL_GROUP_EXIT) |
762 | return true; |
763 | |
764 | if (thread_group_empty(task) && (task->flags & PF_EXITING)) |
765 | return true; |
766 | |
767 | return false; |
768 | } |
769 | |
770 | /* |
771 | * Checks whether the given task is dying or exiting and likely to |
772 | * release its address space. This means that all threads and processes |
773 | * sharing the same mm have to be killed or exiting. |
774 | * Caller has to make sure that task->mm is stable (hold task_lock or |
775 | * it operates on the current). |
776 | */ |
777 | static bool task_will_free_mem(struct task_struct *task) |
778 | { |
779 | struct mm_struct *mm = task->mm; |
780 | struct task_struct *p; |
781 | bool ret = true; |
782 | |
783 | /* |
784 | * Skip tasks without mm because it might have passed its exit_mm and |
785 | * exit_oom_victim. oom_reaper could have rescued that but do not rely |
786 | * on that for now. We can consider find_lock_task_mm in future. |
787 | */ |
788 | if (!mm) |
789 | return false; |
790 | |
791 | if (!__task_will_free_mem(task)) |
792 | return false; |
793 | |
794 | /* |
795 | * This task has already been drained by the oom reaper so there are |
796 | * only small chances it will free some more |
797 | */ |
798 | if (test_bit(MMF_OOM_SKIP, &mm->flags)) |
799 | return false; |
800 | |
801 | if (atomic_read(&mm->mm_users) <= 1) |
802 | return true; |
803 | |
804 | /* |
805 | * Make sure that all tasks which share the mm with the given tasks |
806 | * are dying as well to make sure that a) nobody pins its mm and |
807 | * b) the task is also reapable by the oom reaper. |
808 | */ |
809 | rcu_read_lock(); |
810 | for_each_process(p) { |
811 | if (!process_shares_mm(p, mm)) |
812 | continue; |
813 | if (same_thread_group(task, p)) |
814 | continue; |
815 | ret = __task_will_free_mem(p); |
816 | if (!ret) |
817 | break; |
818 | } |
819 | rcu_read_unlock(); |
820 | |
821 | return ret; |
822 | } |
823 | |
824 | static void oom_kill_process(struct oom_control *oc, const char *message) |
825 | { |
826 | struct task_struct *p = oc->chosen; |
827 | unsigned int points = oc->chosen_points; |
828 | struct task_struct *victim = p; |
829 | struct task_struct *child; |
830 | struct task_struct *t; |
831 | struct mm_struct *mm; |
832 | unsigned int victim_points = 0; |
833 | static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, |
834 | DEFAULT_RATELIMIT_BURST); |
835 | bool can_oom_reap = true; |
836 | |
837 | /* |
838 | * If the task is already exiting, don't alarm the sysadmin or kill |
839 | * its children or threads, just set TIF_MEMDIE so it can die quickly |
840 | */ |
841 | task_lock(p); |
842 | if (task_will_free_mem(p)) { |
843 | mark_oom_victim(p); |
844 | wake_oom_reaper(p); |
845 | task_unlock(p); |
846 | put_task_struct(p); |
847 | return; |
848 | } |
849 | task_unlock(p); |
850 | |
851 | if (__ratelimit(&oom_rs)) |
852 | dump_header(oc, p); |
853 | |
854 | pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", |
855 | message, task_pid_nr(p), p->comm, points); |
856 | |
857 | /* |
858 | * If any of p's children has a different mm and is eligible for kill, |
859 | * the one with the highest oom_badness() score is sacrificed for its |
860 | * parent. This attempts to lose the minimal amount of work done while |
861 | * still freeing memory. |
862 | */ |
863 | read_lock(&tasklist_lock); |
864 | |
865 | /* |
866 | * The task 'p' might have already exited before reaching here. The |
867 | * put_task_struct() will free task_struct 'p' while the loop still try |
868 | * to access the field of 'p', so, get an extra reference. |
869 | */ |
870 | get_task_struct(p); |
871 | for_each_thread(p, t) { |
872 | list_for_each_entry(child, &t->children, sibling) { |
873 | unsigned int child_points; |
874 | |
875 | if (process_shares_mm(child, p->mm)) |
876 | continue; |
877 | /* |
878 | * oom_badness() returns 0 if the thread is unkillable |
879 | */ |
880 | child_points = oom_badness(child, |
881 | oc->memcg, oc->nodemask, oc->totalpages); |
882 | if (child_points > victim_points) { |
883 | put_task_struct(victim); |
884 | victim = child; |
885 | victim_points = child_points; |
886 | get_task_struct(victim); |
887 | } |
888 | } |
889 | } |
890 | put_task_struct(p); |
891 | read_unlock(&tasklist_lock); |
892 | |
893 | p = find_lock_task_mm(victim); |
894 | if (!p) { |
895 | put_task_struct(victim); |
896 | return; |
897 | } else if (victim != p) { |
898 | get_task_struct(p); |
899 | put_task_struct(victim); |
900 | victim = p; |
901 | } |
902 | |
903 | /* Get a reference to safely compare mm after task_unlock(victim) */ |
904 | mm = victim->mm; |
905 | atomic_inc(&mm->mm_count); |
906 | /* |
907 | * We should send SIGKILL before setting TIF_MEMDIE in order to prevent |
908 | * the OOM victim from depleting the memory reserves from the user |
909 | * space under its control. |
910 | */ |
911 | do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true); |
912 | mark_oom_victim(victim); |
913 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", |
914 | task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), |
915 | K(get_mm_counter(victim->mm, MM_ANONPAGES)), |
916 | K(get_mm_counter(victim->mm, MM_FILEPAGES)), |
917 | K(get_mm_counter(victim->mm, MM_SHMEMPAGES))); |
918 | task_unlock(victim); |
919 | |
920 | /* |
921 | * Kill all user processes sharing victim->mm in other thread groups, if |
922 | * any. They don't get access to memory reserves, though, to avoid |
923 | * depletion of all memory. This prevents mm->mmap_sem livelock when an |
924 | * oom killed thread cannot exit because it requires the semaphore and |
925 | * its contended by another thread trying to allocate memory itself. |
926 | * That thread will now get access to memory reserves since it has a |
927 | * pending fatal signal. |
928 | */ |
929 | rcu_read_lock(); |
930 | for_each_process(p) { |
931 | if (!process_shares_mm(p, mm)) |
932 | continue; |
933 | if (same_thread_group(p, victim)) |
934 | continue; |
935 | if (is_global_init(p)) { |
936 | can_oom_reap = false; |
937 | set_bit(MMF_OOM_SKIP, &mm->flags); |
938 | pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n", |
939 | task_pid_nr(victim), victim->comm, |
940 | task_pid_nr(p), p->comm); |
941 | continue; |
942 | } |
943 | /* |
944 | * No use_mm() user needs to read from the userspace so we are |
945 | * ok to reap it. |
946 | */ |
947 | if (unlikely(p->flags & PF_KTHREAD)) |
948 | continue; |
949 | do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true); |
950 | } |
951 | rcu_read_unlock(); |
952 | |
953 | if (can_oom_reap) |
954 | wake_oom_reaper(victim); |
955 | |
956 | mmdrop(mm); |
957 | put_task_struct(victim); |
958 | } |
959 | #undef K |
960 | |
961 | /* |
962 | * Determines whether the kernel must panic because of the panic_on_oom sysctl. |
963 | */ |
964 | static void check_panic_on_oom(struct oom_control *oc, |
965 | enum oom_constraint constraint) |
966 | { |
967 | if (likely(!sysctl_panic_on_oom)) |
968 | return; |
969 | if (sysctl_panic_on_oom != 2) { |
970 | /* |
971 | * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel |
972 | * does not panic for cpuset, mempolicy, or memcg allocation |
973 | * failures. |
974 | */ |
975 | if (constraint != CONSTRAINT_NONE) |
976 | return; |
977 | } |
978 | /* Do not panic for oom kills triggered by sysrq */ |
979 | if (is_sysrq_oom(oc)) |
980 | return; |
981 | dump_header(oc, NULL); |
982 | panic("Out of memory: %s panic_on_oom is enabled\n", |
983 | sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); |
984 | } |
985 | |
986 | static BLOCKING_NOTIFIER_HEAD(oom_notify_list); |
987 | |
988 | int register_oom_notifier(struct notifier_block *nb) |
989 | { |
990 | return blocking_notifier_chain_register(&oom_notify_list, nb); |
991 | } |
992 | EXPORT_SYMBOL_GPL(register_oom_notifier); |
993 | |
994 | int unregister_oom_notifier(struct notifier_block *nb) |
995 | { |
996 | return blocking_notifier_chain_unregister(&oom_notify_list, nb); |
997 | } |
998 | EXPORT_SYMBOL_GPL(unregister_oom_notifier); |
999 | |
1000 | /** |
1001 | * out_of_memory - kill the "best" process when we run out of memory |
1002 | * @oc: pointer to struct oom_control |
1003 | * |
1004 | * If we run out of memory, we have the choice between either |
1005 | * killing a random task (bad), letting the system crash (worse) |
1006 | * OR try to be smart about which process to kill. Note that we |
1007 | * don't have to be perfect here, we just have to be good. |
1008 | */ |
1009 | bool out_of_memory(struct oom_control *oc) |
1010 | { |
1011 | unsigned long freed = 0; |
1012 | enum oom_constraint constraint = CONSTRAINT_NONE; |
1013 | |
1014 | if (oom_killer_disabled) |
1015 | return false; |
1016 | |
1017 | if (!is_memcg_oom(oc)) { |
1018 | blocking_notifier_call_chain(&oom_notify_list, 0, &freed); |
1019 | if (freed > 0) |
1020 | /* Got some memory back in the last second. */ |
1021 | return true; |
1022 | } |
1023 | |
1024 | /* |
1025 | * If current has a pending SIGKILL or is exiting, then automatically |
1026 | * select it. The goal is to allow it to allocate so that it may |
1027 | * quickly exit and free its memory. |
1028 | */ |
1029 | if (task_will_free_mem(current)) { |
1030 | mark_oom_victim(current); |
1031 | wake_oom_reaper(current); |
1032 | return true; |
1033 | } |
1034 | |
1035 | /* |
1036 | * The OOM killer does not compensate for IO-less reclaim. |
1037 | * pagefault_out_of_memory lost its gfp context so we have to |
1038 | * make sure exclude 0 mask - all other users should have at least |
1039 | * ___GFP_DIRECT_RECLAIM to get here. |
1040 | */ |
1041 | if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL))) |
1042 | return true; |
1043 | |
1044 | /* |
1045 | * Check if there were limitations on the allocation (only relevant for |
1046 | * NUMA and memcg) that may require different handling. |
1047 | */ |
1048 | constraint = constrained_alloc(oc); |
1049 | if (constraint != CONSTRAINT_MEMORY_POLICY) |
1050 | oc->nodemask = NULL; |
1051 | check_panic_on_oom(oc, constraint); |
1052 | |
1053 | if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && |
1054 | current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) && |
1055 | current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { |
1056 | get_task_struct(current); |
1057 | oc->chosen = current; |
1058 | oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)"); |
1059 | return true; |
1060 | } |
1061 | |
1062 | select_bad_process(oc); |
1063 | /* Found nothing?!?! Either we hang forever, or we panic. */ |
1064 | if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) { |
1065 | dump_header(oc, NULL); |
1066 | panic("Out of memory and no killable processes...\n"); |
1067 | } |
1068 | if (oc->chosen && oc->chosen != (void *)-1UL) { |
1069 | oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : |
1070 | "Memory cgroup out of memory"); |
1071 | /* |
1072 | * Give the killed process a good chance to exit before trying |
1073 | * to allocate memory again. |
1074 | */ |
1075 | schedule_timeout_killable(1); |
1076 | } |
1077 | return !!oc->chosen; |
1078 | } |
1079 | |
1080 | /* |
1081 | * The pagefault handler calls here because it is out of memory, so kill a |
1082 | * memory-hogging task. If oom_lock is held by somebody else, a parallel oom |
1083 | * killing is already in progress so do nothing. |
1084 | */ |
1085 | void pagefault_out_of_memory(void) |
1086 | { |
1087 | struct oom_control oc = { |
1088 | .zonelist = NULL, |
1089 | .nodemask = NULL, |
1090 | .memcg = NULL, |
1091 | .gfp_mask = 0, |
1092 | .order = 0, |
1093 | }; |
1094 | |
1095 | if (mem_cgroup_oom_synchronize(true)) |
1096 | return; |
1097 | |
1098 | if (!mutex_trylock(&oom_lock)) |
1099 | return; |
1100 | out_of_memory(&oc); |
1101 | mutex_unlock(&oom_lock); |
1102 | } |
1103 |