/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

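/*
 * A minimal usage sketch (illustrative only, not a caller in this file):
 * the task comes back with task_lock() held, so the caller owns the
 * unlock.
 *
 *	p = find_lock_task_mm(p);
 *	if (p) {
 *		... dereference p->mm safely here ...
 *		task_unlock(p);
 *	}
 */
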
/*
 * order == -1 means the oom kill is required by sysrq; any other order
 * is used only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When called from mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}

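/*
 * Worked example of the scoring above (the numbers are made up for
 * illustration): with totalpages = 1,000,000 and a task whose rss,
 * page tables and swap entries add up to 50,000 pages, the baseline is
 * 50,000 points.  An oom_score_adj of +500 then adds
 * 500 * (1,000,000 / 1000) = 500,000 points, while -500 subtracts the
 * same amount, and an eligible task never scores below 1.
 */
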
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we would have to kill a random task in this case.
	 * Hopefully CONSTRAINT_THISNODE some day... but there is no way to
	 * handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check if this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

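/*
 * Example of the constraint logic above (hypothetical configuration): a
 * task bound by an MPOL_BIND mempolicy to node 1 that hits an OOM yields
 * CONSTRAINT_MEMORY_POLICY with totalpages = total_swap_pages plus the
 * pages spanned by node 1, so badness scores are normalized against the
 * memory the allocation could actually have used.
 */
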
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP, because the chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}

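/*
 * The final division above rescales the winner's raw score into the same
 * 0..1000 range used for /proc/<pid>/oom_score; for example, a task
 * whose badness equals half of totalpages ends up with chosen_points of
 * roughly 500.
 */
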
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed;

	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask,
		nodemask_pr_args(nm), oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto unlock_oom;
	}

	/*
	 * Increase mm_users only after we know we will reap something so
	 * that mmput_async is called only when we have reaped something
	 * and the delayed __mmput doesn't matter that much.
	 */
	if (!mmget_not_zero(mm)) {
		up_read(&mm->mmap_sem);
		goto unlock_oom;
	}

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * Drop our reference but make sure the mmput slow path is called from a
	 * different context because we shouldn't risk getting stuck there and
	 * putting the oom_reaper out of the way.
	 */
	mmput_async(mm);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been reaped
	 * or somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and the caller has to guarantee it is stable
 * (either hold task_lock or operate on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		atomic_inc(&tsk->signal->oom_mm->mm_count);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

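/*
 * TIF_MEMDIE is what grants the victim access to memory reserves in the
 * page allocator so it can make forward progress and exit; the oom_mm
 * reference pinned above is what lets the oom_reaper and the
 * MMF_OOM_SKIP logic keep working even after the victim has passed
 * exit_mm().
 */
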
/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}

	return true;
}

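/*
 * A sketch of the intended call pattern (the suspend/hibernate freezer
 * is the expected user; the timeout value below is illustrative only):
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20 * MSEC_PER_SEC)))
 *		return -EBUSY;		a victim did not exit in time
 *	...freeze user space...
 *	oom_killer_enable();
 */
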
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace, so it is
		 * ok to reap the mm even while a kthread is borrowing it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

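/*
 * For reference, the sysctl values handled above: vm.panic_on_oom == 0
 * never panics, == 1 panics only for unconstrained (system-wide) ooms,
 * and == 2 panics even for cpuset, mempolicy, and memcg constrained
 * ooms; sysrq-triggered kills never panic.
 */
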
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

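/*
 * Sketch of a notifier user (hypothetical driver code, not part of this
 * file): the callback receives a pointer to the 'freed' counter that
 * out_of_memory() passes through blocking_notifier_call_chain(), and it
 * adds the number of pages it managed to release (my_cache_shrink is a
 * made-up helper).
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_cache_shrink();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *	register_oom_notifier(&my_oom_nb);
 */
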
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to make
	 * sure to exclude the 0 mask - all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}

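/*
 * Note on the return value above: oc->chosen is also non-NULL (-1UL)
 * when the scan aborted because a previously selected victim is still
 * exiting, so that case reports success as well and the allocator
 * simply retries.
 */
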
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}