/* drivers/misc/lowmemorykiller.c
 *
 * The lowmemorykiller driver lets user-space specify a set of memory thresholds
 * where processes with a range of oom_score_adj values will get killed. Specify
 * the minimum oom_score_adj values in
 * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
 * /sys/module/lowmemorykiller/parameters/minfree. Both files take a
 * comma-separated list of numbers in ascending order.
 *
 * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
 * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
 * processes with an oom_score_adj value of 8 or higher when the free memory
 * drops below 4096 pages and kill processes with an oom_score_adj value of 0
 * or higher when the free memory drops below 1024 pages.
 *
 * The driver considers memory used for caches to be free, but if a large
 * percentage of the cached memory is locked, this can be very inaccurate
 * and processes may not get killed until the normal oom killer is triggered.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
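
/*
 * Illustrative example (these values are typical of Android userspace
 * configurations, not defaults of this driver): pairing six adj levels
 * with six minfree levels from a shell,
 *
 *   echo "0,100,200,300,900,906" > /sys/module/lowmemorykiller/parameters/adj
 *   echo "18432,23040,27648,32256,55296,80640" > \
 *           /sys/module/lowmemorykiller/parameters/minfree
 *
 * means that once fewer than 80640 free+file pages (~315MB with 4K pages)
 * remain, tasks with oom_score_adj >= 906 become eligible to be killed,
 * with tighter thresholds unlocking progressively lower adj levels.
 */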

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/rcupdate.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/cpuset.h>
#include <linux/show_mem_notifier.h>
#include <linux/vmpressure.h>
#include <linux/zcache.h>

#define CREATE_TRACE_POINTS
#include <trace/events/almk.h>

#ifdef CONFIG_HIGHMEM
#define _ZONE ZONE_HIGHMEM
#else
#define _ZONE ZONE_NORMAL
#endif
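
/*
 * _ZONE is the classzone used for the watermark and lowmem_reserve
 * checks in tune_lmk_param(): the highmem zone where one exists (most
 * user pages live there), ZONE_NORMAL otherwise.
 */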

static uint32_t lowmem_debug_level = 1;
static int lowmem_adj[6] = {
	0,
	1,
	6,
	12,
};
static int lowmem_adj_size = 4;
static int lowmem_minfree[6] = {
	3 * 512,	/* 6MB */
	2 * 1024,	/* 8MB */
	4 * 1024,	/* 16MB */
	16 * 1024,	/* 64MB */
};
static int lowmem_minfree_size = 4;
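
/*
 * lowmem_adj[i] pairs with lowmem_minfree[i]. With the defaults above:
 * when free and file pages both drop below 16 * 1024 pages (64MB with
 * 4K pages), tasks with oom_score_adj >= 12 become killable; below
 * 4 * 1024 pages (16MB), tasks with adj >= 6; and so on down to
 * adj >= 0 below 6MB.
 */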

static int lmk_fast_run = 1;

static unsigned long lowmem_deathpending_timeout;

#define lowmem_print(level, x...)			\
	do {						\
		if (lowmem_debug_level >= (level))	\
			pr_info(x);			\
	} while (0)

static atomic_t shift_adj = ATOMIC_INIT(0);
static short adj_max_shift = 353;

/* User knob to enable/disable adaptive lmk feature */
static int enable_adaptive_lmk;
module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int,
		   S_IRUGO | S_IWUSR);

/*
 * This parameter controls the behaviour of LMK when vmpressure is in
 * the range 90-94. In that range, adaptive LMK triggers based on the
 * number of file pages relative to vmpressure_file_min. This is
 * usually a pseudo minfree value, set higher than the highest value
 * configured in the minfree array.
 */
static int vmpressure_file_min;
module_param_named(vmpressure_file_min, vmpressure_file_min, int,
		   S_IRUGO | S_IWUSR);

enum {
	VMPRESSURE_NO_ADJUST = 0,
	VMPRESSURE_ADJUST_ENCROACH,
	VMPRESSURE_ADJUST_NORMAL,
};

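/*
 * Apply a pending adaptive-LMK shift: if the vmpressure notifier has
 * armed shift_adj, lower *min_score_adj (as derived from the minfree
 * table) to adj_max_shift, widening the pool of killable tasks for
 * this one shrinker pass. The return value only feeds the almk trace
 * points.
 */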
int adjust_minadj(short *min_score_adj)
{
	int ret = VMPRESSURE_NO_ADJUST;

	if (!enable_adaptive_lmk)
		return 0;

	if (atomic_read(&shift_adj) &&
	    (*min_score_adj > adj_max_shift)) {
		if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1)
			ret = VMPRESSURE_ADJUST_ENCROACH;
		else
			ret = VMPRESSURE_ADJUST_NORMAL;
		*min_score_adj = adj_max_shift;
	}
	atomic_set(&shift_adj, 0);

	return ret;
}

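/*
 * vmpressure callback: at pressure >= 95, always arm shift_adj; at
 * 90-94, arm it only when free pages are below the largest configured
 * minfree threshold and file pages are below vmpressure_file_min;
 * when pressure recedes, disarm a still-pending shift so a later
 * shrink is not falsely adapted.
 */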
static int lmk_vmpressure_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	int other_free = 0, other_file = 0;
	unsigned long pressure = action;
	int array_size = ARRAY_SIZE(lowmem_adj);

	if (!enable_adaptive_lmk)
		return 0;

	if (pressure >= 95) {
		other_file = global_page_state(NR_FILE_PAGES) -
			global_page_state(NR_SHMEM) -
			total_swapcache_pages();
		other_free = global_page_state(NR_FREE_PAGES);

		atomic_set(&shift_adj, 1);
		trace_almk_vmpressure(pressure, other_free, other_file);
	} else if (pressure >= 90) {
		if (lowmem_adj_size < array_size)
			array_size = lowmem_adj_size;
		if (lowmem_minfree_size < array_size)
			array_size = lowmem_minfree_size;

		other_file = global_page_state(NR_FILE_PAGES) -
			global_page_state(NR_SHMEM) -
			total_swapcache_pages();

		other_free = global_page_state(NR_FREE_PAGES);

		if ((other_free < lowmem_minfree[array_size - 1]) &&
		    (other_file < vmpressure_file_min)) {
			atomic_set(&shift_adj, 1);
			trace_almk_vmpressure(pressure, other_free,
					      other_file);
		}
	} else if (atomic_read(&shift_adj)) {
		/*
		 * shift_adj would have been set by a previous invocation
		 * of the notifier that has not yet been followed by a
		 * lowmem_shrink. Since vmpressure has improved, reset
		 * shift_adj to avoid a false adaptive-LMK trigger.
		 */
		trace_almk_vmpressure(pressure, other_free, other_file);
		atomic_set(&shift_adj, 0);
	}

	return 0;
}

static struct notifier_block lmk_vmpr_nb = {
	.notifier_call = lmk_vmpressure_notifier,
};

static int test_task_flag(struct task_struct *p, int flag)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		task_lock(t);
		if (test_tsk_thread_flag(t, flag)) {
			task_unlock(t);
			return 1;
		}
		task_unlock(t);
	}

	return 0;
}

static DEFINE_MUTEX(scan_mutex);

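/*
 * An allocation can be satisfied from CMA pageblocks only if its
 * migratetype is MIGRATE_CMA or reaches MIGRATE_CMA through the
 * allocator's fallback list; otherwise free CMA pages must not be
 * counted as usable when sizing the kill decision.
 */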
int can_use_cma_pages(gfp_t gfp_mask)
{
	int can_use = 0;
	int mtype = allocflags_to_migratetype(gfp_mask);
	int i = 0;
	int *mtype_fallbacks = get_migratetype_fallbacks(mtype);

	if (is_migrate_cma(mtype)) {
		can_use = 1;
	} else {
		for (i = 0;; i++) {
			int fallbacktype = mtype_fallbacks[i];

			if (is_migrate_cma(fallbacktype)) {
				can_use = 1;
				break;
			}

			if (fallbacktype == MIGRATE_RESERVE)
				break;
		}
	}
	return can_use;
}

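/*
 * Walk the zonelist and subtract from *other_free/*other_file whatever
 * the triggering allocation cannot actually use: free CMA pages in
 * ZONE_MOVABLE when CMA is off-limits, all pages of zones above
 * classzone_idx, and the lowmem_reserve (plus unusable CMA pages) of
 * zones below it.
 */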
void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
					int *other_free, int *other_file,
					int use_cma_pages)
{
	struct zone *zone;
	struct zoneref *zoneref;
	int zone_idx;

	for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
		zone_idx = zonelist_zone_idx(zoneref);
		if (zone_idx == ZONE_MOVABLE) {
			if (!use_cma_pages && other_free)
				*other_free -=
				    zone_page_state(zone, NR_FREE_CMA_PAGES);
			continue;
		}

		if (zone_idx > classzone_idx) {
			if (other_free != NULL)
				*other_free -= zone_page_state(zone,
							       NR_FREE_PAGES);
			if (other_file != NULL)
				*other_file -= zone_page_state(zone,
							       NR_FILE_PAGES)
					- zone_page_state(zone, NR_SHMEM)
					- zone_page_state(zone, NR_SWAPCACHE);
		} else if (zone_idx < classzone_idx) {
			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
			    other_free) {
				if (!use_cma_pages) {
					*other_free -= min(
					  zone->lowmem_reserve[classzone_idx] +
					  zone_page_state(
					    zone, NR_FREE_CMA_PAGES),
					  zone_page_state(
					    zone, NR_FREE_PAGES));
				} else {
					*other_free -=
					  zone->lowmem_reserve[classzone_idx];
				}
			} else {
				if (other_free)
					*other_free -=
					  zone_page_state(zone, NR_FREE_PAGES);
			}
		}
	}
}

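/*
 * kswapd runs with a gfp_mask that may exclude highmem even though it
 * balances highmem zones too; widen the mask here so the zone
 * accounting in tune_lmk_param() matches what kswapd is actually
 * reclaiming for.
 */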
#ifdef CONFIG_HIGHMEM
void adjust_gfp_mask(gfp_t *gfp_mask)
{
	struct zone *preferred_zone;
	struct zonelist *zonelist;
	enum zone_type high_zoneidx;

	if (current_is_kswapd()) {
		zonelist = node_zonelist(0, *gfp_mask);
		high_zoneidx = gfp_zone(*gfp_mask);
		first_zones_zonelist(zonelist, high_zoneidx, NULL,
				     &preferred_zone);

		if (high_zoneidx == ZONE_NORMAL) {
			if (zone_watermark_ok_safe(preferred_zone, 0,
					high_wmark_pages(preferred_zone), 0,
					0))
				*gfp_mask |= __GFP_HIGHMEM;
		} else if (high_zoneidx == ZONE_HIGHMEM) {
			*gfp_mask |= __GFP_HIGHMEM;
		}
	}
}
#else
void adjust_gfp_mask(gfp_t *unused)
{
}
#endif

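/*
 * Refine the global other_free/other_file estimates down to what the
 * triggering allocation can really use. For a balanced kswapd wakeup,
 * tune both counters when lmk_fast_run is set, otherwise only the
 * free-page counter; for everything else, tune both and discount free
 * CMA pages when the allocation cannot use them.
 */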
void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
{
	gfp_t gfp_mask;
	struct zone *preferred_zone;
	struct zonelist *zonelist;
	enum zone_type high_zoneidx, classzone_idx;
	unsigned long balance_gap;
	int use_cma_pages;

	gfp_mask = sc->gfp_mask;
	adjust_gfp_mask(&gfp_mask);

	zonelist = node_zonelist(0, gfp_mask);
	high_zoneidx = gfp_zone(gfp_mask);
	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
	classzone_idx = zone_idx(preferred_zone);
	use_cma_pages = can_use_cma_pages(gfp_mask);

	balance_gap = min(low_wmark_pages(preferred_zone),
			  (preferred_zone->present_pages +
			   KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
			  KSWAPD_ZONE_BALANCE_GAP_RATIO);

	if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
			  high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
			  balance_gap, 0, 0))) {
		if (lmk_fast_run)
			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
				       other_file, use_cma_pages);
		else
			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
				       NULL, use_cma_pages);

		if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
			if (!use_cma_pages) {
				*other_free -= min(
				  preferred_zone->lowmem_reserve[_ZONE]
				  + zone_page_state(
				    preferred_zone, NR_FREE_CMA_PAGES),
				  zone_page_state(
				    preferred_zone, NR_FREE_PAGES));
			} else {
				*other_free -=
				  preferred_zone->lowmem_reserve[_ZONE];
			}
		} else {
			*other_free -= zone_page_state(preferred_zone,
						       NR_FREE_PAGES);
		}

		lowmem_print(4, "lowmem_shrink of kswapd tuning for highmem "
			     "ofree %d, %d\n", *other_free, *other_file);
	} else {
		tune_lmk_zone_param(zonelist, classzone_idx, other_free,
				    other_file, use_cma_pages);

		if (!use_cma_pages) {
			*other_free -=
			  zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
		}

		lowmem_print(4, "lowmem_shrink tuning for others ofree %d, "
			     "%d\n", *other_free, *other_file);
	}
}

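/*
 * Shrinker entry point. With nr_to_scan == 0 this is only a query and
 * reports the number of reclaimable pages; otherwise pick the task
 * with the highest oom_score_adj at or above min_score_adj (largest
 * RSS breaks ties), SIGKILL it, and subtract its size from the count
 * returned.
 */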
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int ret = 0;
	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;

	if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
		global_page_state(NR_FILE_PAGES))
		other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
						global_page_state(NR_SHMEM) -
						total_swapcache_pages();
	else
		other_file = 0;

	tune_lmk_param(&other_free, &other_file, sc);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0) {
		ret = adjust_minadj(&min_score_adj);
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
			     nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
	}

	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     nr_to_scan, sc->gfp_mask, rem);

		if (nr_to_scan > 0)
			mutex_unlock(&scan_mutex);

		if ((min_score_adj == OOM_SCORE_ADJ_MAX + 1) &&
		    (nr_to_scan > 0))
			trace_almk_shrink(0, ret, other_free, other_file, 0);

		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(3, "select '%s' (%d), adj %d, size %d, to kill\n",
			     p->comm, p->pid, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "Killing '%s' (%d), adj %d,\n" \
			"   to free %ldkB on behalf of '%s' (%d) because\n" \
			"   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
			"   Free memory is %ldkB above reserved.\n" \
			"   Free CMA is %ldkB\n" \
			"   Total reserve is %ldkB\n" \
			"   Total free pages is %ldkB\n" \
			"   Total file cache is %ldkB\n" \
			"   Total zcache is %ldkB\n" \
			"   Slab Reclaimable is %ldkB\n" \
			"   Slab UnReclaimable is %ldkB\n" \
			"   Total Slab is %ldkB\n" \
			"   GFP mask is 0x%x\n",
			selected->comm, selected->pid,
			selected_oom_score_adj,
			selected_tasksize * (long)(PAGE_SIZE / 1024),
			current->comm, current->pid,
			other_file * (long)(PAGE_SIZE / 1024),
			minfree * (long)(PAGE_SIZE / 1024),
			min_score_adj,
			other_free * (long)(PAGE_SIZE / 1024),
			global_page_state(NR_FREE_CMA_PAGES) *
				(long)(PAGE_SIZE / 1024),
			totalreserve_pages * (long)(PAGE_SIZE / 1024),
			global_page_state(NR_FREE_PAGES) *
				(long)(PAGE_SIZE / 1024),
			global_page_state(NR_FILE_PAGES) *
				(long)(PAGE_SIZE / 1024),
			(long)zcache_pages() * (long)(PAGE_SIZE / 1024),
			global_page_state(NR_SLAB_RECLAIMABLE) *
				(long)(PAGE_SIZE / 1024),
			global_page_state(NR_SLAB_UNRECLAIMABLE) *
				(long)(PAGE_SIZE / 1024),
			global_page_state(NR_SLAB_RECLAIMABLE) *
				(long)(PAGE_SIZE / 1024) +
			global_page_state(NR_SLAB_UNRECLAIMABLE) *
				(long)(PAGE_SIZE / 1024),
			sc->gfp_mask);

		if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks(NULL, NULL);
			show_mem_call_notifiers();
		}

		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
		/* give the system time to free up the memory */
		msleep_interruptible(20);
		trace_almk_shrink(selected_tasksize, ret,
				  other_free, other_file,
				  selected_oom_score_adj);
	} else {
		trace_almk_shrink(1, ret, other_free, other_file, 0);
		rcu_read_unlock();
	}

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}

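/*
 * .seeks is deliberately inflated (DEFAULT_SEEKS * 16, overridable via
 * the "cost" parameter below) to tell the VM that work done here is
 * expensive, so shrink_slab() asks this shrinker for correspondingly
 * less work per pass.
 */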
static struct shrinker lowmem_shrinker = {
	.shrink = lowmem_shrink,
	.seeks = DEFAULT_SEEKS * 16
};

static int __init lowmem_init(void)
{
	register_shrinker(&lowmem_shrinker);
	vmpressure_notifier_register(&lmk_vmpr_nb);
	return 0;
}

static void __exit lowmem_exit(void)
{
	unregister_shrinker(&lowmem_shrinker);
}

#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
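/*
 * Legacy oom_adj values (-17..15) map linearly onto the oom_score_adj
 * range (-1000..1000): oom_score_adj = oom_adj * OOM_SCORE_ADJ_MAX /
 * -OOM_DISABLE, i.e. oom_adj * 1000 / 17, so e.g. oom_adj 12 becomes
 * 12 * 1000 / 17 = 705. OOM_ADJUST_MAX (15) is special-cased to
 * OOM_SCORE_ADJ_MAX so the top level saturates exactly.
 */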
static int lowmem_oom_adj_to_oom_score_adj(int oom_adj)
{
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;
	else
		return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

static void lowmem_autodetect_oom_adj_values(void)
{
	int i;
	int oom_adj;
	int oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;

	if (array_size <= 0)
		return;

	oom_adj = lowmem_adj[array_size - 1];
	if (oom_adj > OOM_ADJUST_MAX)
		return;

	oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
	if (oom_score_adj <= OOM_ADJUST_MAX)
		return;

	lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
	for (i = 0; i < array_size; i++) {
		oom_adj = lowmem_adj[i];
		oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
		lowmem_adj[i] = oom_score_adj;
		lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
			     oom_adj, oom_score_adj);
	}
}

static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
{
	int ret;

	ret = param_array_ops.set(val, kp);

	/* HACK: Autodetect oom_adj values in lowmem_adj array */
	lowmem_autodetect_oom_adj_values();

	return ret;
}

static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
{
	return param_array_ops.get(buffer, kp);
}

static void lowmem_adj_array_free(void *arg)
{
	param_array_ops.free(arg);
}

static struct kernel_param_ops lowmem_adj_array_ops = {
	.set = lowmem_adj_array_set,
	.get = lowmem_adj_array_get,
	.free = lowmem_adj_array_free,
};

static const struct kparam_array __param_arr_adj = {
	.max = ARRAY_SIZE(lowmem_adj),
	.num = &lowmem_adj_size,
	.ops = &param_ops_int,
	.elemsize = sizeof(lowmem_adj[0]),
	.elem = lowmem_adj,
};
#endif

module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
__module_param_call(MODULE_PARAM_PREFIX, adj,
		    &lowmem_adj_array_ops,
		    .arr = &__param_arr_adj,
		    S_IRUGO | S_IWUSR, -1);
__MODULE_PARM_TYPE(adj, "array of int");
#else
module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
			 S_IRUGO | S_IWUSR);
#endif
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
			 S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR);

module_init(lowmem_init);
module_exit(lowmem_exit);

MODULE_LICENSE("GPL");