blob: b9c0bce3ff140b367576c7b65322cd0f3804414b [file] [log] [blame]
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001/* drivers/misc/lowmemorykiller.c
2 *
3 * The lowmemorykiller driver lets user-space specify a set of memory thresholds
David Rientjes940f77b2012-02-13 19:28:49 -08004 * where processes with a range of oom_score_adj values will get killed. Specify
5 * the minimum oom_score_adj values in
6 * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
7 * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
8 * separated list of numbers in ascending order.
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09009 *
10 * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
Marco Navarra3bf5d652011-12-22 13:28:23 +010011 * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
David Rientjes940f77b2012-02-13 19:28:49 -080012 * processes with a oom_score_adj value of 8 or higher when the free memory
13 * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or
14 * higher when the free memory drops below 1024 pages.
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090015 *
16 * The driver considers memory used for caches to be free, but if a large
17 * percentage of the cached memory is locked this can be very inaccurate
18 * and processes may not get killed until the normal oom killer is triggered.
19 *
20 * Copyright (C) 2007-2008 Google, Inc.
21 *
22 * This software is licensed under the terms of the GNU General Public
23 * License version 2, as published by the Free Software Foundation, and
24 * may be copied, distributed, and modified under those terms.
25 *
26 * This program is distributed in the hope that it will be useful,
27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * GNU General Public License for more details.
30 *
31 */
32
Colin Crossb79a59c2013-05-03 14:57:29 -070033#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090035#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/mm.h>
38#include <linux/oom.h>
39#include <linux/sched.h>
Arve Hjønnevåg231e29f2012-09-26 18:01:17 -070040#include <linux/swap.h>
Anton Vorontsov294b2712012-02-06 20:29:41 +040041#include <linux/rcupdate.h>
San Mehat4755b722010-05-05 11:38:42 -070042#include <linux/notifier.h>
Liam Mark39eaf732013-01-25 12:40:18 -080043#include <linux/mutex.h>
44#include <linux/delay.h>
seungho1.parkdca32102012-07-24 10:20:44 +090045#include <linux/swap.h>
Liam Mark04c5d592013-08-30 12:10:39 -070046#include <linux/fs.h>
Steve Kondikf94910d2016-04-14 02:52:21 -070047#include <linux/zcache.h>
seungho1.parkdca32102012-07-24 10:20:44 +090048
49#ifdef CONFIG_HIGHMEM
50#define _ZONE ZONE_HIGHMEM
51#else
52#define _ZONE ZONE_NORMAL
53#endif
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090054
/* Verbosity for lowmem_print(); higher prints more (writable via module param). */
static uint32_t lowmem_debug_level = 1;
/*
 * oom_score_adj thresholds, paired index-for-index with lowmem_minfree[].
 * Sized [6] so up to six entries can be written via the "adj" module
 * parameter; only the first lowmem_adj_size entries are consulted.
 */
static int lowmem_adj[6] = {
	0,
	1,
	6,
	12,
};
static int lowmem_adj_size = 4;
/*
 * Free-page thresholds (in pages), ascending.  When free memory drops
 * below lowmem_minfree[i], tasks with oom_score_adj >= lowmem_adj[i]
 * become eligible to be killed.  Byte comments assume 4kB pages.
 */
static int lowmem_minfree[6] = {
	3 * 512,	/* 6MB */
	2 * 1024,	/* 8MB */
	4 * 1024,	/* 16MB */
	16 * 1024,	/* 64MB */
};
static int lowmem_minfree_size = 4;
/* When set, tune_lmk_param() also adjusts the file-page count (see below). */
static int lmk_fast_run = 1;

/* Jiffies deadline: after a kill, wait this long for the victim to exit
 * before selecting another task (set to jiffies + HZ in lowmem_shrink()). */
static unsigned long lowmem_deathpending_timeout;

/* Print via pr_info() only when debug_level is at or above `level`. */
#define lowmem_print(level, x...)			\
	do {						\
		if (lowmem_debug_level >= (level))	\
			pr_info(x);			\
	} while (0)
79
/*
 * test_task_flag - check whether any thread of process @p has thread
 * flag @flag (e.g. TIF_MEMDIE, TIF_MM_RELEASED) set.
 *
 * Walks the whole thread group with while_each_thread(), taking
 * task_lock() around each per-thread test.  Returns 1 as soon as one
 * thread has the flag, 0 if none do.
 *
 * NOTE(review): callers invoke this under rcu_read_lock() (see
 * lowmem_shrink()); the thread-list walk presumably relies on that —
 * confirm against the kernel version's while_each_thread() rules.
 */
static int test_task_flag(struct task_struct *p, int flag)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (test_tsk_thread_flag(t, flag)) {
			task_unlock(t);
			return 1;
		}
		task_unlock(t);
	} while_each_thread(p, t);

	return 0;
}
95
/* Serializes active scan passes of lowmem_shrink() (taken only when
 * sc->nr_to_scan > 0, i.e. a real scan rather than a count query). */
static DEFINE_MUTEX(scan_mutex);
Liam Mark3f1e5512013-03-27 12:34:51 -070098int can_use_cma_pages(gfp_t gfp_mask)
99{
100 int can_use = 0;
101 int mtype = allocflags_to_migratetype(gfp_mask);
102 int i = 0;
103 int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
104
105 if (is_migrate_cma(mtype)) {
106 can_use = 1;
107 } else {
108 for (i = 0;; i++) {
109 int fallbacktype = mtype_fallbacks[i];
110
111 if (is_migrate_cma(fallbacktype)) {
112 can_use = 1;
113 break;
114 }
115
116 if (fallbacktype == MIGRATE_RESERVE)
117 break;
118 }
119 }
120 return can_use;
121}
122
/*
 * tune_lmk_zone_param - subtract, from the global free/file counters,
 * pages that live in zones the current allocation context cannot use.
 *
 * @zonelist:      zonelist to walk (all zones up to MAX_NR_ZONES).
 * @classzone_idx: index of the allocation's preferred zone.
 * @other_free:    in/out free-page count; may be NULL for the high-zone
 *                 adjustment but must be non-NULL overall (the MOVABLE
 *                 and low-zone branches dereference it unconditionally).
 * @other_file:    in/out file-page count, or NULL to skip file tuning.
 * @use_cma_pages: nonzero if CMA pages are usable (see can_use_cma_pages()).
 */
void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
			 int *other_free, int *other_file,
			 int use_cma_pages)
{
	struct zone *zone;
	struct zoneref *zoneref;
	int zone_idx;

	for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
		zone_idx = zonelist_zone_idx(zoneref);
		/* ZONE_MOVABLE: only its CMA pages are discounted, and only
		 * when the allocation cannot use CMA. */
		if (zone_idx == ZONE_MOVABLE) {
			if (!use_cma_pages)
				*other_free -=
				    zone_page_state(zone, NR_FREE_CMA_PAGES);
			continue;
		}

		if (zone_idx > classzone_idx) {
			/* Zone is above the preferred zone: none of its free
			 * or file pages help this allocation. */
			if (other_free != NULL)
				*other_free -= zone_page_state(zone,
							       NR_FREE_PAGES);
			if (other_file != NULL)
				*other_file -= zone_page_state(zone,
							       NR_FILE_PAGES)
					- zone_page_state(zone, NR_SHMEM);
		} else if (zone_idx < classzone_idx) {
			/* Lower zone: if it is above its watermark, reserve
			 * lowmem_reserve[] pages (plus unusable CMA) for it;
			 * otherwise discount all of its free pages. */
			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) {
				if (!use_cma_pages) {
					*other_free -= min(
					  zone->lowmem_reserve[classzone_idx] +
					  zone_page_state(
					    zone, NR_FREE_CMA_PAGES),
					  zone_page_state(
					    zone, NR_FREE_PAGES));
				} else {
					*other_free -=
					  zone->lowmem_reserve[classzone_idx];
				}
			} else {
				*other_free -=
					   zone_page_state(zone, NR_FREE_PAGES);
			}
		}
	}
}
168
#ifdef CONFIG_HIGHMEM
/*
 * adjust_gfp_mask - when running as kswapd, widen @gfp_mask with
 * __GFP_HIGHMEM so the free-memory tuning also counts highmem.
 *
 * For a ZONE_NORMAL request the flag is only added while the preferred
 * zone is above its high watermark (zone_watermark_ok_safe()); for a
 * request already targeting ZONE_HIGHMEM it is added unconditionally.
 * Non-kswapd callers are left untouched.
 */
void adjust_gfp_mask(gfp_t *gfp_mask)
{
	struct zone *preferred_zone;
	struct zonelist *zonelist;
	enum zone_type high_zoneidx;

	if (current_is_kswapd()) {
		zonelist = node_zonelist(0, *gfp_mask);
		high_zoneidx = gfp_zone(*gfp_mask);
		first_zones_zonelist(zonelist, high_zoneidx, NULL,
				     &preferred_zone);

		if (high_zoneidx == ZONE_NORMAL) {
			if (zone_watermark_ok_safe(preferred_zone, 0,
					high_wmark_pages(preferred_zone), 0,
					0))
				*gfp_mask |= __GFP_HIGHMEM;
		} else if (high_zoneidx == ZONE_HIGHMEM) {
			*gfp_mask |= __GFP_HIGHMEM;
		}
	}
}
#else
/* No highmem configured: nothing to adjust. */
void adjust_gfp_mask(gfp_t *unused)
{
}
#endif
197
/*
 * tune_lmk_param - refine the raw global free/file page counts so they
 * reflect only memory usable by the allocation described by @sc.
 *
 * @other_free: in/out count of free pages.
 * @other_file: in/out count of file-backed pages.
 * @sc:         shrink_control carrying the triggering gfp_mask.
 *
 * Two regimes:
 *  - kswapd with the preferred zone comfortably above its high watermark
 *    (plus SWAP_CLUSTER_MAX and a balance gap): per-zone tuning via
 *    tune_lmk_zone_param() — file pages included only when lmk_fast_run
 *    is set — then the preferred zone's lowmem_reserve (and unusable CMA)
 *    is discounted.
 *  - otherwise: per-zone tuning including file pages, then the preferred
 *    zone's free CMA pages are discounted when CMA is unusable.
 */
void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
{
	gfp_t gfp_mask;
	struct zone *preferred_zone;
	struct zonelist *zonelist;
	enum zone_type high_zoneidx, classzone_idx;
	unsigned long balance_gap;
	int use_cma_pages;

	gfp_mask = sc->gfp_mask;
	adjust_gfp_mask(&gfp_mask);

	zonelist = node_zonelist(0, gfp_mask);
	high_zoneidx = gfp_zone(gfp_mask);
	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
	classzone_idx = zone_idx(preferred_zone);
	use_cma_pages = can_use_cma_pages(gfp_mask);

	/* Same balance-gap heuristic kswapd uses when deciding whether a
	 * zone is balanced. */
	balance_gap = min(low_wmark_pages(preferred_zone),
			  (preferred_zone->present_pages +
			   KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
			   KSWAPD_ZONE_BALANCE_GAP_RATIO);

	if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
			  high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
			  balance_gap, 0, 0))) {
		if (lmk_fast_run)
			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
				       other_file, use_cma_pages);
		else
			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
				       NULL, use_cma_pages);

		/* Reserve the preferred zone's lowmem_reserve[_ZONE] (and
		 * its unusable CMA pages) if it is above its watermark;
		 * otherwise discount all of its free pages. */
		if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
			if (!use_cma_pages) {
				*other_free -= min(
				  preferred_zone->lowmem_reserve[_ZONE]
				  + zone_page_state(
				    preferred_zone, NR_FREE_CMA_PAGES),
				  zone_page_state(
				    preferred_zone, NR_FREE_PAGES));
			} else {
				*other_free -=
				  preferred_zone->lowmem_reserve[_ZONE];
			}
		} else {
			*other_free -= zone_page_state(preferred_zone,
						      NR_FREE_PAGES);
		}

		lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
			     "ofree %d, %d\n", *other_free, *other_file);
	} else {
		tune_lmk_zone_param(zonelist, classzone_idx, other_free,
			       other_file, use_cma_pages);

		if (!use_cma_pages) {
			*other_free -=
			  zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
		}

		lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
			     "%d\n", *other_free, *other_file);
	}
}
263
/*
 * lowmem_shrink - shrinker callback that kills the "worst" task when
 * free memory falls below the configured thresholds.
 *
 * @s:  registered shrinker (unused here).
 * @sc: shrink control; nr_to_scan == 0 means "just report a count".
 *
 * Returns the number of (in)active LRU pages as the shrinker count,
 * minus the RSS of any task killed this pass; returns 0 early if the
 * scan mutex acquisition is interrupted or a previous victim is still
 * exiting.
 *
 * Victim selection: among user tasks with oom_score_adj >= the chosen
 * threshold, pick the highest oom_score_adj, breaking ties by largest
 * RSS.  The victim gets SIGKILL and TIF_MEMDIE, and a 1-second grace
 * window (lowmem_deathpending_timeout) suppresses further kills while
 * it exits.
 */
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	unsigned long nr_to_scan = sc->nr_to_scan;

	/* Real scan passes are serialized; bail out (reporting nothing to
	 * shrink) if a signal interrupts the lock wait. */
	if (nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/* File-backed page count, excluding shmem and swap cache but
	 * including zcache-compressed pages; clamped at 0 when the
	 * subtraction would go negative. */
	if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
		global_page_state(NR_FILE_PAGES))
		other_file = global_page_state(NR_FILE_PAGES) + zcache_pages() -
						global_page_state(NR_SHMEM) -
						total_swapcache_pages();
	else
		other_file = 0;

	tune_lmk_param(&other_free, &other_file, sc);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	/* Find the first (smallest) threshold both counters are below;
	 * its paired adj value becomes the kill cutoff. */
	for (i = 0; i < array_size; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	/* Count-only query, or no threshold crossed: nothing to kill. */
	if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     nr_to_scan, sc->gfp_mask, rem);

		if (nr_to_scan > 0)
			mutex_unlock(&scan_mutex);

		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		/* Kernel threads have no address space to reclaim. */
		if (tsk->flags & PF_KTHREAD)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		/* A previous victim is still dying: back off entirely and
		 * give it time to release memory. */
		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		/* Returns a task_lock()ed thread with a valid ->mm, or NULL. */
		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		/* Prefer higher adj; among equals prefer larger RSS. */
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select '%s' (%d), adj %d, size %d, to kill\n",
			     p->comm, p->pid, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "Killing '%s' (%d), adj %d,\n" \
				"   to free %ldkB on behalf of '%s' (%d) because\n" \
				"   cache %ldkB is below limit %ldkB for oom_score_adj %d\n" \
				"   Free memory is %ldkB above reserved\n",
			     selected->comm, selected->pid,
			     selected_oom_score_adj,
			     selected_tasksize * (long)(PAGE_SIZE / 1024),
			     current->comm, current->pid,
			     other_file * (long)(PAGE_SIZE / 1024),
			     minfree * (long)(PAGE_SIZE / 1024),
			     min_score_adj,
			     other_free * (long)(PAGE_SIZE / 1024));
		/* Suppress further kills for ~1s while the victim exits. */
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		/* TIF_MEMDIE lets the victim use memory reserves to exit. */
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
		/* give the system time to free up the memory */
		msleep_interruptible(20);
	} else
		rcu_read_unlock();

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}
403
/* Shrinker registration; the high seeks value (16 * DEFAULT_SEEKS) makes
 * the VM call us relatively rarely.  "cost" module param aliases .seeks. */
static struct shrinker lowmem_shrinker = {
	.shrink = lowmem_shrink,
	.seeks = DEFAULT_SEEKS * 16
};
408
/* Module init: hook lowmem_shrink into memory-pressure callbacks. */
static int __init lowmem_init(void)
{
	register_shrinker(&lowmem_shrinker);
	return 0;
}
414
/* Module exit: detach from the shrinker list. */
static void __exit lowmem_exit(void)
{
	unregister_shrinker(&lowmem_shrinker);
}
419
Arve Hjønnevåg227b6be2012-09-25 17:37:14 -0700420#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
421static int lowmem_oom_adj_to_oom_score_adj(int oom_adj)
422{
423 if (oom_adj == OOM_ADJUST_MAX)
424 return OOM_SCORE_ADJ_MAX;
425 else
426 return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
427}
428
/*
 * lowmem_autodetect_oom_adj_values - rescale lowmem_adj[] in place if
 * user space appears to have written legacy oom_adj values.
 *
 * Heuristic: if the largest configured entry is <= OOM_ADJUST_MAX but
 * converts to something above it on the oom_score_adj scale, the array
 * is assumed to hold oom_adj values and every entry is converted.
 * Otherwise the array is left untouched.
 */
static void lowmem_autodetect_oom_adj_values(void)
{
	int i;
	int oom_adj;
	int oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;

	if (array_size <= 0)
		return;

	/* Largest entry decides which scale the whole array is in. */
	oom_adj = lowmem_adj[array_size - 1];
	if (oom_adj > OOM_ADJUST_MAX)
		return;

	oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
	if (oom_score_adj <= OOM_ADJUST_MAX)
		return;

	lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
	for (i = 0; i < array_size; i++) {
		oom_adj = lowmem_adj[i];
		oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
		lowmem_adj[i] = oom_score_adj;
		lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
			     oom_adj, oom_score_adj);
	}
}
459
460static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
461{
462 int ret;
463
464 ret = param_array_ops.set(val, kp);
465
466 /* HACK: Autodetect oom_adj values in lowmem_adj array */
467 lowmem_autodetect_oom_adj_values();
468
469 return ret;
470}
471
/* "adj" getter: plain passthrough to the stock array formatter. */
static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
{
	return param_array_ops.get(buffer, kp);
}
476
/* "adj" free hook: plain passthrough to the stock array cleanup. */
static void lowmem_adj_array_free(void *arg)
{
	param_array_ops.free(arg);
}
481
/* Custom param ops so writes to "adj" run oom_adj autodetection. */
static struct kernel_param_ops lowmem_adj_array_ops = {
	.set = lowmem_adj_array_set,
	.get = lowmem_adj_array_get,
	.free = lowmem_adj_array_free,
};

/* Array descriptor backing the "adj" parameter (element ops stay the
 * standard per-int ops; only the array-level ops are overridden). */
static const struct kparam_array __param_arr_adj = {
	.max = ARRAY_SIZE(lowmem_adj),
	.num = &lowmem_adj_size,
	.ops = &param_ops_int,
	.elemsize = sizeof(lowmem_adj[0]),
	.elem = lowmem_adj,
};
495#endif
496
/* Module parameters: cost (shrinker seeks), adj, minfree, debug_level,
 * lmk_fast_run — all root-writable at runtime under
 * /sys/module/lowmemorykiller/parameters/. */
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
/* Register "adj" with custom ops so legacy oom_adj writes are converted. */
__module_param_call(MODULE_PARAM_PREFIX, adj,
		    &lowmem_adj_array_ops,
		    .arr = &__param_arr_adj,
		    S_IRUGO | S_IWUSR, -1);
__MODULE_PARM_TYPE(adj, "array of int");
#else
module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
			 S_IRUGO | S_IWUSR);
#endif
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
			 S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR);

module_init(lowmem_init);
module_exit(lowmem_exit);

MODULE_LICENSE("GPL");
517