/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "lowmemorykiller"

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysinfo.h>
#include <unistd.h>

#include <cutils/properties.h>
#include <cutils/sockets.h>
#include <lmkd.h>
#include <log/log.h>

#ifdef LMKD_LOG_STATS
#include <statslog.h>
#endif

/*
 * Define LMKD_TRACE_KILLS to record lmkd kills in kernel traces
 * to profile and correlate with OOM kills
 */
#ifdef LMKD_TRACE_KILLS

#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include <cutils/trace.h>

#define TRACE_KILL_START(pid) ATRACE_INT(__FUNCTION__, pid);
#define TRACE_KILL_END() ATRACE_INT(__FUNCTION__, 0);

#else /* LMKD_TRACE_KILLS */

#define TRACE_KILL_START(pid) ((void)(pid))
#define TRACE_KILL_END() ((void)0)

#endif /* LMKD_TRACE_KILLS */

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

#define MEMCG_SYSFS_PATH "/dev/memcg/"
#define MEMCG_MEMORY_USAGE "/dev/memcg/memory.usage_in_bytes"
#define MEMCG_MEMORYSW_USAGE "/dev/memcg/memory.memsw.usage_in_bytes"
#define ZONEINFO_PATH "/proc/zoneinfo"
#define MEMINFO_PATH "/proc/meminfo"
#define LINE_MAX 128

#define INKERNEL_MINFREE_PATH "/sys/module/lowmemorykiller/parameters/minfree"
#define INKERNEL_ADJ_PATH "/sys/module/lowmemorykiller/parameters/adj"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define EIGHT_MEGA (1 << 23)

#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
#define STRINGIFY_INTERNAL(x) #x

/* default to old in-kernel interface if no memory pressure events */
static int use_inkernel_interface = 1;
static bool has_inkernel_module;

/* memory pressure levels */
enum vmpressure_level {
    VMPRESS_LEVEL_LOW = 0,
    VMPRESS_LEVEL_MEDIUM,
    VMPRESS_LEVEL_CRITICAL,
    VMPRESS_LEVEL_COUNT
};

static const char *level_name[] = {
    "low",
    "medium",
    "critical"
};

struct mem_size {
    int free_mem;
    int free_swap;
};

struct {
    int min_free; /* recorded but not used yet */
    int max_free;
} low_pressure_mem = { -1, -1 };

static int level_oomadj[VMPRESS_LEVEL_COUNT];
static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 };
static bool debug_process_killing;
static bool enable_pressure_upgrade;
static int64_t upgrade_pressure;
static int64_t downgrade_pressure;
static bool low_ram_device;
static bool kill_heaviest_task;
static unsigned long kill_timeout_ms;

/* data required to handle events */
struct event_handler_info {
    int data;
    void (*handler)(int data, uint32_t events);
};

/* data required to handle socket events */
struct sock_event_handler_info {
    int sock;
    struct event_handler_info handler_info;
};

/* max supported number of data connections */
#define MAX_DATA_CONN 2

/* socket event handler data */
static struct sock_event_handler_info ctrl_sock;
static struct sock_event_handler_info data_sock[MAX_DATA_CONN];

/* vmpressure event handler data */
static struct event_handler_info vmpressure_hinfo[VMPRESS_LEVEL_COUNT];

/* 3 memory pressure levels, 1 ctrl listen socket, 2 ctrl data socket */
#define MAX_EPOLL_EVENTS (1 + MAX_DATA_CONN + VMPRESS_LEVEL_COUNT)
static int epollfd;
static int maxevents;

/* OOM score values used by both kernel and framework */
#define OOM_SCORE_ADJ_MIN (-1000)
#define OOM_SCORE_ADJ_MAX 1000

static int lowmem_adj[MAX_TARGETS];
static int lowmem_minfree[MAX_TARGETS];
static int lowmem_targets_size;

/* Fields to parse in /proc/zoneinfo */
enum zoneinfo_field {
    ZI_NR_FREE_PAGES = 0,
    ZI_NR_FILE_PAGES,
    ZI_NR_SHMEM,
    ZI_NR_UNEVICTABLE,
    ZI_WORKINGSET_REFAULT,
    ZI_HIGH,
    ZI_FIELD_COUNT
};

static const char* const zoneinfo_field_names[ZI_FIELD_COUNT] = {
    "nr_free_pages",
    "nr_file_pages",
    "nr_shmem",
    "nr_unevictable",
    "workingset_refault",
    "high",
};

union zoneinfo {
    struct {
        int64_t nr_free_pages;
        int64_t nr_file_pages;
        int64_t nr_shmem;
        int64_t nr_unevictable;
        int64_t workingset_refault;
        int64_t high;
        /* fields below are calculated rather than read from the file */
        int64_t totalreserve_pages;
    } field;
    int64_t arr[ZI_FIELD_COUNT];
};

/* Fields to parse in /proc/meminfo */
enum meminfo_field {
    MI_NR_FREE_PAGES = 0,
    MI_CACHED,
    MI_SWAP_CACHED,
    MI_BUFFERS,
    MI_SHMEM,
    MI_UNEVICTABLE,
    MI_FREE_SWAP,
    MI_DIRTY,
    MI_FIELD_COUNT
};

static const char* const meminfo_field_names[MI_FIELD_COUNT] = {
    "MemFree:",
    "Cached:",
    "SwapCached:",
    "Buffers:",
    "Shmem:",
    "Unevictable:",
    "SwapFree:",
    "Dirty:",
};

union meminfo {
    struct {
        int64_t nr_free_pages;
        int64_t cached;
        int64_t swap_cached;
        int64_t buffers;
        int64_t shmem;
        int64_t unevictable;
        int64_t free_swap;
        int64_t dirty;
        /* fields below are calculated rather than read from the file */
        int64_t nr_file_pages;
    } field;
    int64_t arr[MI_FIELD_COUNT];
};

enum field_match_result {
    NO_MATCH,
    PARSE_FAIL,
    PARSE_SUCCESS
};

struct sysmeminfo {
    int nr_free_pages;
    int nr_file_pages;
    int nr_shmem;
    int totalreserve_pages;
};

struct adjslot_list {
    struct adjslot_list *next;
    struct adjslot_list *prev;
};

struct proc {
    struct adjslot_list asl;
    int pid;
    uid_t uid;
    int oomadj;
    struct proc *pidhash_next;
};

struct reread_data {
    const char* const filename;
    int fd;
};

#ifdef LMKD_LOG_STATS
static bool enable_stats_log;
static android_log_context log_ctx;
#endif

#define PIDHASH_SZ 1024
static struct proc *pidhash[PIDHASH_SZ];
#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

#define ADJTOSLOT(adj) ((adj) + -OOM_SCORE_ADJ_MIN)
static struct adjslot_list procadjslot_list[ADJTOSLOT(OOM_SCORE_ADJ_MAX) + 1];

/* PAGE_SIZE / 1024 */
static long page_k;

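/* Parse the leading decimal integer in a string into an int64_t; returns false when no digits are found. */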
static bool parse_int64(const char* str, int64_t* ret) {
    char* endptr;
    long long val = strtoll(str, &endptr, 10);
    if (str == endptr || val > INT64_MAX) {
        return false;
    }
    *ret = (int64_t)val;
    return true;
}

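/*
 * Match a "<name> <value>" pair against a table of field names; on a match,
 * parse the value and report the index of the matched field.
 */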
static enum field_match_result match_field(const char* cp, const char* ap,
                                           const char* const field_names[],
                                           int field_count, int64_t* field,
                                           int *field_idx) {
    int64_t val;
    int i;

    for (i = 0; i < field_count; i++) {
        if (!strcmp(cp, field_names[i])) {
            *field_idx = i;
            return parse_int64(ap, field) ? PARSE_SUCCESS : PARSE_FAIL;
        }
    }
    return NO_MATCH;
}

/*
 * Read file content from the beginning up to max_len bytes or EOF
 * whichever happens first.
 */
static ssize_t read_all(int fd, char *buf, size_t max_len)
{
    ssize_t ret = 0;
    off_t offset = 0;

    while (max_len > 0) {
        ssize_t r = TEMP_FAILURE_RETRY(pread(fd, buf, max_len, offset));
        if (r == 0) {
            break;
        }
        if (r == -1) {
            return -1;
        }
        ret += r;
        buf += r;
        offset += r;
        max_len -= r;
    }

    return ret;
}

/*
 * Read a new or already opened file from the beginning.
 * If the file has not been opened yet data->fd should be set to -1.
 * To be used with files which are read often and possibly during high
 * memory pressure to minimize file opening which by itself requires kernel
 * memory allocation and might result in a stall on memory stressed system.
 */
static int reread_file(struct reread_data *data, char *buf, size_t buf_size) {
    ssize_t size;

    if (data->fd == -1) {
        data->fd = open(data->filename, O_RDONLY | O_CLOEXEC);
        if (data->fd == -1) {
            ALOGE("%s open: %s", data->filename, strerror(errno));
            return -1;
        }
    }

    size = read_all(data->fd, buf, buf_size - 1);
    if (size < 0) {
        ALOGE("%s read: %s", data->filename, strerror(errno));
        close(data->fd);
        data->fd = -1;
        return -1;
    }
    ALOG_ASSERT((size_t)size < buf_size - 1, "%s too large", data->filename);
    buf[size] = 0;

    return 0;
}

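/*
 * Registered processes are tracked in two structures: a pid-keyed hash table
 * for fast lookup/removal and per-oom_score_adj doubly-linked lists
 * (procadjslot_list) used to pick kill victims in score order.
 */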
static struct proc *pid_lookup(int pid) {
    struct proc *procp;

    for (procp = pidhash[pid_hashfn(pid)]; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            ;

    return procp;
}

static void adjslot_insert(struct adjslot_list *head, struct adjslot_list *new)
{
    struct adjslot_list *next = head->next;
    new->prev = head;
    new->next = next;
    next->prev = new;
    head->next = new;
}

static void adjslot_remove(struct adjslot_list *old)
{
    struct adjslot_list *prev = old->prev;
    struct adjslot_list *next = old->next;
    next->prev = prev;
    prev->next = next;
}

static struct adjslot_list *adjslot_tail(struct adjslot_list *head) {
    struct adjslot_list *asl = head->prev;

    return asl == head ? NULL : asl;
}

static void proc_slot(struct proc *procp) {
    int adjslot = ADJTOSLOT(procp->oomadj);

    adjslot_insert(&procadjslot_list[adjslot], &procp->asl);
}

static void proc_unslot(struct proc *procp) {
    adjslot_remove(&procp->asl);
}

static void proc_insert(struct proc *procp) {
    int hval = pid_hashfn(procp->pid);

    procp->pidhash_next = pidhash[hval];
    pidhash[hval] = procp;
    proc_slot(procp);
}

static int pid_remove(int pid) {
    int hval = pid_hashfn(pid);
    struct proc *procp;
    struct proc *prevp;

    for (procp = pidhash[hval], prevp = NULL; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            prevp = procp;

    if (!procp)
        return -1;

    if (!prevp)
        pidhash[hval] = procp->pidhash_next;
    else
        prevp->pidhash_next = procp->pidhash_next;

    proc_unslot(procp);
    free(procp);
    return 0;
}

static void writefilestring(const char *path, char *s) {
    int fd = open(path, O_WRONLY | O_CLOEXEC);
    int len = strlen(s);
    int ret;

    if (fd < 0) {
        ALOGE("Error opening %s; errno=%d", path, errno);
        return;
    }

    ret = write(fd, s, len);
    if (ret < 0) {
        ALOGE("Error writing %s; errno=%d", path, errno);
    } else if (ret < len) {
        ALOGE("Short write on %s; length=%d", path, ret);
    }

    close(fd);
}

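/*
 * Handle an LMK_PROCPRIO command: write the requested oom_score_adj for the
 * process, set a memcg soft limit scaled to its importance and register
 * (or update) the process in the kill candidate lists.
 */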
static void cmd_procprio(LMKD_CTRL_PACKET packet) {
    struct proc *procp;
    char path[80];
    char val[20];
    int soft_limit_mult;
    struct lmk_procprio params;

    lmkd_pack_get_procprio(packet, &params);

    if (params.oomadj < OOM_SCORE_ADJ_MIN ||
        params.oomadj > OOM_SCORE_ADJ_MAX) {
        ALOGE("Invalid PROCPRIO oomadj argument %d", params.oomadj);
        return;
    }

    snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", params.pid);
    snprintf(val, sizeof(val), "%d", params.oomadj);
    writefilestring(path, val);

    if (use_inkernel_interface)
        return;

    if (params.oomadj >= 900) {
        soft_limit_mult = 0;
    } else if (params.oomadj >= 800) {
        soft_limit_mult = 0;
    } else if (params.oomadj >= 700) {
        soft_limit_mult = 0;
    } else if (params.oomadj >= 600) {
        // Launcher should be perceptible, don't kill it.
        params.oomadj = 200;
        soft_limit_mult = 1;
    } else if (params.oomadj >= 500) {
        soft_limit_mult = 0;
    } else if (params.oomadj >= 400) {
        soft_limit_mult = 0;
    } else if (params.oomadj >= 300) {
        soft_limit_mult = 1;
    } else if (params.oomadj >= 200) {
        soft_limit_mult = 2;
    } else if (params.oomadj >= 100) {
        soft_limit_mult = 10;
    } else if (params.oomadj >= 0) {
        soft_limit_mult = 20;
    } else {
        // Persistent processes will have a large
        // soft limit 512MB.
        soft_limit_mult = 64;
    }

    snprintf(path, sizeof(path),
             "/dev/memcg/apps/uid_%d/pid_%d/memory.soft_limit_in_bytes",
             params.uid, params.pid);
    snprintf(val, sizeof(val), "%d", soft_limit_mult * EIGHT_MEGA);
    writefilestring(path, val);

    procp = pid_lookup(params.pid);
    if (!procp) {
        procp = malloc(sizeof(struct proc));
        if (!procp) {
            // Oh, the irony. May need to rebuild our state.
            return;
        }

        procp->pid = params.pid;
        procp->uid = params.uid;
        procp->oomadj = params.oomadj;
        proc_insert(procp);
    } else {
        proc_unslot(procp);
        procp->oomadj = params.oomadj;
        proc_slot(procp);
    }
}

static void cmd_procremove(LMKD_CTRL_PACKET packet) {
    struct lmk_procremove params;

    if (use_inkernel_interface)
        return;

    lmkd_pack_get_procremove(packet, &params);
    pid_remove(params.pid);
}

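/*
 * Handle an LMK_TARGET command: store the minfree/oom_adj_score table and,
 * when the in-kernel lowmemorykiller module is present, mirror it into the
 * module's minfree and adj parameters.
 */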
static void cmd_target(int ntargets, LMKD_CTRL_PACKET packet) {
    int i;
    struct lmk_target target;

    if (ntargets > (int)ARRAY_SIZE(lowmem_adj))
        return;

    for (i = 0; i < ntargets; i++) {
        lmkd_pack_get_target(packet, i, &target);
        lowmem_minfree[i] = target.minfree;
        lowmem_adj[i] = target.oom_adj_score;
    }

    lowmem_targets_size = ntargets;

    if (has_inkernel_module) {
        char minfreestr[128];
        char killpriostr[128];

        minfreestr[0] = '\0';
        killpriostr[0] = '\0';

        for (i = 0; i < lowmem_targets_size; i++) {
            char val[40];

            if (i) {
                strlcat(minfreestr, ",", sizeof(minfreestr));
                strlcat(killpriostr, ",", sizeof(killpriostr));
            }

            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_minfree[i] : 0);
            strlcat(minfreestr, val, sizeof(minfreestr));
            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_adj[i] : 0);
            strlcat(killpriostr, val, sizeof(killpriostr));
        }

        writefilestring(INKERNEL_MINFREE_PATH, minfreestr);
        writefilestring(INKERNEL_ADJ_PATH, killpriostr);
    }
}

static void ctrl_data_close(int dsock_idx) {
    struct epoll_event epev;

    ALOGI("closing lmkd data connection");
    if (epoll_ctl(epollfd, EPOLL_CTL_DEL, data_sock[dsock_idx].sock, &epev) == -1) {
        // Log a warning and keep going
        ALOGW("epoll_ctl for data connection socket failed; errno=%d", errno);
    }
    maxevents--;

    close(data_sock[dsock_idx].sock);
    data_sock[dsock_idx].sock = -1;
}

static int ctrl_data_read(int dsock_idx, char *buf, size_t bufsz) {
    int ret = 0;

    ret = TEMP_FAILURE_RETRY(read(data_sock[dsock_idx].sock, buf, bufsz));

    if (ret == -1) {
        ALOGE("control data socket read failed; errno=%d", errno);
    } else if (ret == 0) {
        ALOGE("Got EOF on control data socket");
        ret = -1;
    }

    return ret;
}

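/* Read one control packet from a data connection and dispatch it by command. */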
static void ctrl_command_handler(int dsock_idx) {
    LMKD_CTRL_PACKET packet;
    int len;
    enum lmk_cmd cmd;
    int nargs;
    int targets;

    len = ctrl_data_read(dsock_idx, (char *)packet, CTRL_PACKET_MAX_SIZE);
    if (len <= 0)
        return;

    if (len < (int)sizeof(int)) {
        ALOGE("Wrong control socket read length len=%d", len);
        return;
    }

    cmd = lmkd_pack_get_cmd(packet);
    nargs = len / sizeof(int) - 1;
    if (nargs < 0)
        goto wronglen;

    switch(cmd) {
    case LMK_TARGET:
        targets = nargs / 2;
        if (nargs & 0x1 || targets > (int)ARRAY_SIZE(lowmem_adj))
            goto wronglen;
        cmd_target(targets, packet);
        break;
    case LMK_PROCPRIO:
        if (nargs != 3)
            goto wronglen;
        cmd_procprio(packet);
        break;
    case LMK_PROCREMOVE:
        if (nargs != 1)
            goto wronglen;
        cmd_procremove(packet);
        break;
    default:
        ALOGE("Received unknown command code %d", cmd);
        return;
    }

    return;

wronglen:
    ALOGE("Wrong control socket read length cmd=%d len=%d", cmd, len);
}

static void ctrl_data_handler(int data, uint32_t events) {
    if (events & EPOLLIN) {
        ctrl_command_handler(data);
    }
}

static int get_free_dsock() {
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        if (data_sock[i].sock < 0) {
            return i;
        }
    }
    return -1;
}

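/*
 * Accept a new control data connection and register it with epoll; if all
 * data slots are busy, drop the existing connections to make room.
 */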
static void ctrl_connect_handler(int data __unused, uint32_t events __unused) {
    struct epoll_event epev;
    int free_dscock_idx = get_free_dsock();

    if (free_dscock_idx < 0) {
        /*
         * Number of data connections exceeded max supported. This should not
         * happen but if it does we drop all existing connections and accept
         * the new one. This prevents inactive connections from monopolizing
         * data socket and if we drop ActivityManager connection it will
         * immediately reconnect.
         */
        for (int i = 0; i < MAX_DATA_CONN; i++) {
            ctrl_data_close(i);
        }
        free_dscock_idx = 0;
    }

    data_sock[free_dscock_idx].sock = accept(ctrl_sock.sock, NULL, NULL);
    if (data_sock[free_dscock_idx].sock < 0) {
        ALOGE("lmkd control socket accept failed; errno=%d", errno);
        return;
    }

    ALOGI("lmkd data connection established");
    /* use data to store data connection idx */
    data_sock[free_dscock_idx].handler_info.data = free_dscock_idx;
    data_sock[free_dscock_idx].handler_info.handler = ctrl_data_handler;
    epev.events = EPOLLIN;
    epev.data.ptr = (void *)&(data_sock[free_dscock_idx].handler_info);
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, data_sock[free_dscock_idx].sock, &epev) == -1) {
        ALOGE("epoll_ctl for data connection socket failed; errno=%d", errno);
        ctrl_data_close(free_dscock_idx);
        return;
    }
    maxevents++;
}

#ifdef LMKD_LOG_STATS
static void memory_stat_parse_line(char *line, struct memory_stat *mem_st) {
    char key[LINE_MAX + 1];
    int64_t value;

    sscanf(line, "%" STRINGIFY(LINE_MAX) "s %" SCNd64 "", key, &value);

    if (strcmp(key, "total_") < 0) {
        return;
    }

    if (!strcmp(key, "total_pgfault"))
        mem_st->pgfault = value;
    else if (!strcmp(key, "total_pgmajfault"))
        mem_st->pgmajfault = value;
    else if (!strcmp(key, "total_rss"))
        mem_st->rss_in_bytes = value;
    else if (!strcmp(key, "total_cache"))
        mem_st->cache_in_bytes = value;
    else if (!strcmp(key, "total_swap"))
        mem_st->swap_in_bytes = value;
}

static int memory_stat_parse(struct memory_stat *mem_st, int pid, uid_t uid) {
    FILE *fp;
    char buf[PATH_MAX];

    snprintf(buf, sizeof(buf), MEMCG_PROCESS_MEMORY_STAT_PATH, uid, pid);

    fp = fopen(buf, "r");

    if (fp == NULL) {
        ALOGE("%s open failed: %s", buf, strerror(errno));
        return -1;
    }

    while (fgets(buf, PAGE_SIZE, fp) != NULL ) {
        memory_stat_parse_line(buf, mem_st);
    }
    fclose(fp);

    return 0;
}
#endif

/* /proc/zoneinfo parsing routines */
static int64_t zoneinfo_parse_protection(char *cp) {
    int64_t max = 0;
    long long zoneval;
    char *save_ptr;

    for (cp = strtok_r(cp, "(), ", &save_ptr); cp;
         cp = strtok_r(NULL, "), ", &save_ptr)) {
        zoneval = strtoll(cp, &cp, 0);
        if (zoneval > max) {
            max = (zoneval > INT64_MAX) ? INT64_MAX : zoneval;
        }
    }

    return max;
}

static bool zoneinfo_parse_line(char *line, union zoneinfo *zi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return true;
    }

    if (!strcmp(cp, "protection:")) {
        ap = strtok_r(NULL, ")", &save_ptr);
    } else {
        ap = strtok_r(NULL, " ", &save_ptr);
    }

    if (!ap) {
        return true;
    }

    switch (match_field(cp, ap, zoneinfo_field_names,
                        ZI_FIELD_COUNT, &val, &field_idx)) {
    case (PARSE_SUCCESS):
        zi->arr[field_idx] += val;
        break;
    case (NO_MATCH):
        if (!strcmp(cp, "protection:")) {
            zi->field.totalreserve_pages +=
                zoneinfo_parse_protection(ap);
        }
        break;
    case (PARSE_FAIL):
    default:
        return false;
    }
    return true;
}

static int zoneinfo_parse(union zoneinfo *zi) {
    static struct reread_data file_data = {
        .filename = ZONEINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(zi, 0, sizeof(union zoneinfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!zoneinfo_parse_line(line, zi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    zi->field.totalreserve_pages += zi->field.high;

    return 0;
}

/* /proc/meminfo parsing routines */
static bool meminfo_parse_line(char *line, union meminfo *mi) {
    char *cp = line;
    char *ap;
    char *save_ptr;
    int64_t val;
    int field_idx;
    enum field_match_result match_res;

    cp = strtok_r(line, " ", &save_ptr);
    if (!cp) {
        return false;
    }

    ap = strtok_r(NULL, " ", &save_ptr);
    if (!ap) {
        return false;
    }

    match_res = match_field(cp, ap, meminfo_field_names, MI_FIELD_COUNT,
                            &val, &field_idx);
    if (match_res == PARSE_SUCCESS) {
        mi->arr[field_idx] = val / page_k;
    }
    return (match_res != PARSE_FAIL);
}

static int meminfo_parse(union meminfo *mi) {
    static struct reread_data file_data = {
        .filename = MEMINFO_PATH,
        .fd = -1,
    };
    char buf[PAGE_SIZE];
    char *save_ptr;
    char *line;

    memset(mi, 0, sizeof(union meminfo));

    if (reread_file(&file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    for (line = strtok_r(buf, "\n", &save_ptr); line;
         line = strtok_r(NULL, "\n", &save_ptr)) {
        if (!meminfo_parse_line(line, mi)) {
            ALOGE("%s parse error", file_data.filename);
            return -1;
        }
    }
    mi->field.nr_file_pages = mi->field.cached + mi->field.swap_cached +
        mi->field.buffers;

    return 0;
}

static int get_free_memory(struct mem_size *ms) {
    struct sysinfo si;

    if (sysinfo(&si) < 0)
        return -1;

    ms->free_mem = (int)(si.freeram * si.mem_unit / PAGE_SIZE);
    ms->free_swap = (int)(si.freeswap * si.mem_unit / PAGE_SIZE);

    return 0;
}

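/* Return process RSS in pages as reported by /proc/<pid>/statm, or -1 on error. */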
static int proc_get_size(int pid) {
    char path[PATH_MAX];
    char line[LINE_MAX];
    int fd;
    int rss = 0;
    int total;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/statm", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return -1;

    ret = read_all(fd, line, sizeof(line) - 1);
    if (ret < 0) {
        close(fd);
        return -1;
    }

    sscanf(line, "%d %d ", &total, &rss);
    close(fd);
    return rss;
}

static char *proc_get_name(int pid) {
    char path[PATH_MAX];
    static char line[LINE_MAX];
    int fd;
    char *cp;
    ssize_t ret;

    snprintf(path, PATH_MAX, "/proc/%d/cmdline", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return NULL;
    ret = read_all(fd, line, sizeof(line) - 1);
    close(fd);
    if (ret < 0) {
        return NULL;
    }

    cp = strchr(line, ' ');
    if (cp)
        *cp = '\0';

    return line;
}

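/* Pick the least recently registered process at the given oom_score_adj. */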
static struct proc *proc_adj_lru(int oomadj) {
    return (struct proc *)adjslot_tail(&procadjslot_list[ADJTOSLOT(oomadj)]);
}

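/*
 * Pick the process with the largest RSS at the given oom_score_adj, pruning
 * entries whose /proc records are already gone.
 */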
static struct proc *proc_get_heaviest(int oomadj) {
    struct adjslot_list *head = &procadjslot_list[ADJTOSLOT(oomadj)];
    struct adjslot_list *curr = head->next;
    struct proc *maxprocp = NULL;
    int maxsize = 0;
    while (curr != head) {
        int pid = ((struct proc *)curr)->pid;
        int tasksize = proc_get_size(pid);
        if (tasksize <= 0) {
            struct adjslot_list *next = curr->next;
            pid_remove(pid);
            curr = next;
        } else {
            if (tasksize > maxsize) {
                maxsize = tasksize;
                maxprocp = (struct proc *)curr;
            }
            curr = curr->next;
        }
    }
    return maxprocp;
}

/* Kill one process specified by procp. Returns the size of the process killed */
static int kill_one_process(struct proc* procp, int min_score_adj,
                            enum vmpressure_level level) {
    int pid = procp->pid;
    uid_t uid = procp->uid;
    char *taskname;
    int tasksize;
    int r;

#ifdef LMKD_LOG_STATS
    struct memory_stat mem_st = {};
    int memory_stat_parse_result = -1;
#endif

    taskname = proc_get_name(pid);
    if (!taskname) {
        pid_remove(pid);
        return -1;
    }

    tasksize = proc_get_size(pid);
    if (tasksize <= 0) {
        pid_remove(pid);
        return -1;
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log) {
        memory_stat_parse_result = memory_stat_parse(&mem_st, pid, uid);
    }
#endif

    TRACE_KILL_START(pid);

    r = kill(pid, SIGKILL);
    ALOGI(
        "Killing '%s' (%d), uid %d, adj %d\n"
        "   to free %ldkB because system is under %s memory pressure oom_adj %d\n",
        taskname, pid, uid, procp->oomadj, tasksize * page_k,
        level_name[level], min_score_adj);
    pid_remove(pid);

    TRACE_KILL_END();

    if (r) {
        ALOGE("kill(%d): errno=%d", pid, errno);
        return -1;
    } else {
#ifdef LMKD_LOG_STATS
        if (memory_stat_parse_result == 0) {
            stats_write_lmk_kill_occurred(log_ctx, LMK_KILL_OCCURRED, uid, taskname,
                    procp->oomadj, mem_st.pgfault, mem_st.pgmajfault, mem_st.rss_in_bytes,
                    mem_st.cache_in_bytes, mem_st.swap_in_bytes);
        }
#endif
        return tasksize;
    }

    return tasksize;
}

/*
 * Find processes to kill to free required number of pages.
 * If pages_to_free is set to 0 only one process will be killed.
 * Returns the size of the killed processes.
 */
static int find_and_kill_processes(enum vmpressure_level level,
                                   int pages_to_free) {
    int i;
    int killed_size;
    int pages_freed = 0;
    int min_score_adj = level_oomadj[level];

#ifdef LMKD_LOG_STATS
    if (enable_stats_log) {
        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED, LMK_STATE_CHANGE_START);
    }
#endif

    for (i = OOM_SCORE_ADJ_MAX; i >= min_score_adj; i--) {
        struct proc *procp;

        while (true) {
            procp = kill_heaviest_task ?
                proc_get_heaviest(i) : proc_adj_lru(i);

            if (!procp)
                break;

            killed_size = kill_one_process(procp, min_score_adj, level);
            if (killed_size >= 0) {
                pages_freed += killed_size;
                if (pages_freed >= pages_to_free) {

#ifdef LMKD_LOG_STATS
                    if (enable_stats_log) {
                        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED,
                                LMK_STATE_CHANGE_STOP);
                    }
#endif
                    return pages_freed;
                }
            }
        }
    }

#ifdef LMKD_LOG_STATS
    if (enable_stats_log) {
        stats_write_lmk_state_changed(log_ctx, LMK_STATE_CHANGED, LMK_STATE_CHANGE_STOP);
    }
#endif

    return pages_freed;
}

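/* Read a memcg memory usage counter (in bytes) through a cached file descriptor. */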
static int64_t get_memory_usage(struct reread_data *file_data) {
    int64_t mem_usage;
    char buf[32];

    if (reread_file(file_data, buf, sizeof(buf)) < 0) {
        return -1;
    }

    if (!parse_int64(buf, &mem_usage)) {
        ALOGE("%s parse error", file_data->filename);
        return -1;
    }
    if (mem_usage == 0) {
        ALOGE("No memory!");
        return -1;
    }
    return mem_usage;
}

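/*
 * Track minimum and maximum free memory observed at low vmpressure events;
 * max_free is later used as the amount of memory the killer tries to keep free.
 */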
void record_low_pressure_levels(struct mem_size *free_mem) {
    if (low_pressure_mem.min_free == -1 ||
        low_pressure_mem.min_free > free_mem->free_mem) {
        if (debug_process_killing) {
            ALOGI("Low pressure min memory update from %d to %d",
                low_pressure_mem.min_free, free_mem->free_mem);
        }
        low_pressure_mem.min_free = free_mem->free_mem;
    }
    /*
     * Free memory at low vmpressure events occasionally gets spikes,
     * possibly a stale low vmpressure event with memory already
     * freed up (no memory pressure should have been reported).
     * Ignore large jumps in max_free that would mess up our stats.
     */
    if (low_pressure_mem.max_free == -1 ||
        (low_pressure_mem.max_free < free_mem->free_mem &&
         free_mem->free_mem - low_pressure_mem.max_free < low_pressure_mem.max_free * 0.1)) {
        if (debug_process_killing) {
            ALOGI("Low pressure max memory update from %d to %d",
                low_pressure_mem.max_free, free_mem->free_mem);
        }
        low_pressure_mem.max_free = free_mem->free_mem;
    }
}

enum vmpressure_level upgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level < VMPRESS_LEVEL_CRITICAL) ?
        level + 1 : level);
}

enum vmpressure_level downgrade_level(enum vmpressure_level level) {
    return (enum vmpressure_level)((level > VMPRESS_LEVEL_LOW) ?
        level - 1 : level);
}

static inline unsigned long get_time_diff_ms(struct timeval *from,
                                             struct timeval *to) {
    return (to->tv_sec - from->tv_sec) * 1000 +
           (to->tv_usec - from->tv_usec) / 1000;
}

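/*
 * Common handler for all vmpressure levels: coalesce pending events into the
 * highest reported level, optionally rate-limit kills, adjust the level based
 * on memcg swap pressure and kill processes until enough pages are freed.
 */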
static void mp_event_common(int data, uint32_t events __unused) {
    unsigned long long evcount;
    int64_t mem_usage, memsw_usage;
    int64_t mem_pressure;
    enum vmpressure_level lvl;
    struct mem_size free_mem;
    static struct timeval last_report_tm;
    static unsigned long skip_count = 0;
    enum vmpressure_level level = (enum vmpressure_level)data;
    static struct reread_data mem_usage_file_data = {
        .filename = MEMCG_MEMORY_USAGE,
        .fd = -1,
    };
    static struct reread_data memsw_usage_file_data = {
        .filename = MEMCG_MEMORYSW_USAGE,
        .fd = -1,
    };

    /*
     * Check all event counters from low to critical
     * and upgrade to the highest priority one. By reading
     * eventfd we also reset the event counters.
     */
    for (lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) {
        if (mpevfd[lvl] != -1 &&
            TEMP_FAILURE_RETRY(read(mpevfd[lvl],
                               &evcount, sizeof(evcount))) > 0 &&
            evcount > 0 && lvl > level) {
            level = lvl;
        }
    }

    if (kill_timeout_ms) {
        struct timeval curr_tm;
        gettimeofday(&curr_tm, NULL);
        if (get_time_diff_ms(&last_report_tm, &curr_tm) < kill_timeout_ms) {
            skip_count++;
            return;
        }
    }

    if (skip_count > 0) {
        if (debug_process_killing) {
            ALOGI("%lu memory pressure events were skipped after a kill!",
                skip_count);
        }
        skip_count = 0;
    }

    if (get_free_memory(&free_mem) == 0) {
        if (level == VMPRESS_LEVEL_LOW) {
            record_low_pressure_levels(&free_mem);
        }
    } else {
        ALOGE("Failed to get free memory!");
        return;
    }

    if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) {
        /* Do not monitor this pressure level */
        return;
    }

    if ((mem_usage = get_memory_usage(&mem_usage_file_data)) < 0) {
        goto do_kill;
    }
    if ((memsw_usage = get_memory_usage(&memsw_usage_file_data)) < 0) {
        goto do_kill;
    }

    // Calculate percent for swappiness.
    mem_pressure = (mem_usage * 100) / memsw_usage;

    if (enable_pressure_upgrade && level != VMPRESS_LEVEL_CRITICAL) {
        // We are swapping too much.
        if (mem_pressure < upgrade_pressure) {
            level = upgrade_level(level);
            if (debug_process_killing) {
                ALOGI("Event upgraded to %s", level_name[level]);
            }
        }
    }

    // If the pressure is larger than downgrade_pressure lmk will not
    // kill any process, since enough memory is available.
    if (mem_pressure > downgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Ignore %s memory pressure", level_name[level]);
        }
        return;
    } else if (level == VMPRESS_LEVEL_CRITICAL &&
               mem_pressure > upgrade_pressure) {
        if (debug_process_killing) {
            ALOGI("Downgrade critical memory pressure");
        }
        // Downgrade event, since enough memory available.
        level = downgrade_level(level);
    }

do_kill:
    if (low_ram_device) {
        /* For Go devices kill only one task */
        if (find_and_kill_processes(level, 0) == 0) {
            if (debug_process_killing) {
                ALOGI("Nothing to kill");
            }
        }
    } else {
        /* If pressure level is less than critical and enough free swap then ignore */
        if (level < VMPRESS_LEVEL_CRITICAL && free_mem.free_swap > low_pressure_mem.max_free) {
            if (debug_process_killing) {
                ALOGI("Ignoring pressure since %d swap pages are available ", free_mem.free_swap);
            }
            return;
        }

        /* Free up enough memory to downgrade the memory pressure to low level */
        if (free_mem.free_mem < low_pressure_mem.max_free) {
            int pages_to_free = low_pressure_mem.max_free - free_mem.free_mem;
            if (debug_process_killing) {
                ALOGI("Trying to free %d pages", pages_to_free);
            }
            int pages_freed = find_and_kill_processes(level, pages_to_free);
            if (pages_freed < pages_to_free) {
                if (debug_process_killing) {
                    ALOGI("Unable to free enough memory (pages freed=%d)",
                        pages_freed);
                }
            } else {
                gettimeofday(&last_report_tm, NULL);
            }
        }
    }
}

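/*
 * Register for memcg vmpressure notifications at the given level: create an
 * eventfd, attach it to memory.pressure_level through cgroup.event_control
 * and add it to the epoll set.
 */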
static bool init_mp_common(enum vmpressure_level level) {
    int mpfd;
    int evfd;
    int evctlfd;
    char buf[256];
    struct epoll_event epev;
    int ret;
    int level_idx = (int)level;
    const char *levelstr = level_name[level_idx];

    mpfd = open(MEMCG_SYSFS_PATH "memory.pressure_level", O_RDONLY | O_CLOEXEC);
    if (mpfd < 0) {
        ALOGI("No kernel memory.pressure_level support (errno=%d)", errno);
        goto err_open_mpfd;
    }

    evctlfd = open(MEMCG_SYSFS_PATH "cgroup.event_control", O_WRONLY | O_CLOEXEC);
    if (evctlfd < 0) {
        ALOGI("No kernel memory cgroup event control (errno=%d)", errno);
        goto err_open_evctlfd;
    }

    evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (evfd < 0) {
        ALOGE("eventfd failed for level %s; errno=%d", levelstr, errno);
        goto err_eventfd;
    }

    ret = snprintf(buf, sizeof(buf), "%d %d %s", evfd, mpfd, levelstr);
    if (ret >= (ssize_t)sizeof(buf)) {
        ALOGE("cgroup.event_control line overflow for level %s", levelstr);
        goto err;
    }

    ret = TEMP_FAILURE_RETRY(write(evctlfd, buf, strlen(buf) + 1));
    if (ret == -1) {
        ALOGE("cgroup.event_control write failed for level %s; errno=%d",
              levelstr, errno);
        goto err;
    }

    epev.events = EPOLLIN;
    /* use data to store event level */
    vmpressure_hinfo[level_idx].data = level_idx;
    vmpressure_hinfo[level_idx].handler = mp_event_common;
    epev.data.ptr = (void *)&vmpressure_hinfo[level_idx];
    ret = epoll_ctl(epollfd, EPOLL_CTL_ADD, evfd, &epev);
    if (ret == -1) {
        ALOGE("epoll_ctl for level %s failed; errno=%d", levelstr, errno);
        goto err;
    }
    maxevents++;
    mpevfd[level] = evfd;
    close(evctlfd);
    return true;

err:
    close(evfd);
err_eventfd:
    close(evctlfd);
err_open_evctlfd:
    close(mpfd);
err_open_mpfd:
    return false;
}

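/*
 * One-time initialization: set up epoll, the lmkd control socket and either
 * the in-kernel lowmemorykiller interface or vmpressure monitoring.
 */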
static int init(void) {
    struct epoll_event epev;
    int i;
    int ret;

    page_k = sysconf(_SC_PAGESIZE);
    if (page_k == -1)
        page_k = PAGE_SIZE;
    page_k /= 1024;

    epollfd = epoll_create(MAX_EPOLL_EVENTS);
    if (epollfd == -1) {
        ALOGE("epoll_create failed (errno=%d)", errno);
        return -1;
    }

    // mark data connections as not connected
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        data_sock[i].sock = -1;
    }

    ctrl_sock.sock = android_get_control_socket("lmkd");
    if (ctrl_sock.sock < 0) {
        ALOGE("get lmkd control socket failed");
        return -1;
    }

    ret = listen(ctrl_sock.sock, MAX_DATA_CONN);
    if (ret < 0) {
        ALOGE("lmkd control socket listen failed (errno=%d)", errno);
        return -1;
    }

    epev.events = EPOLLIN;
    ctrl_sock.handler_info.handler = ctrl_connect_handler;
    epev.data.ptr = (void *)&(ctrl_sock.handler_info);
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ctrl_sock.sock, &epev) == -1) {
        ALOGE("epoll_ctl for lmkd control socket failed (errno=%d)", errno);
        return -1;
    }
    maxevents++;

    has_inkernel_module = !access(INKERNEL_MINFREE_PATH, W_OK);
    use_inkernel_interface = has_inkernel_module;

    if (use_inkernel_interface) {
        ALOGI("Using in-kernel low memory killer interface");
    } else {
        if (!init_mp_common(VMPRESS_LEVEL_LOW) ||
            !init_mp_common(VMPRESS_LEVEL_MEDIUM) ||
            !init_mp_common(VMPRESS_LEVEL_CRITICAL)) {
            ALOGE("Kernel does not support memory pressure events or in-kernel low memory killer");
            return -1;
        }
    }

    for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
        procadjslot_list[i].next = &procadjslot_list[i];
        procadjslot_list[i].prev = &procadjslot_list[i];
    }

    return 0;
}

static void mainloop(void) {
    struct event_handler_info* handler_info;
    struct epoll_event *evt;

    while (1) {
        struct epoll_event events[maxevents];
        int nevents;
        int i;

        nevents = epoll_wait(epollfd, events, maxevents, -1);

        if (nevents == -1) {
            if (errno == EINTR)
                continue;
            ALOGE("epoll_wait failed (errno=%d)", errno);
            continue;
        }

        /*
         * First pass to see if any data socket connections were dropped.
         * Dropped connection should be handled before any other events
         * to deallocate data connection and correctly handle cases when
         * connection gets dropped and reestablished in the same epoll cycle.
         * In such cases it's essential to handle connection closures first.
         */
        for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
            if ((evt->events & EPOLLHUP) && evt->data.ptr) {
                ALOGI("lmkd data connection dropped");
                handler_info = (struct event_handler_info*)evt->data.ptr;
                ctrl_data_close(handler_info->data);
            }
        }

        /* Second pass to handle all other events */
        for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
            if (evt->events & EPOLLERR)
                ALOGD("EPOLLERR on event #%d", i);
            if (evt->events & EPOLLHUP) {
                /* This case was handled in the first pass */
                continue;
            }
            if (evt->data.ptr) {
                handler_info = (struct event_handler_info*)evt->data.ptr;
                handler_info->handler(handler_info->data, evt->events);
            }
        }
    }
}

int main(int argc __unused, char **argv __unused) {
    struct sched_param param = {
        .sched_priority = 1,
    };

    /* By default disable low level vmpressure events */
    level_oomadj[VMPRESS_LEVEL_LOW] =
        property_get_int32("ro.lmk.low", OOM_SCORE_ADJ_MAX + 1);
    level_oomadj[VMPRESS_LEVEL_MEDIUM] =
        property_get_int32("ro.lmk.medium", 800);
    level_oomadj[VMPRESS_LEVEL_CRITICAL] =
        property_get_int32("ro.lmk.critical", 0);
    debug_process_killing = property_get_bool("ro.lmk.debug", false);

    /* By default disable upgrade/downgrade logic */
    enable_pressure_upgrade =
        property_get_bool("ro.lmk.critical_upgrade", false);
    upgrade_pressure =
        (int64_t)property_get_int32("ro.lmk.upgrade_pressure", 100);
    downgrade_pressure =
        (int64_t)property_get_int32("ro.lmk.downgrade_pressure", 100);
    kill_heaviest_task =
        property_get_bool("ro.lmk.kill_heaviest_task", false);
    low_ram_device = property_get_bool("ro.config.low_ram", false);
    kill_timeout_ms =
        (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);

#ifdef LMKD_LOG_STATS
    statslog_init(&log_ctx, &enable_stats_log);
#endif

    // MCL_ONFAULT pins pages as they fault instead of loading
    // everything immediately all at once. (Which would be bad,
    // because as of this writing, we have a lot of mapped pages we
    // never use.) Old kernels will see MCL_ONFAULT and fail with
    // EINVAL; we ignore this failure.
    //
    // N.B. read the man page for mlockall. MCL_CURRENT | MCL_ONFAULT
    // pins ⊆ MCL_CURRENT, converging to just MCL_CURRENT as we fault
    // in pages.
    if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) && errno != EINVAL)
        ALOGW("mlockall failed: errno=%d", errno);

    sched_setscheduler(0, SCHED_FIFO, &param);
    if (!init())
        mainloop();

#ifdef LMKD_LOG_STATS
    statslog_destroy(&log_ctx);
#endif

    ALOGI("exiting");
    return 0;
}