author    | Suren Baghdasaryan <surenb@google.com> | 2018-01-17 17:28:01 -0800
committer | Suren Baghdasaryan <surenb@google.com> | 2018-03-02 01:15:01 +0000
commit    | 63dadcf79e788a8850c4113eaa2bc58b7efe545a (patch)
tree      | cfbe56c854c56e14091eecaa2fdf529acfabb7ff
parent    | aa73bafea7515794eca4fb27ffe634e65c450915 (diff)
lmkd: Implement kill timeout
(cherry picked from commit caa2dc56fd52d8d773aa8b902fc605b453111976)
The new ro.lmk.kill_timeout_ms property defines a timeout, in
milliseconds, after a successful kill cycle before further kills are
considered. This is necessary because memory pressure does not drop
instantly after a kill and the system needs time to reflect the new
memory state; the timeout prevents extra kills in the period
immediately after a kill cycle. By default it is set to 0, which
disables the feature.
Bug: 63631020
Test: alloc-stress
Change-Id: Ia847118c8c4a659a7fc38cd5cd0042acb514ae28
Merged-In: Ia847118c8c4a659a7fc38cd5cd0042acb514ae28
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
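The change amounts to a millisecond-granularity debounce: record the time of the last successful kill cycle and skip pressure events that arrive before the timeout has elapsed. Below is a minimal standalone sketch of that pattern; the names (`time_diff_ms`, `should_handle_pressure_event`) are hypothetical stand-ins, and the actual lmkd change follows in the diff.

```c
#include <stdio.h>
#include <sys/time.h>

/* Hypothetical stand-ins for the real property value and kill state. */
static unsigned long kill_timeout_ms = 100;  /* ro.lmk.kill_timeout_ms */
static struct timeval last_kill_tm;          /* time of last successful kill */

/* Millisecond difference between two timestamps, as in the patch. */
static unsigned long time_diff_ms(struct timeval *from, struct timeval *to) {
    return (to->tv_sec - from->tv_sec) * 1000 +
           (to->tv_usec - from->tv_usec) / 1000;
}

/* Returns 1 if a pressure event should be handled, 0 if it falls
 * inside the post-kill window where memory stats are still stale. */
static int should_handle_pressure_event(void) {
    struct timeval now;
    gettimeofday(&now, NULL);
    if (kill_timeout_ms &&
        time_diff_ms(&last_kill_tm, &now) < kill_timeout_ms) {
        return 0;
    }
    return 1;
}

int main(void) {
    gettimeofday(&last_kill_tm, NULL);  /* pretend a kill just happened */
    printf("handle event immediately after a kill? %d\n",
           should_handle_pressure_event());
    return 0;
}
```

Note that a timeout of 0 makes the `kill_timeout_ms` check false, so the window logic is bypassed entirely, matching the property's disabled-by-default behavior.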
-rw-r--r-- | lmkd/lmkd.c | 30
1 file changed, 30 insertions(+), 0 deletions(-)
```diff
diff --git a/lmkd/lmkd.c b/lmkd/lmkd.c
index cedd44431..7e940ec11 100644
--- a/lmkd/lmkd.c
+++ b/lmkd/lmkd.c
@@ -120,6 +120,7 @@ static int64_t upgrade_pressure;
 static int64_t downgrade_pressure;
 static bool is_go_device;
 static bool kill_heaviest_task;
+static unsigned long kill_timeout_ms;
 
 /* control socket listen and data */
 static int ctrl_lfd;
@@ -795,6 +796,12 @@ enum vmpressure_level downgrade_level(enum vmpressure_level level) {
         level - 1 : level);
 }
 
+static inline unsigned long get_time_diff_ms(struct timeval *from,
+                                             struct timeval *to) {
+    return (to->tv_sec - from->tv_sec) * 1000 +
+           (to->tv_usec - from->tv_usec) / 1000;
+}
+
 static void mp_event_common(enum vmpressure_level level) {
     int ret;
     unsigned long long evcount;
@@ -802,6 +809,8 @@ static void mp_event_common(enum vmpressure_level level) {
     int64_t mem_pressure;
     enum vmpressure_level lvl;
     struct mem_size free_mem;
+    static struct timeval last_report_tm;
+    static unsigned long skip_count = 0;
 
     /*
      * Check all event counters from low to critical
@@ -816,6 +825,23 @@ static void mp_event_common(enum vmpressure_level level) {
         }
     }
 
+    if (kill_timeout_ms) {
+        struct timeval curr_tm;
+        gettimeofday(&curr_tm, NULL);
+        if (get_time_diff_ms(&last_report_tm, &curr_tm) < kill_timeout_ms) {
+            skip_count++;
+            return;
+        }
+    }
+
+    if (skip_count > 0) {
+        if (debug_process_killing) {
+            ALOGI("%lu memory pressure events were skipped after a kill!",
+                  skip_count);
+        }
+        skip_count = 0;
+    }
+
     if (get_free_memory(&free_mem) == 0) {
         if (level == VMPRESS_LEVEL_LOW) {
             record_low_pressure_levels(&free_mem);
@@ -894,6 +920,8 @@ do_kill:
                 ALOGI("Unable to free enough memory (pages freed=%d)",
                       pages_freed);
             }
+        } else {
+            gettimeofday(&last_report_tm, NULL);
         }
     }
 }
@@ -1081,6 +1109,8 @@ int main(int argc __unused, char **argv __unused) {
     kill_heaviest_task =
         property_get_bool("ro.lmk.kill_heaviest_task", true);
     is_go_device = property_get_bool("ro.config.low_ram", false);
+    kill_timeout_ms =
+        (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);
 
     // MCL_ONFAULT pins pages as they fault instead of loading
     // everything immediately all at once. (Which would be bad,
```
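One design point worth flagging: get_time_diff_ms() is built on gettimeofday(), i.e. wall-clock time, which can jump when the system clock is adjusted (NTP step, manual change), briefly stretching or shrinking the kill window. A monotonic-clock variant is sketched below for comparison; this is an assumed alternative, not something the patch does.

```c
#include <time.h>

/* Millisecond difference between two monotonic timestamps.
 * Sketch of an alternative only; the patch itself uses
 * gettimeofday() and struct timeval. */
static unsigned long mono_time_diff_ms(struct timespec *from,
                                       struct timespec *to) {
    return (to->tv_sec - from->tv_sec) * 1000 +
           (to->tv_nsec - from->tv_nsec) / 1000000;
}

/* Timestamps would come from clock_gettime(CLOCK_MONOTONIC, &ts). */
```

For a short kill throttle the practical difference is usually negligible, which makes the simpler gettimeofday() approach a reasonable choice here.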