From 047e6647f580a7c9bed2ac547bc9b15154d5da4c Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 28 Oct 2009 02:25:01 +0900 Subject: [PATCH] oom: oom-score bonus by run_time use proportional value Currently, the oom-score bonus by run_time uses the formula "sqrt(sqrt(runtime / 1024))". It means a process gets a 1/3 oom-score bonus per day. This feature exists to protect several important system daemons. However, typical desktop users reboot their systems every day, so for them the bonus is too small; it only works well on server systems. IOW, typical uptime strongly depends on the use-case, so it shouldn't be used as an oom modifier. Instead, this patch uses a run_time value proportional to uptime. Signed-off-by: KOSAKI Motohiro --- fs/proc/base.c | 1 + mm/oom_kill.c | 26 +++++++++++++++----------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/fs/proc/base.c b/fs/proc/base.c index 837469a..17d6fd4 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -446,6 +446,7 @@ static int proc_oom_score(struct task_struct *task, char *buffer) struct timespec uptime; do_posix_clock_monotonic_gettime(&uptime); + monotonic_to_bootbased(&uptime); read_lock(&tasklist_lock); points = badness(task->group_leader, uptime.tv_sec); read_unlock(&tasklist_lock); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ea2147d..3c1b3a3 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -69,10 +69,10 @@ static int has_intersects_mems_allowed(struct task_struct *tsk) * algorithm has been meticulously tuned to meet the principle * of least surprise ... 
(be careful when you change it) */ - unsigned long badness(struct task_struct *p, unsigned long uptime) { - unsigned long points, cpu_time, run_time; + unsigned long points, cpu_time; + unsigned long run_time = 0; struct mm_struct *mm; struct task_struct *child; int oom_adj = p->signal->oom_adj; @@ -130,17 +130,20 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) utime = cputime_to_jiffies(task_time.utime); stime = cputime_to_jiffies(task_time.stime); cpu_time = (utime + stime) >> (SHIFT_HZ + 3); - - - if (uptime >= p->start_time.tv_sec) - run_time = (uptime - p->start_time.tv_sec) >> 10; - else - run_time = 0; - if (cpu_time) points /= int_sqrt(cpu_time); - if (run_time) - points /= int_sqrt(int_sqrt(run_time)); + + if (uptime <= p->real_start_time.tv_sec) { + /* Baby process may be not so important. */ + points *= 2; + } else { + run_time = (uptime - p->real_start_time.tv_sec); + if (!run_time) + run_time = 1; + + run_time = ((run_time * 100) / uptime) + 1; + points /= int_sqrt(run_time); + } /* * Niced processes are most likely less important, so double @@ -233,6 +236,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints, *ppoints = 0; do_posix_clock_monotonic_gettime(&uptime); + monotonic_to_bootbased(&uptime); for_each_process(p) { unsigned long points; -- 1.6.2.5