Re: [PATCH v5 3/3] sched/cputime: Add steal time support to full dynticks CPU time accounting
From: Wanpeng Li
Date: Sun Jun 12 2016 - 23:38:17 EST
2016-06-08 18:14 GMT+08:00 Paolo Bonzini <pbonzini@xxxxxxxxxx>:
>
>
> On 08/06/2016 05:05, Wanpeng Li wrote:
>> From: Wanpeng Li <wanpeng.li@xxxxxxxxxxx>
>>
>> This patch adds guest steal-time support to full dynticks CPU
>> time accounting. After the following commit:
>>
>> ff9a9b4c4334 ("sched, time: Switch VIRT_CPU_ACCOUNTING_GEN to jiffy granularity")
>>
>> ... time sampling became jiffy based, even though it is still tied
>> to ring boundaries, so steal_account_process_tick() is reused
>> to account how many 'ticks' of stolen time elapsed since the last accumulation.
>>
>> Suggested-by: Rik van Riel <riel@xxxxxxxxxx>
>> Cc: Ingo Molnar <mingo@xxxxxxxxxx>
>> Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
>> Cc: Rik van Riel <riel@xxxxxxxxxx>
>> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
>> Cc: Frederic Weisbecker <fweisbec@xxxxxxxxx>
>> Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
>> Cc: Radim Krčmář <rkrcmar@xxxxxxxxxx>
>> Signed-off-by: Wanpeng Li <wanpeng.li@xxxxxxxxxxx>
>> ---
>> v4 -> v5:
>> * apply same logic to account_idle_time, so change get_vtime_delta instead
>> v3 -> v4:
>> * fix grammar errors, thanks Ingo
>> * cleanup fragile codes, thanks Ingo
>> v2 -> v3:
>> * convert steal time jiffies to cputime
>> v1 -> v2:
>> * fix divide zero bug, thanks Rik
>>
>> kernel/sched/cputime.c | 13 +++++++++----
>> 1 file changed, 9 insertions(+), 4 deletions(-)
>>
>> diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
>> index 75f98c5..b62f9f8 100644
>> --- a/kernel/sched/cputime.c
>> +++ b/kernel/sched/cputime.c
>> @@ -257,7 +257,7 @@ void account_idle_time(cputime_t cputime)
>> cpustat[CPUTIME_IDLE] += (__force u64) cputime;
>> }
>>
>> -static __always_inline bool steal_account_process_tick(void)
>> +static __always_inline unsigned long steal_account_process_tick(void)
>> {
>> #ifdef CONFIG_PARAVIRT
>> if (static_key_false(&paravirt_steal_enabled)) {
>> @@ -279,7 +279,7 @@ static __always_inline bool steal_account_process_tick(void)
>> return steal_jiffies;
>> }
>> #endif
>> - return false;
>> + return 0;
>> }
>>
>> /*
>> @@ -681,12 +681,17 @@ static cputime_t vtime_delta(struct task_struct *tsk)
>> static cputime_t get_vtime_delta(struct task_struct *tsk)
>> {
>> unsigned long now = READ_ONCE(jiffies);
>> - unsigned long delta = now - tsk->vtime_snap;
>> + cputime_t delta_time, steal_time;
>>
>> + steal_time = jiffies_to_cputime(steal_account_process_tick());
>> + delta_time = jiffies_to_cputime(now - tsk->vtime_snap);
>> WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
>> tsk->vtime_snap = now;
>>
>> - return jiffies_to_cputime(delta);
>> + if (steal_time < delta_time)
>> + delta_time -= steal_time;
>> +
>> + return delta_time;
>
> I think this is wrong. If you get more steal time than delta time
> (which as Rik noticed can happen due to partial jiffies), you will end
> up accounting things twice, once in steal_account_process_tick and once
> here. In other words you'll get the exact bug you're trying to fix.
>
> The right thing is to add a max_jiffies argument to
> steal_account_process_tick. steal_account_process_tick will not attempt
> to remove more than max_jiffies. Here you pass delta_jiffies (i.e. now
> - tsk->vtime_snap) to steal_account_process_tick, existing callers can
> pass ULONG_MAX. You can then
>
> return jiffies_to_cputime(delta_jiffies - steal_jiffies);
>
> in get_vtime_delta and not worry about underflow.
Do you mean something like the patch below? Actually, I sometimes see
delta_jiffies < steal_jiffies.
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 75f98c5..a7606a9 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -257,7 +257,7 @@ void account_idle_time(cputime_t cputime)
cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
-static __always_inline bool steal_account_process_tick(void)
+static __always_inline unsigned long
steal_account_process_tick(unsigned long max_jiffies)
{
#ifdef CONFIG_PARAVIRT
if (static_key_false(&paravirt_steal_enabled)) {
@@ -272,14 +272,14 @@ static __always_inline bool
steal_account_process_tick(void)
* time in jiffies. Lets cast the result to jiffies
* granularity and account the rest on the next rounds.
*/
- steal_jiffies = nsecs_to_jiffies(steal);
+ steal_jiffies = min(nsecs_to_jiffies(steal), max_jiffies);
this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
account_steal_time(jiffies_to_cputime(steal_jiffies));
return steal_jiffies;
}
#endif
- return false;
+ return 0;
}
/*
@@ -346,7 +346,7 @@ static void irqtime_account_process_tick(struct
task_struct *p, int user_tick,
u64 cputime = (__force u64) cputime_one_jiffy;
u64 *cpustat = kcpustat_this_cpu->cpustat;
- if (steal_account_process_tick())
+ if (steal_account_process_tick(ULONG_MAX))
return;
cputime *= ticks;
@@ -477,7 +477,7 @@ void account_process_tick(struct task_struct *p,
int user_tick)
return;
}
- if (steal_account_process_tick())
+ if (steal_account_process_tick(ULONG_MAX))
return;
if (user_tick)
@@ -681,12 +681,14 @@ static cputime_t vtime_delta(struct task_struct *tsk)
static cputime_t get_vtime_delta(struct task_struct *tsk)
{
unsigned long now = READ_ONCE(jiffies);
- unsigned long delta = now - tsk->vtime_snap;
+ unsigned long delta_jiffies, steal_jiffies;
+ delta_jiffies = jiffies_to_cputime(now - tsk->vtime_snap);
+ steal_jiffies = jiffies_to_cputime(steal_account_process_tick(delta_jiffies));
WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
tsk->vtime_snap = now;
- return jiffies_to_cputime(delta);
+ return jiffies_to_cputime(delta_jiffies - steal_jiffies);
}
static void __vtime_account_system(struct task_struct *tsk)