[PATCH] sched/clock: use static_branch_likely() check at sched_clock_running

From: Zhenzhong Duan
Date: Wed Nov 27 2019 - 03:40:45 EST


sched_clock_running is enabled early in the boot process and is never
disabled afterwards. Hint this to the compiler by using
static_branch_likely() rather than static_branch_unlikely().

Fixes: 46457ea464f5 ("sched/clock: Use static key for sched_clock_running")
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@xxxxxxxxxx>
---
kernel/sched/clock.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 1152259..12bca64 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -370,7 +370,7 @@ u64 sched_clock_cpu(int cpu)
if (sched_clock_stable())
return sched_clock() + __sched_clock_offset;

- if (!static_branch_unlikely(&sched_clock_running))
+ if (!static_branch_likely(&sched_clock_running))
return sched_clock();

preempt_disable_notrace();
@@ -393,7 +393,7 @@ void sched_clock_tick(void)
if (sched_clock_stable())
return;

- if (!static_branch_unlikely(&sched_clock_running))
+ if (!static_branch_likely(&sched_clock_running))
return;

lockdep_assert_irqs_disabled();
@@ -460,7 +460,7 @@ void __init sched_clock_init(void)

u64 sched_clock_cpu(int cpu)
{
- if (!static_branch_unlikely(&sched_clock_running))
+ if (!static_branch_likely(&sched_clock_running))
return 0;

return sched_clock();
--
1.8.3.1