[PATCH v4 24/27] sched: fix kernel-doc markup

From: Mauro Carvalho Chehab
Date: Mon Nov 16 2020 - 05:20:49 EST


Kernel-doc requires the kernel-doc markup to be placed immediately
before the function prototype it documents; otherwise the
documentation ends up attributed to the wrong function.
So, move the sys_sched_yield() markup to the right place.

Also fix the cpu_util() markup: kernel-doc summary lines
should use this format:

	identifier - description
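
For reference, a minimal sketch of the expected layout (struct foo
and foo_count() are purely illustrative, not part of this patch):

	struct foo {
		int count;
	};

	/**
	 * foo_count - return the number of elements currently stored in @f
	 * @f: the foo instance to query
	 *
	 * Return: the element count.
	 */
	static int foo_count(const struct foo *f)
	{
		return f->count;
	}

Note that the comment block sits directly above the definition it
describes, and the summary line uses the identifier - description form.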

Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@xxxxxxxxxx>
---
kernel/sched/core.c | 16 ++++++++--------
kernel/sched/fair.c | 2 +-
2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 02076e6d3792..9d41378ae8f3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6560,65 +6560,65 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
if (len & (sizeof(unsigned long)-1))
return -EINVAL;

if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;

ret = sched_getaffinity(pid, mask);
if (ret == 0) {
unsigned int retlen = min(len, cpumask_size());

if (copy_to_user(user_mask_ptr, mask, retlen))
ret = -EFAULT;
else
ret = retlen;
}
free_cpumask_var(mask);

return ret;
}

-/**
- * sys_sched_yield - yield the current processor to other threads.
- *
- * This function yields the current CPU to other tasks. If there are no
- * other threads running on this CPU then this function will return.
- *
- * Return: 0.
- */
static void do_sched_yield(void)
{
struct rq_flags rf;
struct rq *rq;

rq = this_rq_lock_irq(&rf);

schedstat_inc(rq->yld_count);
current->sched_class->yield_task(rq);

preempt_disable();
rq_unlock_irq(rq, &rf);
sched_preempt_enable_no_resched();

schedule();
}

+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU to other tasks. If there are no
+ * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
+ */
SYSCALL_DEFINE0(sched_yield)
{
do_sched_yield();
return 0;
}

#ifndef CONFIG_PREEMPTION
int __sched _cond_resched(void)
{
if (should_resched(0)) {
preempt_schedule_common();
return 1;
}
rcu_all_qs();
return 0;
}
EXPORT_SYMBOL(_cond_resched);
#endif

/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 27536f37ba1a..cb7cd7d8a28f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6310,41 +6310,41 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
sd = rcu_dereference(per_cpu(sd_llc, target));
if (!sd)
return target;

i = select_idle_core(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;

i = select_idle_cpu(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;

i = select_idle_smt(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;

return target;
}

/**
- * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
+ * cpu_util - Estimates the amount of capacity of a CPU used by CFS tasks.
* @cpu: the CPU to get the utilization of
*
* The unit of the return value must be the one of capacity so we can compare
* the utilization with the capacity of the CPU that is available for CFS task
* (ie cpu_capacity).
*
* cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
* recent utilization of currently non-runnable tasks on a CPU. It represents
* the amount of utilization of a CPU in the range [0..capacity_orig] where
* capacity_orig is the cpu_capacity available at the highest frequency
* (arch_scale_freq_capacity()).
* The utilization of a CPU converges towards a sum equal to or less than the
* current capacity (capacity_curr <= capacity_orig) of the CPU because it is
* the running time on this CPU scaled by capacity_curr.
*
* The estimated utilization of a CPU is defined to be the maximum between its
* cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
* currently RUNNABLE on that CPU.
* This allows to properly represent the expected utilization of a CPU which
* has just got a big task running since a long sleep period. At the same time
--
2.28.0