Re: [PATCH RFC] rcu/kfree: Do not request RCU when not needed

From: Joel Fernandes
Date: Sat Oct 29 2022 - 09:39:24 EST


On Sat, Oct 29, 2022 at 9:29 AM Joel Fernandes (Google)
<joel@xxxxxxxxxxxxxxxxx> wrote:
>
> Track the CPU's grace-period sequence number when the kfree_rcu()
> monitor work is scheduled; if a full grace period has already elapsed
> by the time the monitor runs, queue the free work directly instead of
> going through queue_rcu_work() and waiting for yet another grace
> period.
>
> On ChromeOS, I am (almost) always seeing the optimization trigger.
> Tested during boot, using trace_printk() to check how often it
> triggers.
>
> Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
> ---
> kernel/rcu/tree.c | 18 +++++++++++++++++-
> 1 file changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 591187b6352e..3e4c50b9fd33 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -2935,6 +2935,7 @@ struct kfree_rcu_cpu_work {
>
> /**
> * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
> + * @rdp: The rdp of the CPU that this kfree_rcu corresponds to.
> * @head: List of kfree_rcu() objects not yet waiting for a grace period
> * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period

Yuck. I forgot to add last_gp_seq to the comments. Will await feedback
before re-spin.
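
Something along these lines, presumably, to be settled in the re-spin:

 * @last_gp_seq: Last gp_seq snapshot taken when scheduling the monitor work.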

- Joel


> * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
> @@ -2964,6 +2965,8 @@ struct kfree_rcu_cpu {
> struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
> raw_spinlock_t lock;
> struct delayed_work monitor_work;
> + struct rcu_data *rdp;
> + unsigned long last_gp_seq;
> bool initialized;
> int count;
>
> @@ -3167,6 +3170,7 @@ schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
> mod_delayed_work(system_wq, &krcp->monitor_work, delay);
> return;
> }
> + krcp->last_gp_seq = krcp->rdp->gp_seq;
> queue_delayed_work(system_wq, &krcp->monitor_work, delay);
> }
>
> @@ -3217,7 +3221,17 @@ static void kfree_rcu_monitor(struct work_struct *work)
> // be that the work is in the pending state when
> // channels have been detached following by each
> // other.
> - queue_rcu_work(system_wq, &krwp->rcu_work);
> + //
> + // NOTE about gp_seq wrap: In case of gp_seq overflow,
> + // it is possible for rdp->gp_seq to be less than
> + // krcp->last_gp_seq even though a GP might be over. In
> + // this rare case, we would just have one extra GP.
> + if (krcp->last_gp_seq &&
> + rcu_seq_completed_gp(krcp->last_gp_seq, krcp->rdp->gp_seq)) {
> + queue_work(system_wq, &krwp->rcu_work.work);
> + } else {
> + queue_rcu_work(system_wq, &krwp->rcu_work);
> + }
> }
> }
>
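
To spell out how the two hunks above fit together, here is a rough
sketch of the resulting flow (illustrative only; the real code is in
the hunks above):

  /* When arming the monitor (schedule_delayed_monitor_work()): */
  krcp->last_gp_seq = krcp->rdp->gp_seq;   /* snapshot the GP sequence */

  /* Later, when the monitor fires (kfree_rcu_monitor()): */
  if (krcp->last_gp_seq &&
      rcu_seq_completed_gp(krcp->last_gp_seq, krcp->rdp->gp_seq)) {
          /* A full grace period already elapsed since the snapshot,
           * so the batched objects can be freed right away. */
          queue_work(system_wq, &krwp->rcu_work.work);
  } else {
          /* Cannot prove a grace period elapsed (unset snapshot, or a
           * rare gp_seq wrap); fall back to queue_rcu_work(), costing
           * at most one extra grace period. */
          queue_rcu_work(system_wq, &krwp->rcu_work);
  }
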
> @@ -4802,6 +4816,8 @@ static void __init kfree_rcu_batch_init(void)
> for_each_possible_cpu(cpu) {
> struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
>
> + krcp->rdp = per_cpu_ptr(&rcu_data, cpu);
> + krcp->last_gp_seq = 0;
> for (i = 0; i < KFREE_N_BATCHES; i++) {
> INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
> krcp->krw_arr[i].krcp = krcp;
> --
> 2.38.1.273.g43a17bfeac-goog
>