Re: [PATCH v2 07/23] sched/cache: Introduce per runqueue task LLC preference counter
From: Tim Chen
Date: Tue Dec 09 2025 - 18:19:36 EST
On Tue, 2025-12-09 at 14:06 +0100, Peter Zijlstra wrote:
> On Wed, Dec 03, 2025 at 03:07:26PM -0800, Tim Chen wrote:
>
> > +#ifdef CONFIG_SCHED_CACHE
> > +
> > +static unsigned int *alloc_new_pref_llcs(unsigned int *old, unsigned int **gc)
> > +{
> > + unsigned int *new = NULL;
> > +
> > + new = kcalloc(new_max_llcs, sizeof(unsigned int),
> > + GFP_KERNEL | __GFP_NOWARN);
> > +
> > + if (!new) {
> > + *gc = NULL;
> > + } else {
> > + /*
> > + * Place old entry in garbage collector
> > + * for later disposal.
> > + */
> > + *gc = old;
> > + }
> > + return new;
> > +}
> > +
> > +static void populate_new_pref_llcs(unsigned int *old, unsigned int *new)
> > +{
> > + int i;
> > +
> > + if (!old)
> > + return;
> > +
> > + for (i = 0; i < max_llcs; i++)
> > + new[i] = old[i];
> > +}
> > +
> > +static int resize_llc_pref(void)
> > +{
> > + unsigned int *__percpu *tmp_llc_pref;
> > + int i, ret = 0;
> > +
> > + if (new_max_llcs <= max_llcs)
> > + return 0;
> > +
> > + /*
> > + * Allocate temp percpu pointer for old llc_pref,
> > + * which will be released after switching to the
> > + * new buffer.
> > + */
> > + tmp_llc_pref = alloc_percpu_noprof(unsigned int *);
> > + if (!tmp_llc_pref)
> > + return -ENOMEM;
> > +
> > + for_each_present_cpu(i)
> > + *per_cpu_ptr(tmp_llc_pref, i) = NULL;
> > +
> > + /*
> > + * Resize the per rq nr_pref_llc buffer and
> > + * switch to this new buffer.
> > + */
> > + for_each_present_cpu(i) {
> > + struct rq_flags rf;
> > + unsigned int *new;
> > + struct rq *rq;
> > +
> > + rq = cpu_rq(i);
> > + new = alloc_new_pref_llcs(rq->nr_pref_llc, per_cpu_ptr(tmp_llc_pref, i));
> > + if (!new) {
> > + ret = -ENOMEM;
> > +
> > + goto release_old;
> > + }
> > +
> > + /*
> > + * Locking rq ensures that rq->nr_pref_llc values
> > + * don't change with new task enqueue/dequeue
> > + * when we repopulate the newly enlarged array.
> > + */
>
> guard(rq_lock_irq)(rq);
>
> Notably, this cannot be with IRQs disabled, as you're doing allocations.
Okay.
>
> > + rq_lock_irqsave(rq, &rf);
> > + populate_new_pref_llcs(rq->nr_pref_llc, new);
> > + rq->nr_pref_llc = new;
> > + rq_unlock_irqrestore(rq, &rf);
> > + }
> > +
> > +release_old:
> > + /*
> > + * Load balance is done under rcu_lock.
> > + * Wait for load balance before and during resizing to
> > + * be done. They may refer to old nr_pref_llc[]
> > + * that hasn't been resized.
> > + */
> > + synchronize_rcu();
> > + for_each_present_cpu(i)
> > + kfree(*per_cpu_ptr(tmp_llc_pref, i));
> > +
> > + free_percpu(tmp_llc_pref);
> > +
> > + /* succeed and update */
> > + if (!ret)
> > + max_llcs = new_max_llcs;
> > +
> > + return ret;
> > +}
>
> I think you need at least cpus_read_lock(), because present_cpu is
> dynamic -- but I'm not quite sure what lock is used to serialize it.
Let me check what the right lock is for making sure present_cpu
is not changed. Thanks.
Tim