Re: [PATCH 4/5] locking/lockdep: Reuse free chain_hlocks entries
From: Peter Zijlstra
Date: Fri Dec 13 2019 - 05:25:38 EST
On Thu, Dec 12, 2019 at 05:35:24PM -0500, Waiman Long wrote:
> diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
> index 4c23ab7d27c2..999cd714e0d1 100644
> --- a/kernel/locking/lockdep_internals.h
> +++ b/kernel/locking/lockdep_internals.h
> @@ -107,8 +107,15 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
> #endif
>
> #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
> -
> #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
> +#define MAX_CHAIN_HLOCKS_BLOCKS 1024
> +#define CHAIN_HLOCKS_HASH 8
> +
> +struct chain_hlocks_block {
> + unsigned int depth: 8,
> + base :24;
> + struct chain_hlocks_block *next;
> +};
>
> extern struct list_head all_lock_classes;
> extern struct lock_chain lock_chains[];
This doesn't need to be in the header; there are no users outside of the
code you add below.
> +#ifdef CONFIG_PROVE_LOCKING
> +static struct chain_hlocks_block chain_hlocks_blocks[MAX_CHAIN_HLOCKS_BLOCKS];
> +static struct chain_hlocks_block *chain_hlocks_block_hash[CHAIN_HLOCKS_HASH];
> +static struct chain_hlocks_block *free_chain_hlocks_blocks;
> +
> +/*
> + * Graph lock must be held before calling the chain_hlocks_block functions.
> + * Chain hlocks of depth 1-(CHAIN_HLOCKS_HASH-1) is mapped directly to
> + * chain_hlocks_block_hash[1-(CHAIN_HLOCKS_HASH-1)]. All other sizes
> + * are mapped to chain_hlocks_block_hash[0].
> + */
> +static inline struct chain_hlocks_block *alloc_chain_hlocks_block(void)
> +{
> + struct chain_hlocks_block *block = free_chain_hlocks_blocks;
> +
> + WARN_ONCE(!debug_locks_silent && !block,
> + "Running out of chain_hlocks_block\n");
> + free_chain_hlocks_blocks = block ? block->next : NULL;
> + return block;
> +}
> +
> +static inline void free_chain_hlocks_block(struct chain_hlocks_block *block)
> +{
> + block->next = free_chain_hlocks_blocks;
> + free_chain_hlocks_blocks = block;
> +}
> +
> +static inline void push_chain_hlocks_block(struct chain_hlocks_block *block)
> +{
> + int hash, depth = block->depth;
> +
> + hash = (depth >= CHAIN_HLOCKS_HASH) ? 0 : depth;
> + block->next = chain_hlocks_block_hash[hash];
> + chain_hlocks_block_hash[hash] = block;
> + nr_free_chain_hlocks += depth;
> +}
I would argue this is not a hash; these are buckets. You're doing
regular per-size buckets.
> +static inline struct chain_hlocks_block *pop_chain_hlocks_block(int depth)
> +{
> + struct chain_hlocks_block *curr, **pprev;
> +
> + if (!nr_free_chain_hlocks)
> + return NULL;
> +
> + if (depth < CHAIN_HLOCKS_HASH) {
> + curr = chain_hlocks_block_hash[depth];
> + if (curr) {
> + chain_hlocks_block_hash[depth] = curr->next;
> + nr_free_chain_hlocks -= depth;
> + }
> + return curr;
> + }
> +
> + /*
> + * For depth >= CHAIN_HLOCKS_HASH, it is not really a pop operation.
> + * Instead, the first entry with the right size is returned.
> + */
> + curr = chain_hlocks_block_hash[0];
> + pprev = chain_hlocks_block_hash;
> +
> + while (curr) {
> + if (curr->depth == depth)
> + break;
> + pprev = &(curr->next);
> + curr = curr->next;
> + }
> +
> + if (curr) {
> + *pprev = curr->next;
> + nr_free_chain_hlocks -= depth;
> + }
> + return curr;
> +}
> +#else
> +static inline void free_chain_hlocks_block(struct chain_hlocks_block *block) { }
> +static inline struct chain_hlocks_block *pop_chain_hlocks_block(int depth)
> +{
> + return NULL;
> +}
> +#endif /* CONFIG_PROVE_LOCKING */
You've made a bit of a mess of things though. Why is that push/pop crud
exposed? Why didn't you build a self-contained allocator?
All you really want is:
	u16 *alloc_chain(int size);
	void free_chain(int size, u16 *chain);
An implementation would be something along these lines (completely
untested, fresh from the keyboard):
struct chain_block {
	int size;
	int base;
	struct chain_block *next;
};

struct chain_block chain_blocks[MAX_BLOCKS];
struct chain_block *free_blocks;

struct chain_block init_block = {
	.size = MAX_LOCKDEP_CHAIN_HLOCKS,
	.base = 0,
	.next = NULL,
};

struct chain_block *free_list[MAX_BUCKETS] = {
	&init_block, /* 0 */
};
void free_block(struct chain_block *b)
{
	b->next = free_blocks;
	free_blocks = b;
}

struct chain_block *alloc_block(void)
{
	struct chain_block *block = free_blocks;

	if (!block)
		return NULL;

	free_blocks = block->next;
	return block;
}
struct chain_block *pop_block(struct chain_block **bp)
{
	struct chain_block *b = *bp;

	if (!b)
		return NULL;

	*bp = b->next;
	return b;
}

void push_block(struct chain_block **bp, struct chain_block *b)
{
	b->next = *bp;
	*bp = b;
}
/* could contemplate ilog2() buckets */
int size2bucket(int size)
{
	return size >= MAX_BUCKETS ? 0 : size;
}
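
/*
 * Hypothetical sketch of the ilog2() idea, not part of the proposal proper:
 * keep bucket 0 as the mixed/oversize list and use power-of-two size classes
 * for the rest. Buckets then hold a range of sizes, so popping one would
 * need a fit check like pop_block_0() does.
 */
int size2bucket_log2(int size)
{
	int bucket = ilog2(size) + 1;

	return bucket >= MAX_BUCKETS ? 0 : bucket;
}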
/* bucket[0] is mixed size */
struct chain_block *pop_block_0(struct chain_block **bp, int size)
{
	struct chain_block **p = bp, *b = *bp;

	/* first fit: take the first block that is large enough */
	while (b && b->size < size) {
		p = &b->next;
		b = b->next;
	}

	if (!b)
		return NULL;

	*p = b->next;
	return b;
}
u16 *alloc_chain(int size)
{
	int i, bucket = size2bucket(size);
	struct chain_block *b;
	u16 *chain;

	if (!bucket) {
		b = pop_block_0(&free_list[0], size);
		if (b)
			goto got_block;
	} else {
		b = pop_block(&free_list[bucket]);
		if (b)
			goto got_block;

		/* pop a large block, hope the fragment is still useful */
		b = pop_block_0(&free_list[0], size);
		if (b)
			goto got_block;

		/* steal from a larger bucket and return the fragment */
		for (i = MAX_BUCKETS - 1; i > bucket; i--) {
			b = pop_block(&free_list[i]);
			if (b)
				goto got_block;
		}
	}

	return NULL;

got_block:
	chain = chain_hlocks + b->base;
	b->base += size;
	b->size -= size;

	if (b->size) {
		/* return the fragment */
		bucket = size2bucket(b->size);
		push_block(&free_list[bucket], b);
	} else {
		free_block(b);
	}

	return chain;
}
void free_chain(int size, u16 *chain)
{
	struct chain_block *b = alloc_block();
	int bucket = size2bucket(size);

	if (!b) {
		/* no block descriptors left; leak the entries */
		return;
	}

	b->size = size;
	b->base = chain - chain_hlocks;
	push_block(&free_list[bucket], b);
}
void init_blocks(void)
{
	int i;

	for (i = 0; i < MAX_BLOCKS; i++)
		free_block(&chain_blocks[i]);
}
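
A caller-side sketch (hypothetical helper name, assuming the existing
chain_hlocks[] array and lock_chain::{base,depth} from lockdep.c) could
then look something like:

static int set_chain_entries(struct lock_chain *chain, const u16 *ids)
{
	u16 *entries = alloc_chain(chain->depth);
	int i;

	if (!entries)
		return 0;	/* out of chain_hlocks space */

	chain->base = entries - chain_hlocks;
	for (i = 0; i < chain->depth; i++)
		entries[i] = ids[i];

	return 1;
}

with the freeing side simply being free_chain(chain->depth, chain_hlocks +
chain->base) wherever a chain gets zapped.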