Re: [PATCH v3 04/19] mm: slub: implement SLUB version of obj_to_index()
From: Roman Gushchin
Date: Thu Apr 23 2020 - 17:02:02 EST
On Wed, Apr 22, 2020 at 11:52:13PM +0000, Christoph Lameter wrote:
> On Wed, 22 Apr 2020, Roman Gushchin wrote:
>
> >  enum stat_item {
> >  	ALLOC_FASTPATH,		/* Allocation from cpu slab */
> > @@ -86,6 +87,7 @@ struct kmem_cache {
> >  	unsigned long min_partial;
> >  	unsigned int size;	/* The size of an object including metadata */
> >  	unsigned int object_size;/* The size of an object without metadata */
> > +	struct reciprocal_value reciprocal_size;
>
>
> This needs to be moved further back since it is not an item that needs to
> be cache hot for the hotpaths. Place it with "align", inuse etc?
>
> Hmmm. The same applies to min_partial maybe?
>
>
Something like this?
Thanks!
--
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cdf4f299c982..6246a3c65cd5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -84,10 +84,8 @@ struct kmem_cache {
 	struct kmem_cache_cpu __percpu *cpu_slab;
 	/* Used for retrieving partial slabs, etc. */
 	slab_flags_t flags;
-	unsigned long min_partial;
 	unsigned int size;	/* The size of an object including metadata */
 	unsigned int object_size;/* The size of an object without metadata */
-	struct reciprocal_value reciprocal_size;
 	unsigned int offset;	/* Free pointer offset */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
@@ -103,6 +101,8 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	unsigned int inuse;		/* Offset to metadata */
 	unsigned int align;		/* Alignment */
+	unsigned long min_partial;
+	struct reciprocal_value reciprocal_size;
 	unsigned int red_left_pad;	/* Left redzone padding size */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
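
For reference, the reason reciprocal_size exists at all: obj_to_index() has
to divide an object's byte offset within the slab by s->size, and caching a
struct reciprocal_value turns that per-call division into a multiply-and-shift.
A minimal sketch of the idea, using the reciprocal_value()/reciprocal_divide()
helpers from <linux/reciprocal_div.h> (names and placement here are
illustrative, not the exact patch code):

#include <linux/reciprocal_div.h>

/* At cache creation, once s->size is final (e.g. in calculate_sizes()): */
s->reciprocal_size = reciprocal_value(s->size);

/*
 * Index lookup then derives the object index from the object's byte
 * offset within the slab without issuing a hardware division.
 */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(obj - addr, cache->reciprocal_size);
}

Since reciprocal_size is written once at cache creation and, per the point
above, doesn't need to be cache hot for the hotpaths, parking it next to the
other setup-time fields (inuse, align, min_partial) keeps the leading cache
line for the fields the allocation fastpath actually touches.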