Re: [PATCH] mm/zsmalloc.c: Fix zsmalloc 32-bit PAE support

From: kbuild test robot
Date: Thu Dec 13 2018 - 01:18:47 EST


Hi Rafael,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v4.20-rc6 next-20181212]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Rafael-David-Tinoco/mm-zsmalloc-c-Fix-zsmalloc-32-bit-PAE-support/20181211-020704
config: mips-allmodconfig (attached as .config)
compiler: mips-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=7.2.0 make.cross ARCH=mips

All errors/warnings (new ones prefixed by >>):

>> mm/zsmalloc.c:116:5: error: #error "MAX_POSSIBLE_PHYSMEM_BITS is wrong for this arch";
#error "MAX_POSSIBLE_PHYSMEM_BITS is wrong for this arch";
^~~~~
In file included from include/linux/cache.h:5:0,
from arch/mips/include/asm/cpu-info.h:15,
from arch/mips/include/asm/cpu-features.h:13,
from arch/mips/include/asm/bitops.h:21,
from include/linux/bitops.h:19,
from include/linux/kernel.h:11,
from include/linux/list.h:9,
from include/linux/module.h:9,
from mm/zsmalloc.c:33:
>> mm/zsmalloc.c:133:49: warning: right shift count is negative [-Wshift-count-negative]
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^
include/uapi/linux/kernel.h:13:40: note: in definition of macro '__KERNEL_DIV_ROUND_UP'
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
^
>> mm/zsmalloc.c:133:2: note: in expansion of macro 'MAX'
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^~~
>> mm/zsmalloc.c:151:59: note: in expansion of macro 'ZS_MIN_ALLOC_SIZE'
#define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
^~~~~~~~~~~~~~~~~
>> mm/zsmalloc.c:256:32: note: in expansion of macro 'ZS_SIZE_CLASSES'
struct size_class *size_class[ZS_SIZE_CLASSES];
^~~~~~~~~~~~~~~
>> mm/zsmalloc.c:133:49: warning: right shift count is negative [-Wshift-count-negative]
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^
include/uapi/linux/kernel.h:13:40: note: in definition of macro '__KERNEL_DIV_ROUND_UP'
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
^
>> mm/zsmalloc.c:133:2: note: in expansion of macro 'MAX'
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^~~
>> mm/zsmalloc.c:151:59: note: in expansion of macro 'ZS_MIN_ALLOC_SIZE'
#define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
^~~~~~~~~~~~~~~~~
>> mm/zsmalloc.c:256:32: note: in expansion of macro 'ZS_SIZE_CLASSES'
struct size_class *size_class[ZS_SIZE_CLASSES];
^~~~~~~~~~~~~~~
>> mm/zsmalloc.c:256:21: error: variably modified 'size_class' at file scope
struct size_class *size_class[ZS_SIZE_CLASSES];
^~~~~~~~~~
In file included from include/linux/kernel.h:10:0,
from include/linux/list.h:9,
from include/linux/module.h:9,
from mm/zsmalloc.c:33:
mm/zsmalloc.c: In function 'get_size_class_index':
>> mm/zsmalloc.c:133:49: warning: right shift count is negative [-Wshift-count-negative]
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^
include/linux/compiler.h:76:40: note: in definition of macro 'likely'
# define likely(x) __builtin_expect(!!(x), 1)
^
>> mm/zsmalloc.c:133:2: note: in expansion of macro 'MAX'
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^~~
mm/zsmalloc.c:543:20: note: in expansion of macro 'ZS_MIN_ALLOC_SIZE'
if (likely(size > ZS_MIN_ALLOC_SIZE))
^~~~~~~~~~~~~~~~~
>> mm/zsmalloc.c:133:49: warning: right shift count is negative [-Wshift-count-negative]
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^
include/linux/compiler.h:76:40: note: in definition of macro 'likely'
# define likely(x) __builtin_expect(!!(x), 1)
^
>> mm/zsmalloc.c:133:2: note: in expansion of macro 'MAX'
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^~~
mm/zsmalloc.c:543:20: note: in expansion of macro 'ZS_MIN_ALLOC_SIZE'
if (likely(size > ZS_MIN_ALLOC_SIZE))
^~~~~~~~~~~~~~~~~
In file included from include/linux/cache.h:5:0,
from arch/mips/include/asm/cpu-info.h:15,
from arch/mips/include/asm/cpu-features.h:13,
from arch/mips/include/asm/bitops.h:21,
from include/linux/bitops.h:19,
from include/linux/kernel.h:11,
from include/linux/list.h:9,
from include/linux/module.h:9,
from mm/zsmalloc.c:33:
>> mm/zsmalloc.c:133:49: warning: right shift count is negative [-Wshift-count-negative]
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^
include/uapi/linux/kernel.h:13:40: note: in definition of macro '__KERNEL_DIV_ROUND_UP'
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
^
>> mm/zsmalloc.c:133:2: note: in expansion of macro 'MAX'
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^~~
mm/zsmalloc.c:544:29: note: in expansion of macro 'ZS_MIN_ALLOC_SIZE'
idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
^~~~~~~~~~~~~~~~~
>> mm/zsmalloc.c:133:49: warning: right shift count is negative [-Wshift-count-negative]
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^
include/uapi/linux/kernel.h:13:40: note: in definition of macro '__KERNEL_DIV_ROUND_UP'
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
^
>> mm/zsmalloc.c:133:2: note: in expansion of macro 'MAX'
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^~~
mm/zsmalloc.c:544:29: note: in expansion of macro 'ZS_MIN_ALLOC_SIZE'
idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
^~~~~~~~~~~~~~~~~
In file included from include/linux/list.h:9:0,
from include/linux/module.h:9,
from mm/zsmalloc.c:33:
>> mm/zsmalloc.c:133:49: warning: right shift count is negative [-Wshift-count-negative]
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
^
include/linux/kernel.h:861:27: note: in definition of macro '__cmp'
#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
^
include/linux/kernel.h:937:27: note: in expansion of macro '__careful_cmp'
#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
^~~~~~~~~~~~~
>> mm/zsmalloc.c:547:9: note: in expansion of macro 'min_t'
return min_t(int, ZS_SIZE_CLASSES - 1, idx);
^~~~~
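
The failure mode here is arithmetic, not syntax: OBJ_INDEX_BITS =
BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS drops to zero or below as soon as
MAX_POSSIBLE_PHYSMEM_BITS reaches BITS_PER_LONG + PAGE_SHIFT - OBJ_TAG_BITS
(32 + 12 - 1 = 43 on a 32-bit config with 4K pages). That is the condition
the #if at line 115 tests, firing the #error at line 116, and any value
above 43 also makes the shift count at line 133 negative. A standalone
sketch of that arithmetic, using assumed illustrative values (BITS_PER_LONG
= 32, PAGE_SHIFT = 12, MAX_POSSIBLE_PHYSMEM_BITS = 48; the real values come
from the arch headers and the attached .config, not from this example):

/* standalone sketch; all values below are assumptions for illustration */
#include <stdio.h>

#define BITS_PER_LONG 32		/* 32-bit arch */
#define PAGE_SHIFT 12			/* 4K pages */
#define OBJ_TAG_BITS 1
#define MAX_POSSIBLE_PHYSMEM_BITS 48	/* assumed PAE-style value, > 43 */

#define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)

int main(void)
{
	printf("_PFN_BITS      = %d\n", _PFN_BITS);	/* 36 */
	printf("OBJ_INDEX_BITS = %d\n", OBJ_INDEX_BITS);	/* -5 */
	return 0;
}

With these numbers, every 'x >> OBJ_INDEX_BITS' in the quoted header expands
to a shift by -5, hence -Wshift-count-negative; see also the encoding sketch
after the quoted source below.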

vim +116 mm/zsmalloc.c

32
> 33 #include <linux/module.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/magic.h>
37 #include <linux/bitops.h>
38 #include <linux/errno.h>
39 #include <linux/highmem.h>
40 #include <linux/string.h>
41 #include <linux/slab.h>
42 #include <asm/tlbflush.h>
43 #include <asm/pgtable.h>
44 #include <linux/cpumask.h>
45 #include <linux/cpu.h>
46 #include <linux/vmalloc.h>
47 #include <linux/preempt.h>
48 #include <linux/spinlock.h>
49 #include <linux/shrinker.h>
50 #include <linux/types.h>
51 #include <linux/debugfs.h>
52 #include <linux/zsmalloc.h>
53 #include <linux/zpool.h>
54 #include <linux/mount.h>
55 #include <linux/migrate.h>
56 #include <linux/pagemap.h>
57 #include <linux/fs.h>
58
59 #define ZSPAGE_MAGIC 0x58
60
61 /*
62 * This must be a power of 2 and greater than or equal to sizeof(link_free).
63 * These two conditions ensure that any 'struct link_free' itself doesn't
64 * span more than 1 page, which avoids the complex case of mapping 2 pages
65 * simply to restore link_free pointer values.
66 */
67 #define ZS_ALIGN 8
68
69 /*
70 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
71 * pages. ZS_MAX_ZSPAGE_ORDER defines the upper limit on N.
72 */
73 #define ZS_MAX_ZSPAGE_ORDER 2
74 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
75
76 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
77
78 /*
79 * Object location (<PFN>, <obj_idx>) is encoded as
80 * a single (unsigned long) handle value.
81 *
82 * Note that object index <obj_idx> starts from 0.
83 */
84
85 /*
86 * Memory allocated for a handle keeps the object position by
87 * encoding <page, obj_idx>, and the encoded value has room
88 * in its least significant bit (i.e., look at obj_to_location).
89 * We use that bit to synchronize object access between the
90 * user and migration.
91 */
92 #define HANDLE_PIN_BIT 0
93
94 /*
95 * The head of an allocated object should have OBJ_ALLOCATED_TAG
96 * to identify whether the object was allocated or not.
97 * It's okay to add the status bit in the least bit because
98 * the header keeps a handle, which is a 4-byte-aligned address,
99 * so we have room for at least two bits.
100 */
101 #define OBJ_ALLOCATED_TAG 1
102 #define OBJ_TAG_BITS 1
103
104 /*
105 * MAX_POSSIBLE_PHYSMEM_BITS should be defined by all archs using zsmalloc:
106 * guessing it from MAX_PHYSMEM_BITS, or assuming it equals BITS_PER_LONG,
107 * proved to be wrong by not accounting for PAE capabilities, or by using
108 * SPARSEMEM-only headers, leading to bad object encoding due to object index overflow.
109 */
110 #ifndef MAX_POSSIBLE_PHYSMEM_BITS
111 #define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
112 #error "MAX_POSSIBLE_PHYSMEM_BITS HAS to be defined by arch using zsmalloc";
113 #else
114 #ifndef CONFIG_64BIT
115 #if (MAX_POSSIBLE_PHYSMEM_BITS >= (BITS_PER_LONG + PAGE_SHIFT - OBJ_TAG_BITS))
> 116 #error "MAX_POSSIBLE_PHYSMEM_BITS is wrong for this arch";
117 #endif
118 #endif
119 #endif
120
121 #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
122 #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
> 123 #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
124
125 #define FULLNESS_BITS 2
126 #define CLASS_BITS 8
127 #define ISOLATED_BITS 3
128 #define MAGIC_VAL_BITS 8
129
130 #define MAX(a, b) ((a) >= (b) ? (a) : (b))
131 /* ZS_MIN_ALLOC_SIZE must be a multiple of ZS_ALIGN */
132 #define ZS_MIN_ALLOC_SIZE \
> 133 MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
134 /* each chunk includes extra space to keep the handle */
135 #define ZS_MAX_ALLOC_SIZE PAGE_SIZE
136
137 /*
138 * On systems with 4K page size, this gives 255 size classes! There is a
139 * trade-off here:
140 * - A large number of size classes is potentially wasteful as free pages
141 * are spread across these classes
142 * - A small number of size classes causes large internal fragmentation
143 * - It's probably better to use specific size classes (empirically
144 * determined). NOTE: all those class sizes must be set as multiples of
145 * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
146 *
147 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiples of ZS_ALIGN
148 * (reason above)
149 */
150 #define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS)
> 151 #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
152 ZS_SIZE_CLASS_DELTA) + 1)
153
154 enum fullness_group {
155 ZS_EMPTY,
156 ZS_ALMOST_EMPTY,
157 ZS_ALMOST_FULL,
158 ZS_FULL,
159 NR_ZS_FULLNESS,
160 };
161
162 enum zs_stat_type {
163 CLASS_EMPTY,
164 CLASS_ALMOST_EMPTY,
165 CLASS_ALMOST_FULL,
166 CLASS_FULL,
167 OBJ_ALLOCATED,
168 OBJ_USED,
169 NR_ZS_STAT_TYPE,
170 };
171
172 struct zs_size_stat {
173 unsigned long objs[NR_ZS_STAT_TYPE];
174 };
175
176 #ifdef CONFIG_ZSMALLOC_STAT
177 static struct dentry *zs_stat_root;
178 #endif
179
180 #ifdef CONFIG_COMPACTION
181 static struct vfsmount *zsmalloc_mnt;
182 #endif
183
184 /*
185 * We assign a page to the ZS_ALMOST_EMPTY fullness group when:
186 * n <= N / f, where
187 * n = number of allocated objects
188 * N = total number of objects zspage can store
189 * f = fullness_threshold_frac
190 *
191 * Similarly, we assign a zspage to:
192 * ZS_ALMOST_FULL when n > N / f
193 * ZS_EMPTY when n == 0
194 * ZS_FULL when n == N
195 *
196 * (see: fix_fullness_group())
197 */
198 static const int fullness_threshold_frac = 4;
199 static size_t huge_class_size;
200
201 struct size_class {
202 spinlock_t lock;
203 struct list_head fullness_list[NR_ZS_FULLNESS];
204 /*
205 * Size of objects stored in this class. Must be multiple
206 * of ZS_ALIGN.
207 */
208 int size;
209 int objs_per_zspage;
210 /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
211 int pages_per_zspage;
212
213 unsigned int index;
214 struct zs_size_stat stats;
215 };
216
217 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
218 static void SetPageHugeObject(struct page *page)
219 {
220 SetPageOwnerPriv1(page);
221 }
222
223 static void ClearPageHugeObject(struct page *page)
224 {
225 ClearPageOwnerPriv1(page);
226 }
227
228 static int PageHugeObject(struct page *page)
229 {
230 return PageOwnerPriv1(page);
231 }
232
233 /*
234 * Placed within free objects to form a singly linked list.
235 * For every zspage, zspage->freeobj gives the head of this list.
236 *
237 * This must be a power of 2 and less than or equal to ZS_ALIGN
238 */
239 struct link_free {
240 union {
241 /*
242 * Free object index;
243 * It's valid for a non-allocated object
244 */
245 unsigned long next;
246 /*
247 * Handle of allocated object.
248 */
249 unsigned long handle;
250 };
251 };
252
253 struct zs_pool {
254 const char *name;
255
> 256 struct size_class *size_class[ZS_SIZE_CLASSES];
257 struct kmem_cache *handle_cachep;
258 struct kmem_cache *zspage_cachep;
259
260 atomic_long_t pages_allocated;
261
262 struct zs_pool_stats stats;
263
264 /* Compact classes */
265 struct shrinker shrinker;
266
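
For context on why those index bits matter: zsmalloc packs <PFN, obj_idx>
plus the tag bit into a single unsigned long handle, so _PFN_BITS +
OBJ_INDEX_BITS + OBJ_TAG_BITS must fit in BITS_PER_LONG. A standalone sketch
of that round trip, modeled on location_to_obj()/obj_to_location() in this
file and assuming an illustrative 64-bit split (52-bit physmem, 4K pages;
not values taken from any real arch):

/* standalone sketch; the bit split below is an assumption for illustration */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG 64			/* assumes a 64-bit host */
#define PAGE_SHIFT 12				/* 4K pages */
#define OBJ_TAG_BITS 1
#define MAX_POSSIBLE_PHYSMEM_BITS 52		/* assumed for illustration */

#define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)		/* 40 */
#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)	/* 23 */
#define OBJ_INDEX_MASK ((1UL << OBJ_INDEX_BITS) - 1)

static unsigned long encode(unsigned long pfn, unsigned long obj_idx)
{
	unsigned long obj = (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);

	return obj << OBJ_TAG_BITS;	/* low bit reserved for OBJ_ALLOCATED_TAG */
}

static void decode(unsigned long handle, unsigned long *pfn, unsigned long *obj_idx)
{
	handle >>= OBJ_TAG_BITS;
	*pfn = handle >> OBJ_INDEX_BITS;
	*obj_idx = handle & OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn, idx;

	decode(encode(0x12345UL, 42), &pfn, &idx);
	assert(pfn == 0x12345UL && idx == 42);
	printf("round trip ok: pfn=%#lx idx=%lu\n", pfn, idx);
	return 0;
}

If _PFN_BITS eats too much of the word (the 32-bit MIPS case above),
OBJ_INDEX_BITS drops to zero or below; if MAX_POSSIBLE_PHYSMEM_BITS is
under-estimated instead, as on 32-bit PAE before this patch, PFNs wider than
_PFN_BITS corrupt the encoding, which is the "object index overflow" the new
comment at lines 104-109 warns about. The same page-size arithmetic also
explains the "255 size classes" comment at line 138: ZS_SIZE_CLASS_DELTA =
4096 >> 8 = 16 and DIV_ROUND_UP(4096 - 32, 16) + 1 = 255.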

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all       Intel Corporation

Attachment: .config.gz
Description: application/gzip