[PATCH v5 08/12] vmstat: switch per-cpu vmstat counters to 32-bits
From: Marcelo Tosatti
Date: Mon Mar 13 2023 - 12:30:01 EST
Some architectures only provide xchg/cmpxchg for 32- and 64-bit
quantities. Since the next patch is about to use xchg() on the per-CPU
vmstat counters, widen them from s8 to s32.
Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
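---

To make the motivation concrete, here is a minimal, hypothetical sketch
(not part of this patch) of the kind of remote access the follow-up
patch enables; the function name flush_one_counter and its framing are
illustrative only:

static void flush_one_counter(struct zone *zone, int cpu,
			      enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s32 __percpu *p = pcp->vm_stat_diff + item;
	s32 v;

	/*
	 * Illustration only: xchg() is guaranteed to exist for 32/64-bit
	 * operands on every architecture, but not for 8-bit ones, hence
	 * the widening from s8 to s32 below.
	 */
	v = xchg(per_cpu_ptr(p, cpu), 0);
	if (v)
		zone_page_state_add(v, zone, item);
}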
Index: linux-vmstat-remote/include/linux/mmzone.h
===================================================================
--- linux-vmstat-remote.orig/include/linux/mmzone.h
+++ linux-vmstat-remote/include/linux/mmzone.h
@@ -689,8 +689,8 @@ struct per_cpu_pages {
struct per_cpu_zonestat {
#ifdef CONFIG_SMP
- s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
- s8 stat_threshold;
+ s32 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+ s32 stat_threshold;
#endif
#ifdef CONFIG_NUMA
/*
@@ -703,8 +703,8 @@ struct per_cpu_zonestat {
};
struct per_cpu_nodestat {
- s8 stat_threshold;
- s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
+ s32 stat_threshold;
+ s32 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};
#endif /* !__GENERATING_BOUNDS_H */
Index: linux-vmstat-remote/mm/vmstat.c
===================================================================
--- linux-vmstat-remote.orig/mm/vmstat.c
+++ linux-vmstat-remote/mm/vmstat.c
@@ -351,7 +351,7 @@ static inline void mod_zone_state(struct
long delta, int overstep_mode)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
long o, n, t, z;
do {
@@ -428,7 +428,7 @@ static inline void mod_node_state(struct
int delta, int overstep_mode)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
long o, n, t, z;
if (vmstat_item_in_bytes(item)) {
@@ -525,7 +525,7 @@ void __mod_zone_page_state(struct zone *
long delta)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
long x;
long t;
@@ -556,7 +556,7 @@ void __mod_node_page_state(struct pglist
long delta)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
long x;
long t;
@@ -614,8 +614,8 @@ EXPORT_SYMBOL(__mod_node_page_state);
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
+ s32 v, t;
/* See __mod_node_page_state */
preempt_disable_nested();
@@ -623,7 +623,7 @@ void __inc_zone_state(struct zone *zone,
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
@@ -635,8 +635,8 @@ void __inc_zone_state(struct zone *zone,
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 v, t;
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
@@ -646,7 +646,7 @@ void __inc_node_state(struct pglist_data
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
@@ -670,8 +670,8 @@ EXPORT_SYMBOL(__inc_node_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_stat_diff + item;
+ s32 v, t;
/* See __mod_node_page_state */
preempt_disable_nested();
@@ -679,7 +679,7 @@ void __dec_zone_state(struct zone *zone,
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
@@ -691,8 +691,8 @@ void __dec_zone_state(struct zone *zone,
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 v, t;
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
@@ -702,7 +702,7 @@ void __dec_node_state(struct pglist_data
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
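
For reviewers unfamiliar with this path, the cmpxchg loop that the
first two hunks feed into looks roughly like the following (paraphrased
from mm/vmstat.c; comments and details vary by kernel version). The
element type behind p is what this_cpu_cmpxchg() operates on, which is
why it must be a width that every architecture supports:

	do {
		z = 0;  /* overflow to zone counters */
		t = this_cpu_read(pcp->stat_threshold);
		o = this_cpu_read(*p);
		n = delta + o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);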