[PATCH v2 7/8] mm/lru: likely enhancement
From: Alex Shi
Date: Tue Nov 12 2019 - 09:06:49 EST
Use likely()/unlikely() branch hints that match the typical pagevec usage pattern: when relocking a page's lruvec while walking a pagevec, the lruvec lock we already hold is usually the right one, so annotate that fast path as likely() and the take-lock/switch-lock/retry paths as unlikely() to reduce branch mis-speculation.
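
For reference, the kernel's likely()/unlikely() macros wrap __builtin_expect()
(see include/linux/compiler.h), which tells the compiler which branch outcome
to lay out as the fall-through path. A minimal userspace sketch of the same
idea (illustrative only, not part of this patch):

#include <stdio.h>

/* Same shape as the kernel's include/linux/compiler.h hints. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int fast_path_hit(const void *held, const void *wanted)
{
	/* Annotated as the common case: the compiler keeps this path hot. */
	if (likely(held == wanted))
		return 1;

	/* Rare case: typically laid out out of line. */
	return 0;
}

int main(void)
{
	int x;

	printf("%d\n", fast_path_hit(&x, &x));
	return 0;
}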
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
include/linux/memcontrol.h | 8 ++++----
mm/memcontrol.c | 4 ++--
2 files changed, 6 insertions(+), 6 deletions(-)
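
As context for the hunks below, here is an illustrative userspace analogue
(hypothetical names such as toy_lruvec/toy_relock, not kernel code): during a
pagevec walk, consecutive pages usually resolve to the same lruvec, so the
relock helper keeps the already-held lock on the likely() path and only rarely
has to drop it and take another:

#include <pthread.h>
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical stand-in for an lruvec with its own lock. */
struct toy_lruvec {
	pthread_mutex_t lock;
};

/* Analogue of relock_page_lruvec_irq(): keep the lock we already hold
 * when it is the right one, the common case for batched pages. */
static struct toy_lruvec *toy_relock(struct toy_lruvec *locked,
				     struct toy_lruvec *wanted)
{
	if (unlikely(!locked)) {
		pthread_mutex_lock(&wanted->lock);
		return wanted;
	}
	if (likely(locked == wanted))
		return locked;

	pthread_mutex_unlock(&locked->lock);
	pthread_mutex_lock(&wanted->lock);
	return wanted;
}

int main(void)
{
	struct toy_lruvec a = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct toy_lruvec *held = NULL;
	int i;

	/* A "pagevec" of 8 entries all mapping to the same lruvec: only the
	 * first iteration takes the lock, the rest hit the likely() path. */
	for (i = 0; i < 8; i++)
		held = toy_relock(held, &a);

	pthread_mutex_unlock(&held->lock);
	printf("relock fast path exercised\n");
	return 0;
}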
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f869897a68f0..2a6d7a503452 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1307,12 +1307,12 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 	struct pglist_data *pgdat = page_pgdat(page);
 	struct lruvec *lruvec;
 
-	if (!locked_lruvec)
+	if (unlikely(!locked_lruvec))
 		goto lock;
 
 	lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
-	if (locked_lruvec == lruvec)
+	if (likely(locked_lruvec == lruvec))
 		return lruvec;
 
 	spin_unlock_irq(&locked_lruvec->lru_lock);
@@ -1329,12 +1329,12 @@ static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
 	struct pglist_data *pgdat = page_pgdat(page);
 	struct lruvec *lruvec;
 
-	if (!locked_lruvec)
+	if (unlikely(!locked_lruvec))
 		goto lock;
 
 	lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
-	if (locked_lruvec == lruvec)
+	if (likely(locked_lruvec == lruvec))
 		return lruvec;
 
 	spin_unlock_irqrestore(&locked_lruvec->lru_lock, locked_lruvec->flags);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d2539bac4677..d95adf49fae3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1273,7 +1273,7 @@ struct lruvec *lock_page_lruvec_irq(struct page *page,
 	spin_lock_irq(&lruvec->lru_lock);
 
 	/* lruvec may changed in commit_charge() */
-	if (lruvec != mem_cgroup_page_lruvec(page, pgdat)) {
+	if (unlikely(lruvec != mem_cgroup_page_lruvec(page, pgdat))) {
 		spin_unlock_irq(&lruvec->lru_lock);
 		goto again;
 	}
@@ -1291,7 +1291,7 @@ struct lruvec *lock_page_lruvec_irqsave(struct page *page,
 	spin_lock_irqsave(&lruvec->lru_lock, lruvec->flags);
 
 	/* lruvec may changed in commit_charge() */
-	if (lruvec != mem_cgroup_page_lruvec(page, pgdat)) {
+	if (unlikely(lruvec != mem_cgroup_page_lruvec(page, pgdat))) {
 		spin_unlock_irqrestore(&lruvec->lru_lock, lruvec->flags);
 		goto again;
 	}
--
1.8.3.1