[RFC PATCH V2 5/10] x86/Hyper-V/Balloon: Avoid releasing ha_lock when traverse ha_region_list

From: lantianyu1986
Date: Tue Jan 07 2020 - 08:10:25 EST


From: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>

ha_lock protects ha_region_list. It is held in
hv_online_page() and handle_pg_range(). handle_pg_range()
traverses the ha region list, finds the associated hot-add
region and adds memory into the system. hv_online_page() is
called inside add_memory(). The current code releases ha_lock
before calling add_memory() to avoid holding ha_lock twice in
hv_online_page().

To avoid releasing ha_lock, add a "lock_thread" field to struct
hv_dynmem_device to record the thread that is traversing the ha
region list. hv_online_page() checks "lock_thread" and takes
ha_lock only when the current thread is not "lock_thread".

Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
---
drivers/hv/hv_balloon.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 185146795122..729dc5551302 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -547,6 +547,7 @@ struct hv_dynmem_device {
* regions from ha_region_list.
*/
struct mutex ha_lock;
+ struct task_struct *lock_thread;

/*
* A list of hot-add regions.
@@ -711,7 +712,6 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
for (i = 0; i < (size/HA_CHUNK); i++) {
start_pfn = start + (i * HA_CHUNK);

- mutex_lock(&dm_device.ha_lock);
has->ha_end_pfn += HA_CHUNK;

if (total_pfn > HA_CHUNK) {
@@ -723,7 +723,6 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}

has->covered_end_pfn += processed_pfn;
- mutex_unlock(&dm_device.ha_lock);

init_completion(&dm_device.ol_waitevent);
dm_device.ha_waiting = !memhp_auto_online;
@@ -744,10 +743,8 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
*/
do_hot_add = false;
}
- mutex_lock(&dm_device.ha_lock);
has->ha_end_pfn -= HA_CHUNK;
has->covered_end_pfn -= processed_pfn;
- mutex_unlock(&dm_device.ha_lock);
break;
}

@@ -769,8 +766,14 @@ static void hv_online_page(struct page *pg, unsigned int order)
{
struct hv_hotadd_state *has;
unsigned long pfn = page_to_pfn(pg);
+ int need_unlock = 0;
+
+ /* Take ha_lock here if the current thread does not already hold it. */
+ if (dm_device.lock_thread != current) {
+ mutex_lock(&dm_device.ha_lock);
+ need_unlock = 1;
+ }

- mutex_lock(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
if ((pfn < has->start_pfn) ||
@@ -780,7 +783,8 @@ static void hv_online_page(struct page *pg, unsigned int order)
hv_bring_pgs_online(has, pfn, 1UL << order);
break;
}
- mutex_unlock(&dm_device.ha_lock);
+ if (need_unlock)
+ mutex_unlock(&dm_device.ha_lock);
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -857,6 +861,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
pg_start);

mutex_lock(&dm_device.ha_lock);
+ dm_device.lock_thread = current;
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
* If the pfn range we are dealing with is not in the current
@@ -909,9 +914,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
} else {
pfn_cnt = size;
}
- mutex_unlock(&dm_device.ha_lock);
hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
- mutex_lock(&dm_device.ha_lock);
}
/*
* If we managed to online any pages that were given to us,
@@ -920,6 +923,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
res = has->covered_end_pfn - old_covered_state;
break;
}
+ dm_device.lock_thread = NULL;
mutex_unlock(&dm_device.ha_lock);

return res;
--
2.14.5