[PATCH 2/3] x86/boot/KASLR: change the prototype of process_mem_region() to meet the alignment requirement

From: Pingfan Liu
Date: Wed Sep 05 2018 - 22:37:01 EST


Change the prototype of process_mem_region() so that it can be reused to find
a region that satisfies a special alignment requirement (patch 3/3 uses it to
find a region aligned on a 1GiB boundary). The bulk of the logic moves into a
new helper, __process_mem_region(), which takes the alignment and a store
callback as parameters; process_mem_region() becomes a thin wrapper that
passes CONFIG_PHYSICAL_ALIGN and store_slot_info(). Also change the data type
of mem_vector.size to a signed type to make it easy to detect an underflow of
the size.
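
For illustration only (a hypothetical sketch, not part of this series): with
the new helper, a caller such as the one planned for patch 3/3 could look like
the following. The name process_1g_aligned_region() is made up here, while
__process_mem_region(), store_slot_info() and PUD_SIZE already exist in
kaslr.c or are introduced by this patch:

/* Hypothetical wrapper: collect slots aligned on a 1GiB (PUD_SIZE) boundary. */
static void process_1g_aligned_region(struct mem_vector *entry,
				      unsigned long minimum,
				      unsigned long image_size)
{
	/*
	 * PUD_SIZE is 1GiB on x86_64; store_slot_info() is the existing
	 * callback that process_mem_region() also passes.
	 */
	__process_mem_region(entry, minimum, image_size, PUD_SIZE,
			     store_slot_info);
}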

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Baoquan He <bhe@xxxxxxxxxx>
Cc: Chao Fan <fanc.fnst@xxxxxxxxxxxxxx>
Cc: x86@xxxxxxxxxx
---
arch/x86/boot/compressed/kaslr.c | 36 ++++++++++++++++++++++++------------
1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 5185267..584f17c 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -89,7 +89,7 @@ static unsigned long get_boot_seed(void)

struct mem_vector {
unsigned long long start;
- unsigned long long size;
+ long long size;
};

/* Only supporting at most 4 unusable memmap regions with kaslr */
@@ -577,9 +577,12 @@ typedef void (*handles_mem_region)(struct mem_vector *entry,
unsigned long minimum,
unsigned long image_size);

-static void process_mem_region(struct mem_vector *entry,
- unsigned long minimum,
- unsigned long image_size)
+typedef void (*store_info)(struct mem_vector *region,
+ unsigned long image_size);
+
+static void __process_mem_region(struct mem_vector *entry,
+ unsigned long minimum, unsigned long volume, unsigned long align,
+ store_info store)
{
struct mem_vector region, overlap;
struct slot_area slot_area;
@@ -598,9 +601,11 @@ static void process_mem_region(struct mem_vector *entry,
end = min(entry->size + entry->start, mem_limit);
if (entry->start >= end)
return;
- cur_entry.start = entry->start;
- cur_entry.size = end - entry->start;

+ cur_entry.start = ALIGN(entry->start, align);
+ cur_entry.size = end - cur_entry.start;
+ if (cur_entry.size < 0)
+ return;
region.start = cur_entry.start;
region.size = cur_entry.size;

@@ -613,7 +618,7 @@ static void process_mem_region(struct mem_vector *entry,
region.start = minimum;

/* Potentially raise address to meet alignment needs. */
- region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
+ region.start = ALIGN(region.start, align);

/* Did we raise the address above the passed in memory entry? */
if (region.start > cur_entry.start + cur_entry.size)
@@ -628,22 +633,22 @@ static void process_mem_region(struct mem_vector *entry,
region.size = KERNEL_IMAGE_SIZE - region.start;

/* Return if region can't contain decompressed kernel */
- if (region.size < image_size)
+ if (region.size < volume)
return;

/* If nothing overlaps, store the region and return. */
if (!mem_avoid_overlap(&region, &overlap)) {
- process_gb_huge_pages(&region, image_size);
+ (*store)(&region, volume);
return;
}

- /* Store beginning of region if holds at least image_size. */
- if (overlap.start > region.start + image_size) {
+ /* Store beginning of region if holds at least volume. */
+ if (overlap.start > region.start + volume) {
struct mem_vector beginning;

beginning.start = region.start;
beginning.size = overlap.start - region.start;
- process_gb_huge_pages(&beginning, image_size);
+ (*store)(&beginning, volume);
}

/* Return if overlap extends to or past end of region. */
@@ -656,6 +661,13 @@ static void process_mem_region(struct mem_vector *entry,
}
}

+static void process_mem_region(struct mem_vector *entry,
+ unsigned long minimum, unsigned long image_size)
+{
+ __process_mem_region(entry, minimum, image_size, CONFIG_PHYSICAL_ALIGN,
+ store_slot_info);
+}
+
#ifdef CONFIG_EFI
/*
* Returns true if mirror region found (and must have been processed
--
2.7.4