Re: [PATCH v2 7/8] x86/kaslr: Clean up slot handling

From: Kees Cook
Date: Tue Jul 28 2020 - 15:34:50 EST


On Mon, Jul 27, 2020 at 07:08:00PM -0400, Arvind Sankar wrote:
> The number of slots and slot areas can be unsigned int: on 64-bit the
> maximum amount of memory is 2^52 bytes and the minimum alignment is
> 2^21, so the number of slots cannot exceed 2^31. The number of slot
> areas is limited by MAX_SLOT_AREA, currently 100. Replace the type used
> for the slot number, currently a mix of int and unsigned long, with
> unsigned int consistently.

I think it's good to standardize the type, but let's go to unsigned long
so we don't have to think about this again in the future.
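
For what it's worth, the bound above does check out -- here's a quick
user-space sketch, with the constants standing in for the limits quoted
in the commit message:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long max_mem   = 1ULL << 52;	/* max physical memory on 64-bit */
		unsigned long long min_align = 1ULL << 21;	/* minimum alignment (2 MiB) */
		unsigned long long max_slots = max_mem / min_align;

		/* 2^31: overflows a signed int, fits in unsigned int or unsigned long */
		printf("max slots = %llu, 2^31 = %llu\n", max_slots, 1ULL << 31);
		return 0;
	}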

> Drop the unnecessary check in store_slot_info that the number of slots
> is not zero: the calculation guarantees at least 1, since the function
> has already returned early if region->size < image_size.
>
> Drop the unnecessary alignment of image_size to CONFIG_PHYSICAL_ALIGN in
> find_random_virt_addr; it cannot change the result: the largest valid
> slot is the largest n that satisfies

I view all of these things as robustness checks. It doesn't hurt to keep
them, and they avoid crashing into problems if future refactoring breaks
the assumptions.

But again, let's split this patch up: type changes, refactoring, etc.

Notes below...

>
> minimum + n * CONFIG_PHYSICAL_ALIGN + image_size <= KERNEL_IMAGE_SIZE
>
> (since minimum is already aligned) and so n is equal to
>
> (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN
>
> even if image_size is not aligned to CONFIG_PHYSICAL_ALIGN.
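
(A quick user-space check of that claim -- the constants below are
stand-ins for CONFIG_PHYSICAL_ALIGN, KERNEL_IMAGE_SIZE, and an
already-aligned minimum:)

	#include <assert.h>
	#include <stdio.h>

	#define PHYS_ALIGN	0x200000UL	/* stand-in for CONFIG_PHYSICAL_ALIGN */
	#define IMAGE_MAX	0x40000000UL	/* stand-in for KERNEL_IMAGE_SIZE */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long minimum = 8 * PHYS_ALIGN;	/* already aligned */
		unsigned long image_size;

		for (image_size = PHYS_ALIGN; image_size <= 64 * PHYS_ALIGN; image_size += 4096) {
			unsigned long raw     = (IMAGE_MAX - minimum - image_size) /
						PHYS_ALIGN + 1;
			unsigned long aligned = (IMAGE_MAX - minimum -
						 ALIGN_UP(image_size, PHYS_ALIGN)) /
						PHYS_ALIGN + 1;

			/* slot count is the same whether or not image_size is aligned first */
			assert(raw == aligned);
		}
		printf("slot counts match for all tested image_size values\n");
		return 0;
	}
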
>
> Signed-off-by: Arvind Sankar <nivedita@xxxxxxxxxxxx>
> ---
> arch/x86/boot/compressed/kaslr.c | 36 ++++++++++++--------------------
> 1 file changed, 13 insertions(+), 23 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
> index 91c5f9771f42..eca2acc65e2a 100644
> --- a/arch/x86/boot/compressed/kaslr.c
> +++ b/arch/x86/boot/compressed/kaslr.c
> @@ -511,16 +511,14 @@ static bool mem_avoid_overlap(struct mem_vector *img,
>
> struct slot_area {
> unsigned long addr;
> - int num;
> + unsigned int num;
> };
>
> #define MAX_SLOT_AREA 100
>
> static struct slot_area slot_areas[MAX_SLOT_AREA];
> -
> -static unsigned long slot_max;
> -
> -static unsigned long slot_area_index;
> +static unsigned int slot_area_index;
> +static unsigned int slot_max;
>
> static void store_slot_info(struct mem_vector *region, unsigned long image_size)
> {
> @@ -530,13 +528,10 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
> return;
>
> slot_area.addr = region->start;
> - slot_area.num = (region->size - image_size) /
> - CONFIG_PHYSICAL_ALIGN + 1;
> + slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN;
>
> - if (slot_area.num > 0) {
> - slot_areas[slot_area_index++] = slot_area;
> - slot_max += slot_area.num;
> - }
> + slot_areas[slot_area_index++] = slot_area;
> + slot_max += slot_area.num;
> }
>
> /*
> @@ -589,8 +584,7 @@ process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
>
> static unsigned long slots_fetch_random(void)
> {
> - unsigned long slot;
> - int i;
> + unsigned int slot, i;
>
> /* Handle case of no slots stored. */
> if (slot_max == 0)
> @@ -603,7 +597,7 @@ static unsigned long slots_fetch_random(void)
> slot -= slot_areas[i].num;
> continue;
> }
> - return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
> + return slot_areas[i].addr + (unsigned long)slot * CONFIG_PHYSICAL_ALIGN;
> }
>
> if (i == slot_area_index)
> @@ -819,28 +813,24 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
> return 0;
> }
>
> - if (process_efi_entries(minimum, image_size))
> - return slots_fetch_random();
> + if (!process_efi_entries(minimum, image_size))
> + process_e820_entries(minimum, image_size);

I like this change; the double-call to slots_fetch_random() bugged me.
:)

>
> - process_e820_entries(minimum, image_size);
> return slots_fetch_random();
> }
>
> static unsigned long find_random_virt_addr(unsigned long minimum,
> unsigned long image_size)
> {
> - unsigned long slots, random_addr;
> -
> - /* Align image_size for easy slot calculations. */
> - image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);
> + unsigned int slots;
> + unsigned long random_addr;
>
> /*
> * There are how many CONFIG_PHYSICAL_ALIGN-sized slots
> * that can hold image_size within the range of minimum to
> * KERNEL_IMAGE_SIZE?
> */
> - slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
> - CONFIG_PHYSICAL_ALIGN + 1;
> + slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN;

These are the same -- why change the code?

>
> random_addr = kaslr_get_random_long("Virtual") % slots;
>
> --
> 2.26.2
>

--
Kees Cook