[RFC Part2 PATCH 03/30] x86: add helper functions for RMPUPDATE and PSMASH instruction

From: Brijesh Singh
Date: Wed Mar 24 2021 - 13:05:55 EST


The RMPUPDATE instruction writes a new RMP entry in the RMP table. The
hypervisor will use the instruction to add or modify entries in the RMP
table. See AMD APM Volume 3 for details on the instruction operation.

The PSMASH instruction expands a 2MB RMP entry into a corresponding set of
contiguous 4KB-page RMP entries. The hypervisor will use this instruction
to adjust the RMP entry without invalidating the previous RMP entry.

Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Joerg Roedel <jroedel@xxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxx>
Cc: "Peter Zijlstra (Intel)" <peterz@xxxxxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: kvm@xxxxxxxxxxxxxxx
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
---
arch/x86/include/asm/sev-snp.h | 27 ++++++++++++++++++++++
arch/x86/mm/mem_encrypt.c | 41 ++++++++++++++++++++++++++++++++++
2 files changed, 68 insertions(+)

diff --git a/arch/x86/include/asm/sev-snp.h b/arch/x86/include/asm/sev-snp.h
index 2aa14b38c5ed..199d88a38c76 100644
--- a/arch/x86/include/asm/sev-snp.h
+++ b/arch/x86/include/asm/sev-snp.h
@@ -96,6 +96,29 @@ typedef struct rmpentry rmpentry_t;
#define rmpentry_gpa(x) ((unsigned long)(x)->info.gpa)
#define rmpentry_immutable(x) ((x)->info.immutable)

+
+/*
+ * Return codes of RMPUPDATE.  FAIL_INUSE means another processor is
+ * concurrently modifying the RMP entry and the caller should retry.
+ * NOTE(review): presumably these mirror the hardware-defined status
+ * codes of the instruction -- confirm against AMD APM Volume 3.
+ */
+#define RMPUPDATE_SUCCESS 0
+#define RMPUPDATE_FAIL_INPUT 1
+#define RMPUPDATE_FAIL_PERMISSION 2
+#define RMPUPDATE_FAIL_INUSE 3
+#define RMPUPDATE_FAIL_OVERLAP 4
+
+/*
+ * Input operand for RMPUPDATE, passed to the instruction by reference
+ * (see rmptable_rmpupdate()).  __packed because the layout presumably
+ * must match the hardware-defined structure exactly -- confirm vs APM.
+ */
+struct rmpupdate {
+ u64 gpa;	/* guest physical address the page is mapped at */
+ u8 assigned;	/* assumes: nonzero => page assigned to a guest -- TODO confirm */
+ u8 pagesize;	/* RMP page size; 4KB vs 2MB per commit text, encoding TODO confirm */
+ u8 immutable;	/* see rmpentry_immutable() above */
+ u8 rsvd;	/* reserved */
+ u32 asid;	/* ASID of the guest the page is assigned to */
+} __packed;
+
+/*
+ * Return codes of PSMASH.  No success code is defined here; presumably
+ * 0 indicates success as with RMPUPDATE -- confirm.  FAIL_INUSE is
+ * retried by the caller.
+ */
+#define PSMASH_FAIL_INPUT 1
+#define PSMASH_FAIL_PERMISSION 2
+#define PSMASH_FAIL_INUSE 3
+#define PSMASH_FAIL_BADADDR 4
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
#include <linux/jump_label.h>

@@ -124,6 +147,8 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
int snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
int snp_set_memory_private(unsigned long vaddr, unsigned int npages);
rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level);
+int rmptable_psmash(struct page *page);
+int rmptable_rmpupdate(struct page *page, struct rmpupdate *e);

extern struct static_key_false snp_enable_key;
static inline bool snp_key_active(void)
@@ -155,6 +180,8 @@ static inline int snp_set_memory_shared(unsigned long vaddr, unsigned int npages
static inline int snp_set_memory_private(unsigned long vaddr, unsigned int npages) { return 0; }
static inline bool snp_key_active(void) { return false; }
static inline rpmentry_t *lookup_page_in_rmptable(struct page *page, int *level) { return NULL; }
+static inline int rmptable_psmash(struct page *page) { return -ENXIO; }
+static inline int rmptable_rmpupdate(struct page *page, struct rmpupdate *e) { return -ENXIO; }

#endif /* CONFIG_AMD_MEM_ENCRYPT */

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 06394b6d56b2..7a0138cb3e17 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -644,3 +644,44 @@ rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)
return entry;
}
EXPORT_SYMBOL_GPL(lookup_page_in_rmptable);
+
+/*
+ * rmptable_psmash() - expand the 2MB RMP entry covering @page into a
+ * corresponding set of contiguous 4KB RMP entries.
+ * @page: page whose 2MB RMP entry is to be smashed.
+ *
+ * Returns -ENXIO when SEV-SNP is not enabled, otherwise the PSMASH
+ * status code (see PSMASH_FAIL_*).
+ */
+int rmptable_psmash(struct page *page)
+{
+ /* System physical address of the page, as the instruction expects. */
+ unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
+ int ret;
+
+ if (!static_branch_unlikely(&snp_enable_key))
+ return -ENXIO;
+
+ /* Retry if another processor is modifying the RMP entry. */
+ do {
+ /*
+ * PSMASH, emitted as raw opcode bytes (F3 0F 01 FF) --
+ * presumably because the assembler mnemonic may be
+ * unavailable in older binutils; TODO confirm.  Input:
+ * rAX = SPA; output: eax = status code.
+ */
+ asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
+ : "=a"(ret)
+ : "a"(spa)
+ : "memory", "cc");
+ } while (ret == PSMASH_FAIL_INUSE);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rmptable_psmash);
+
+/*
+ * rmptable_rmpupdate() - write a new RMP entry for @page.
+ * @page: page whose RMP entry is to be written.
+ * @val: new RMP entry contents (see struct rmpupdate).
+ *
+ * Returns -ENXIO when SEV-SNP is not enabled, otherwise the RMPUPDATE
+ * status code (RMPUPDATE_SUCCESS or one of RMPUPDATE_FAIL_*).
+ */
+int rmptable_rmpupdate(struct page *page, struct rmpupdate *val)
+{
+ /* System physical address of the page, as the instruction expects. */
+ unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
+ bool flush = true;
+ int ret;
+
+ if (!static_branch_unlikely(&snp_enable_key))
+ return -ENXIO;
+
+ /* Retry if another processor is modifying the RMP entry. */
+ do {
+ /*
+ * RMPUPDATE, emitted as raw opcode bytes (F2 0F 01 FE).
+ * Inputs: rAX = SPA, rCX = address of the rmpupdate
+ * struct, rDX = flush flag; output: eax = status code.
+ */
+ asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
+ : "=a"(ret)
+ : "a"(spa), "c"((unsigned long)val), "d"(flush)
+ : "memory", "cc");
+ /*
+ * Fix: retry on RMPUPDATE_FAIL_INUSE, not PSMASH_FAIL_INUSE.
+ * Both are currently 3, so behavior is unchanged, but the
+ * PSMASH constant belongs to a different instruction and the
+ * check would silently break if the code values diverged.
+ */
+ } while (ret == RMPUPDATE_FAIL_INUSE);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rmptable_rmpupdate);
--
2.17.1