[Part2 PATCH v4 17/29] KVM: SVM: Add support for SEV LAUNCH_UPDATE_DATA command

From: Brijesh Singh
Date: Tue Sep 19 2017 - 16:49:58 EST


The KVM_SEV_LAUNCH_UPDATE_DATA command is used to encrypt a guest memory
region with the VM encryption key (VEK) created during KVM_SEV_LAUNCH_START.

Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: "Radim KrÄmÃÅ" <rkrcmar@xxxxxxxxxx>
Cc: Joerg Roedel <joro@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: kvm@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
---
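
For reviewers, a rough userspace sketch of how the command is expected to be
driven (not part of the patch; the KVM_MEMORY_ENCRYPT_OP ioctl name and the
kvm_sev_cmd/kvm_sev_launch_update_data layouts are assumed from the uapi
headers added earlier in this series). The uaddr/len region is encrypted in
place with the VEK, so it should already hold the guest image:

        /* Hypothetical userspace helper, assuming this series' uapi. */
        #include <stdio.h>
        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        static int sev_launch_update(int vm_fd, int sev_fd, void *buf, __u32 len)
        {
                struct kvm_sev_launch_update_data update = {
                        .uaddr = (__u64)(uintptr_t)buf, /* region encrypted in place */
                        .len   = len,
                };
                struct kvm_sev_cmd cmd = {
                        .id     = KVM_SEV_LAUNCH_UPDATE_DATA,
                        .data   = (__u64)(uintptr_t)&update,
                        .sev_fd = sev_fd,               /* fd of the SEV device */
                };
                int ret;

                ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
                if (ret)
                        fprintf(stderr, "LAUNCH_UPDATE_DATA failed: %d (fw error %u)\n",
                                ret, cmd.error);
                return ret;
        }

In the kernel, the handler below pins the user pages, flushes caches, and
splits the region into physically contiguous chunks before issuing
SEV_CMD_LAUNCH_UPDATE_DATA to the PSP for each chunk.
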
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm.c | 191 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 190 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7453ef7cb487..86006dcfe574 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -752,6 +752,7 @@ struct kvm_sev_info {
unsigned int asid; /* ASID used for this guest */
unsigned int handle; /* SEV firmware handle */
int fd; /* SEV device fd */
+ unsigned long locked; /* Number of pages locked */
};

struct kvm_arch {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5e3e7d46270b..4667504acca5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -39,6 +39,8 @@
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
@@ -326,6 +328,7 @@ enum {

static unsigned int max_sev_asid;
static unsigned long *sev_asid_bitmap;
+#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

static inline bool svm_sev_enabled(void)
{
@@ -1578,6 +1581,82 @@ static void sev_deactivate_handle(struct kvm *kvm)
kfree(data);
}

+static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+ unsigned long ulen, unsigned long *n,
+ int write)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ unsigned long npages, pinned, size;
+ unsigned long locked, lock_limit;
+ struct page **pages;
+ int first, last;
+
+ /* calculate number of pages */
+ first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ npages = (last - first + 1);
+
+ locked = sev->locked + npages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+ pr_err("locked(%lu) > lock_limit(%lu)\n", locked, lock_limit);
+ return NULL;
+ }
+
+ /* Avoid using vmalloc for smaller buffers */
+ size = npages * sizeof(struct page *);
+ if (size > PAGE_SIZE)
+ pages = vmalloc(size);
+ else
+ pages = kmalloc(size, GFP_KERNEL);
+
+ if (!pages)
+ return NULL;
+
+ /* pin the user virtual address */
+ pinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+ if (pinned != npages) {
+ pr_err("failed to lock %lu\n", npages);
+ goto err;
+ }
+
+ *n = npages;
+ sev->locked = locked;
+
+ return pages;
+err:
+ if (pinned > 0)
+ release_pages(pages, pinned, 0);
+
+ kvfree(pages);
+ return NULL;
+}
+
+static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
+ unsigned long npages)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+ sev->locked -= npages;
+ release_pages(pages, npages, 0);
+ kvfree(pages);
+}
+
+static void sev_clflush_pages(struct page *pages[], unsigned long npages)
+{
+ uint8_t *page_virtual;
+ unsigned long i;
+
+ if (npages == 0 || pages == NULL)
+ return;
+
+ for (i = 0; i < npages; i++) {
+ page_virtual = kmap_atomic(pages[i]);
+ clflush_cache_range(page_virtual, PAGE_SIZE);
+ kunmap_atomic(page_virtual);
+ }
+}
+
static void sev_vm_destroy(struct kvm *kvm)
{
struct kvm_sev_info *sev = &kvm->arch.sev_info;
@@ -5698,7 +5777,7 @@ static int sev_activate_asid(struct kvm *kvm, unsigned int handle, int *error)
return ret;
}

-static int sev_issue_cmd(int fd, int id, void *data, int *error)
+static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
struct fd f;
int ret;
@@ -5713,6 +5792,13 @@ static int sev_issue_cmd(int fd, int id, void *data, int *error)
return ret;
}

+static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+ return __sev_issue_cmd(sev->fd, id, data, error);
+}
+
static void *copy_user_blob(u64 __user uaddr, u32 len)
{
void *data;
@@ -5785,7 +5871,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
start->policy = params.policy;

/* create memory encryption context */
- ret = sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
+ ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
if (ret)
goto e_free_session;

@@ -5815,6 +5901,103 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
return ret;
}

+static int get_num_contig_pages(int idx, struct page **inpages,
+ unsigned long npages)
+{
+ int i = idx + 1, pages = 1;
+ unsigned long paddr, next_paddr;
+
+ /* find the number of contiguous pages starting from idx */
+ paddr = __sme_page_pa(inpages[idx]);
+ while (i < npages) {
+ next_paddr = __sme_page_pa(inpages[i++]);
+ if ((paddr + PAGE_SIZE) == next_paddr) {
+ pages++;
+ paddr = next_paddr;
+ continue;
+ }
+ break;
+ }
+
+ return pages;
+}
+
+static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct kvm_sev_launch_update_data params;
+ struct sev_data_launch_update_data *data;
+ struct page **inpages;
+ int i, ret, pages;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ sizeof(struct kvm_sev_launch_update_data)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ vaddr = params.uaddr;
+ size = params.len;
+ vaddr_end = vaddr + size;
+
+ /* lock the user memory */
+ inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
+ if (!inpages) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ /*
+ * The PSP performs in-place encryption of the memory content (it may
+ * access the memory range with C=1). Make sure the caches are flushed
+ * so that the data is written to DRAM with the correct C-bit before
+ * the PSP accesses it.
+ */
+ sev_clflush_pages(inpages, npages);
+
+ for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
+ int offset, len;
+
+ /*
+ * Since the user buffer may not be page aligned, calculate the
+ * offset within the page.
+ */
+ offset = vaddr & (PAGE_SIZE - 1);
+
+ /* calculate the number of pages that can be encrypted in one go */
+ pages = get_num_contig_pages(i, inpages, npages);
+
+ len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
+
+ data->handle = sev->handle;
+ data->len = len;
+ data->address = __sme_page_pa(inpages[i]) + offset;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
+ if (ret)
+ goto e_unpin;
+
+ size -= len;
+ next_vaddr = vaddr + len;
+ }
+e_unpin:
+ /* content of memory is updated, mark pages dirty */
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(inpages[i]);
+ mark_page_accessed(inpages[i]);
+ }
+ /* unlock the user pages */
+ sev_unpin_memory(kvm, inpages, npages);
+e_free:
+ kfree(data);
+ return ret;
+}
+
static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -5834,6 +6017,10 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
r = sev_launch_start(kvm, &sev_cmd);
break;
}
+ case KVM_SEV_LAUNCH_UPDATE_DATA: {
+ r = sev_launch_update_data(kvm, &sev_cmd);
+ break;
+ }
default:
break;
}
--
2.9.5