[Part2 PATCH v4 21/29] KVM: SVM: Add support for SEV DEBUG_DECRYPT command

From: Brijesh Singh
Date: Tue Sep 19 2017 - 16:50:33 EST


The command is used for decrypting a guest memory region for debug
purposes.
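
For reference, a minimal userspace sketch of how the command is expected
to be invoked (assuming the KVM_MEMORY_ENCRYPT_OP ioctl and the
kvm_sev_cmd/kvm_sev_dbg structures introduced earlier in this series;
the fd and buffer names are illustrative):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* vm_fd:   the KVM VM file descriptor
     * sev_fd:  an open /dev/sev file descriptor
     * enc_hva: userspace address backing the encrypted guest region
     * out:     plaintext destination buffer of 'len' bytes
     */
    static int sev_debug_decrypt(int vm_fd, int sev_fd,
                                 const void *enc_hva, void *out,
                                 uint32_t len)
    {
            struct kvm_sev_dbg dbg = {
                    .src_uaddr = (uint64_t)(uintptr_t)enc_hva,
                    .dst_uaddr = (uint64_t)(uintptr_t)out,
                    .len = len,
            };
            struct kvm_sev_cmd cmd = {
                    .id = KVM_SEV_DBG_DECRYPT,
                    .data = (uint64_t)(uintptr_t)&dbg,
                    .sev_fd = (uint32_t)sev_fd,
            };

            /* returns 0 on success; cmd.error holds the SEV firmware error */
            return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
    }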

Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: "Radim KrÄmÃÅ" <rkrcmar@xxxxxxxxxx>
Cc: Joerg Roedel <joro@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: kvm@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
---
arch/x86/kvm/svm.c | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 179 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7a6e82c48142..4d51ccb462db 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1622,7 +1622,6 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,

*n = npages;
sev->locked = locked;
-
return pages;
err:
if (pinned > 0)
@@ -6106,6 +6105,181 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
return ret;
}

+static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+ unsigned long dst, int size,
+ int *error, bool enc)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct sev_data_dbg *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->handle = sev->handle;
+ data->dst_addr = dst;
+ data->src_addr = src;
+ data->len = size;
+
+ ret = sev_issue_cmd(kvm,
+ enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
+ data, error);
+ kfree(data);
+ return ret;
+}
+
+/*
+ * Decrypt source memory into a userspace or kernel buffer. If the destination
+ * buffer or the length is not aligned to a 16-byte boundary then an
+ * intermediate buffer is used.
+ */
+static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long paddr,
+ unsigned long __user dst_uaddr,
+ unsigned long dst_kaddr, unsigned long dst_paddr,
+ int size, int *error)
+{
+ int ret, offset = 0, len = size;
+ struct page *tpage = NULL;
+
+ /*
+ * The debug command works with 16-byte aligned inputs, so check whether
+ * all inputs (src, dst and len) are 16-byte aligned. If one of the
+ * inputs is not aligned then we decrypt more than requested into a
+ * temporary buffer and copy the requested portion into the destination
+ * buffer.
+ */
+ if (!IS_ALIGNED(paddr, 16) ||
+ !IS_ALIGNED(dst_paddr, 16) ||
+ !IS_ALIGNED(size, 16)) {
+ tpage = (void *)alloc_page(GFP_KERNEL);
+ if (!tpage)
+ return -ENOMEM;
+
+ dst_paddr = __sme_page_pa(tpage);
+
+ /*
+ * If the source buffer is not aligned then the offset will be
+ * used when copying the data from the temporary buffer into the
+ * destination buffer.
+ */
+ offset = paddr & 15;
+
+ /* it's safe to read more than the requested size. */
+ len = round_up(size + offset, 16);
+
+ paddr = round_down(paddr, 16);
+
+ /*
+ * Cache accesses from the PSP are coherent with x86, but not the
+ * other way around. Hence we flush the destination cache lines to
+ * ensure that x86 sees the PSP updates.
+ */
+ clflush_cache_range(page_address(tpage), PAGE_SIZE);
+ }
+
+ ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, false);
+
+ /*
+ * If a temporary buffer was used then copy the data from the temporary
+ * buffer into the destination buffer.
+ */
+ if (!ret && tpage) {
+
+ /*
+ * If the destination buffer is a userspace buffer then use
+ * copy_to_user(), otherwise memcpy().
+ */
+ if (dst_uaddr) {
+ if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
+ page_address(tpage) + offset, size))
+ ret = -EFAULT;
+ } else {
+ memcpy((void *)dst_kaddr, page_address(tpage) + offset, size);
+ }
+ }
+
+ if (tpage)
+ __free_page(tpage);
+
+ return ret;
+}
+
+static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+{
+ unsigned long vaddr, vaddr_end, next_vaddr;
+ unsigned long dst_vaddr, dst_vaddr_end;
+ struct page **src_p, **dst_p;
+ struct kvm_sev_dbg debug;
+ unsigned long n;
+ int ret = 0, size;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data,
+ sizeof(struct kvm_sev_dbg)))
+ return -EFAULT;
+
+ vaddr = debug.src_uaddr;
+ size = debug.len;
+ vaddr_end = vaddr + size;
+ dst_vaddr = debug.dst_uaddr;
+ dst_vaddr_end = dst_vaddr + size;
+
+ for (; vaddr < vaddr_end; vaddr = next_vaddr) {
+ int len, s_off, d_off;
+
+ /* lock the userspace source and destination pages */
+ src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
+ if (!src_p)
+ return -EFAULT;
+
+ dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+ if (!dst_p) {
+ sev_unpin_memory(kvm, src_p, n);
+ return -EFAULT;
+ }
+
+ /*
+ * Cache accesses between the PSP and x86 are not coherent, hence
+ * we flush the cache lines of the buffers shared with the PSP to
+ * ensure that we will be able to see the PSP updates.
+ */
+ sev_clflush_pages(src_p, 1);
+ sev_clflush_pages(dst_p, 1);
+
+ /*
+ * Since the user buffers may not be page aligned, calculate the
+ * offset within the page.
+ */
+ s_off = vaddr & ~PAGE_MASK;
+ d_off = dst_vaddr & ~PAGE_MASK;
+ len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+ ret = __sev_dbg_decrypt(kvm,
+ __sme_page_pa(src_p[0]) + s_off,
+ dst_vaddr, 0,
+ __sme_page_pa(dst_p[0]) + d_off,
+ len, &argp->error);
+
+ sev_unpin_memory(kvm, src_p, 1);
+ sev_unpin_memory(kvm, dst_p, 1);
+
+ if (ret)
+ goto err;
+
+ next_vaddr = vaddr + len;
+ dst_vaddr = dst_vaddr + len;
+ size -= len;
+ }
+err:
+ return ret;
+}
+
+static int sev_dbg_decrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ return sev_dbg_crypt(kvm, argp, true);
+}
+
static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -6141,6 +6315,10 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
r = sev_guest_status(kvm, &sev_cmd);
break;
}
+ case KVM_SEV_DBG_DECRYPT: {
+ r = sev_dbg_decrypt(kvm, &sev_cmd);
+ break;
+ }
default:
break;
}
--
2.9.5