Re: [PATCH v18 014/121] KVM: TDX: Add C wrapper functions for SEAMCALLs to the TDX module

From: Yuan Yao
Date: Wed Jan 31 2024 - 03:13:31 EST


On Mon, Jan 22, 2024 at 03:52:50PM -0800, isaku.yamahata@xxxxxxxxx wrote:
> From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
>
> A VMM interacts with the TDX module using a new instruction (SEAMCALL).
> For instance, a TDX VMM does not have full access to the VM control
> structure corresponding to the VMX VMCS. Instead, the VMM induces the
> TDX module to act on its behalf via SEAMCALLs.
>
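(Aside for readers: where a VMX host programs the VMCS directly with
e.g. vmcs_write64(), a TDX host asks the module to do it. A sketch of
a hypothetical caller of one of the wrappers below, with the field id
made up for illustration:

        /* Ask the TDX module to write a vCPU field on KVM's behalf. */
        err = tdh_vp_wr(tdvpr_pa, TD_VCPU_FIELD_XYZ, val, -1ULL, &out);

The module performs the write and returns a status code to the caller.)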
> Export __seamcall and define C wrapper functions for SEAMCALLs for
  ^^^^^^^^^^^^^^^^^

It's not exported by this patch.

Otherwise, LGTM.

Reviewed-by: Yuan Yao <yuan.yao@xxxxxxxxx>

> readability.
>
> Some SEAMCALL APIs donate host pages to the TDX module or a guest TD,
> and the donated pages are encrypted. For those, the VMM must flush the
> cache lines before donation to avoid cache line aliasing.
>
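For reference, that flush-before-donate pattern appears in the wrappers
below as, e.g. (copied from the patch; the HKID rationale is my reading):

        /*
         * Flush dirty lines written with the shared HKID so they can't
         * alias the contents the TDX module writes back with the TD's
         * private HKID.
         */
        clflush_cache_range(__va(hpa), PAGE_SIZE);
        return tdx_seamcall(TDH_MEM_PAGE_ADD, &in, out);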
> Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> Reviewed-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
>
> ---
> Changes
> v18:
> - Removed stub functions for __seamcall{,_ret}()
> - Added Binbin's Reviewed-by
> - Made tdx_seamcall() use struct tdx_module_args instead of taking
>   each input individually.
>
> v15 -> v16:
> - use struct tdx_module_args instead of struct tdx_module_output
> - Add tdh_mem_sept_rd() for SEPT_VE_DISABLE=1.
> ---
> arch/x86/kvm/vmx/tdx_ops.h | 360 +++++++++++++++++++++++++++++++++++++
> 1 file changed, 360 insertions(+)
> create mode 100644 arch/x86/kvm/vmx/tdx_ops.h
>
> diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
> new file mode 100644
> index 000000000000..0e26cf22240e
> --- /dev/null
> +++ b/arch/x86/kvm/vmx/tdx_ops.h
> @@ -0,0 +1,360 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Constants and C wrapper functions for TDX SEAMCALLs */
> +
> +#ifndef __KVM_X86_TDX_OPS_H
> +#define __KVM_X86_TDX_OPS_H
> +
> +#include <linux/compiler.h>
> +
> +#include <asm/cacheflush.h>
> +#include <asm/asm.h>
> +#include <asm/kvm_host.h>
> +
> +#include "tdx_errno.h"
> +#include "tdx_arch.h"
> +#include "x86.h"
> +
> +static inline u64 tdx_seamcall(u64 op, struct tdx_module_args *in,
> +                               struct tdx_module_args *out)
> +{
> +        u64 ret;
> +
> +        if (out) {
> +                *out = *in;
> +                ret = __seamcall_ret(op, out);
> +        } else {
> +                ret = __seamcall(op, in);
> +        }
> +
> +        if (unlikely(ret == TDX_SEAMCALL_UD)) {
> +                /*
> +                 * SEAMCALLs fail with TDX_SEAMCALL_UD when VMX is off.
> +                 * This can happen when the host gets rebooted or live
> +                 * updated.  In this case the failure is benign, as KVM
> +                 * is shutting down, so the error code is suppressed.
> +                 * Otherwise the error is unexpected and execution can't
> +                 * continue, as TDX relies on VMX being on.
> +                 */
> +                kvm_spurious_fault();
> +                return 0;
> +        }
> +        return ret;
> +}
> +
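One note on the in/out convention for readers: a caller that needs the
SEAMCALL's output registers passes a separate struct and reads it back
on success. A hypothetical caller of one of the _rd() wrappers below,
with the field id made up:

        struct tdx_module_args out;
        u64 err, val;

        err = tdh_mng_rd(tdr_pa, TDCS_FIELD_XYZ, &out);
        if (!err)
                val = out.r8;   /* IIRC the module returns the value in r8 */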
> +static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = addr,
> +                .rdx = tdr,
> +        };
> +
> +        clflush_cache_range(__va(addr), PAGE_SIZE);
> +        return tdx_seamcall(TDH_MNG_ADDCX, &in, NULL);
> +}
> +
> +static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source,
> +                                   struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa,
> +                .rdx = tdr,
> +                .r8 = hpa,
> +                .r9 = source,
> +        };
> +
> +        clflush_cache_range(__va(hpa), PAGE_SIZE);
> +        return tdx_seamcall(TDH_MEM_PAGE_ADD, &in, out);
> +}
> +
> +static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
> +                                   struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa | level,
> +                .rdx = tdr,
> +                .r8 = page,
> +        };
> +
> +        clflush_cache_range(__va(page), PAGE_SIZE);
> +        return tdx_seamcall(TDH_MEM_SEPT_ADD, &in, out);
> +}
> +
> +static inline u64 tdh_mem_sept_rd(hpa_t tdr, gpa_t gpa, int level,
> +                                  struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa | level,
> +                .rdx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MEM_SEPT_RD, &in, out);
> +}
> +
> +static inline u64 tdh_mem_sept_remove(hpa_t tdr, gpa_t gpa, int level,
> +                                      struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa | level,
> +                .rdx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MEM_SEPT_REMOVE, &in, out);
> +}
> +
> +static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = addr,
> +                .rdx = tdvpr,
> +        };
> +
> +        clflush_cache_range(__va(addr), PAGE_SIZE);
> +        return tdx_seamcall(TDH_VP_ADDCX, &in, NULL);
> +}
> +
> +static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
> +                                        struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa,
> +                .rdx = tdr,
> +                .r8 = hpa,
> +        };
> +
> +        clflush_cache_range(__va(hpa), PAGE_SIZE);
> +        return tdx_seamcall(TDH_MEM_PAGE_RELOCATE, &in, out);
> +}
> +
> +static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
> +                                   struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa,
> +                .rdx = tdr,
> +                .r8 = hpa,
> +        };
> +
> +        clflush_cache_range(__va(hpa), PAGE_SIZE);
> +        return tdx_seamcall(TDH_MEM_PAGE_AUG, &in, out);
> +}
> +
> +static inline u64 tdh_mem_range_block(hpa_t tdr, gpa_t gpa, int level,
> +                                      struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa | level,
> +                .rdx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MEM_RANGE_BLOCK, &in, out);
> +}
> +
> +static inline u64 tdh_mng_key_config(hpa_t tdr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MNG_KEY_CONFIG, &in, NULL);
> +}
> +
> +static inline u64 tdh_mng_create(hpa_t tdr, int hkid)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +                .rdx = hkid,
> +        };
> +
> +        clflush_cache_range(__va(tdr), PAGE_SIZE);
> +        return tdx_seamcall(TDH_MNG_CREATE, &in, NULL);
> +}
> +
> +static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdvpr,
> +                .rdx = tdr,
> +        };
> +
> +        clflush_cache_range(__va(tdvpr), PAGE_SIZE);
> +        return tdx_seamcall(TDH_VP_CREATE, &in, NULL);
> +}
> +
> +static inline u64 tdh_mng_rd(hpa_t tdr, u64 field, struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +                .rdx = field,
> +        };
> +
> +        return tdx_seamcall(TDH_MNG_RD, &in, out);
> +}
> +
> +static inline u64 tdh_mr_extend(hpa_t tdr, gpa_t gpa,
> +                                struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa,
> +                .rdx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MR_EXTEND, &in, out);
> +}
> +
> +static inline u64 tdh_mr_finalize(hpa_t tdr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MR_FINALIZE, &in, NULL);
> +}
> +
> +static inline u64 tdh_vp_flush(hpa_t tdvpr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdvpr,
> +        };
> +
> +        return tdx_seamcall(TDH_VP_FLUSH, &in, NULL);
> +}
> +
> +static inline u64 tdh_mng_vpflushdone(hpa_t tdr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MNG_VPFLUSHDONE, &in, NULL);
> +}
> +
> +static inline u64 tdh_mng_key_freeid(hpa_t tdr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MNG_KEY_FREEID, &in, NULL);
> +}
> +
> +static inline u64 tdh_mng_init(hpa_t tdr, hpa_t td_params,
> +                               struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +                .rdx = td_params,
> +        };
> +
> +        return tdx_seamcall(TDH_MNG_INIT, &in, out);
> +}
> +
> +static inline u64 tdh_vp_init(hpa_t tdvpr, u64 rcx)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdvpr,
> +                .rdx = rcx,
> +        };
> +
> +        return tdx_seamcall(TDH_VP_INIT, &in, NULL);
> +}
> +
> +static inline u64 tdh_vp_rd(hpa_t tdvpr, u64 field,
> +                            struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdvpr,
> +                .rdx = field,
> +        };
> +
> +        return tdx_seamcall(TDH_VP_RD, &in, out);
> +}
> +
> +static inline u64 tdh_mng_key_reclaimid(hpa_t tdr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MNG_KEY_RECLAIMID, &in, NULL);
> +}
> +
> +static inline u64 tdh_phymem_page_reclaim(hpa_t page,
> +                                          struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = page,
> +        };
> +
> +        return tdx_seamcall(TDH_PHYMEM_PAGE_RECLAIM, &in, out);
> +}
> +
> +static inline u64 tdh_mem_page_remove(hpa_t tdr, gpa_t gpa, int level,
> +                                      struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa | level,
> +                .rdx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MEM_PAGE_REMOVE, &in, out);
> +}
> +
> +static inline u64 tdh_sys_lp_shutdown(void)
> +{
> +        struct tdx_module_args in = {
> +        };
> +
> +        return tdx_seamcall(TDH_SYS_LP_SHUTDOWN, &in, NULL);
> +}
> +
> +static inline u64 tdh_mem_track(hpa_t tdr)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MEM_TRACK, &in, NULL);
> +}
> +
> +static inline u64 tdh_mem_range_unblock(hpa_t tdr, gpa_t gpa, int level,
> +                                        struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = gpa | level,
> +                .rdx = tdr,
> +        };
> +
> +        return tdx_seamcall(TDH_MEM_RANGE_UNBLOCK, &in, out);
> +}
> +
> +static inline u64 tdh_phymem_cache_wb(bool resume)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = resume ? 1 : 0,
> +        };
> +
> +        return tdx_seamcall(TDH_PHYMEM_CACHE_WB, &in, NULL);
> +}
> +
> +static inline u64 tdh_phymem_page_wbinvd(hpa_t page)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = page,
> +        };
> +
> +        return tdx_seamcall(TDH_PHYMEM_PAGE_WBINVD, &in, NULL);
> +}
> +
> +static inline u64 tdh_vp_wr(hpa_t tdvpr, u64 field, u64 val, u64 mask,
> +                            struct tdx_module_args *out)
> +{
> +        struct tdx_module_args in = {
> +                .rcx = tdvpr,
> +                .rdx = field,
> +                .r8 = val,
> +                .r9 = mask,
> +        };
> +
> +        return tdx_seamcall(TDH_VP_WR, &in, out);
> +}
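If I read the spec right, the mask makes TDH.VP.WR a read-modify-write,
i.e. new = (old & ~mask) | (val & mask), so a caller could set a single
bit with something like (hypothetical):

        err = tdh_vp_wr(tdvpr_pa, field, BIT_ULL(n), BIT_ULL(n), &out);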
> +
> +#endif /* __KVM_X86_TDX_OPS_H */
> --
> 2.25.1