[PATCH 4.4 38/58] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state

From: Greg Kroah-Hartman
Date: Fri Jan 06 2017 - 17:14:29 EST


4.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Paul Mackerras <paulus@xxxxxxxxxx>

commit 0d808df06a44200f52262b6eb72bcb6042f5a7c5 upstream.

When switching from/to a guest that has a transaction in progress,
we need to save/restore the checkpointed register state. Although
XER is part of the CPU state that gets checkpointed, the code that
does this saving and restoring doesn't save/restore XER.

This fixes it by saving and restoring the XER. To allow userspace
to read/write the checkpointed XER value, we also add a new ONE_REG
specifier.

The visible effect of this bug is that the guest may see its XER
value being corrupted when it uses transactions.
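
For context, the new id plugs into the standard ONE_REG interface, so
userspace reads/writes it with KVM_GET_ONE_REG / KVM_SET_ONE_REG on the
vcpu fd. A minimal sketch of a read, assuming an already-open vcpu file
descriptor (the get_tm_xer helper below is illustrative only, not part
of this patch):

    /*
     * Illustrative only: read the checkpointed XER from userspace
     * through the ONE_REG id added by this patch.
     */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int get_tm_xer(int vcpu_fd, uint64_t *xer)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_TM_XER,
                    .addr = (uintptr_t)xer,
            };

            /* Returns 0 on success, -1 with errno set on failure. */
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

The corresponding write path uses KVM_SET_ONE_REG with the same id.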

Fixes: e4e38121507a ("KVM: PPC: Book3S HV: Add transactional memory support")
Fixes: 0a8eccefcb34 ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
Signed-off-by: Paul Mackerras <paulus@xxxxxxxxxx>
Reviewed-by: Thomas Huth <thuth@xxxxxxxxxx>
Signed-off-by: Paul Mackerras <paulus@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 Documentation/virtual/kvm/api.txt       |    1 +
 arch/powerpc/include/asm/kvm_host.h     |    1 +
 arch/powerpc/include/uapi/asm/kvm.h     |    1 +
 arch/powerpc/kernel/asm-offsets.c       |    1 +
 arch/powerpc/kvm/book3s_hv.c            |    6 ++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |    4 ++++
 6 files changed, 14 insertions(+)

--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1991,6 +1991,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR           | 32
   PPC   | KVM_REG_PPC_TM_DSCR           | 64
   PPC   | KVM_REG_PPC_TM_TAR            | 64
+  PPC   | KVM_REG_PPC_TM_XER            | 64
         |                               |
   MIPS  | KVM_REG_MIPS_R0               | 64
           ...
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -545,6 +545,7 @@ struct kvm_vcpu_arch {
 	u64 tfiar;
 
 	u32 cr_tm;
+	u64 xer_tm;
 	u64 lr_tm;
 	u64 ctr_tm;
 	u64 amr_tm;
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -587,6 +587,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -584,6 +584,7 @@ int main(void)
 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1186,6 +1186,9 @@ static int kvmppc_get_one_reg_hv(struct
 	case KVM_REG_PPC_TM_CR:
 		*val = get_reg_val(id, vcpu->arch.cr_tm);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		*val = get_reg_val(id, vcpu->arch.xer_tm);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		*val = get_reg_val(id, vcpu->arch.lr_tm);
 		break;
@@ -1393,6 +1396,9 @@ static int kvmppc_set_one_reg_hv(struct
 	case KVM_REG_PPC_TM_CR:
 		vcpu->arch.cr_tm = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		vcpu->arch.xer_tm = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		vcpu->arch.lr_tm = set_reg_val(id, *val);
 		break;
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2514,11 +2514,13 @@ kvmppc_save_tm:
 	mfctr	r7
 	mfspr	r8, SPRN_AMR
 	mfspr	r10, SPRN_TAR
+	mfxer	r11
 	std	r5, VCPU_LR_TM(r9)
 	stw	r6, VCPU_CR_TM(r9)
 	std	r7, VCPU_CTR_TM(r9)
 	std	r8, VCPU_AMR_TM(r9)
 	std	r10, VCPU_TAR_TM(r9)
+	std	r11, VCPU_XER_TM(r9)
 
 	/* Restore r12 as trap number. */
 	lwz	r12, VCPU_TRAP(r9)
@@ -2611,11 +2613,13 @@ kvmppc_restore_tm:
 	ld	r7, VCPU_CTR_TM(r4)
 	ld	r8, VCPU_AMR_TM(r4)
 	ld	r9, VCPU_TAR_TM(r4)
+	ld	r10, VCPU_XER_TM(r4)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
 	mtspr	SPRN_AMR, r8
 	mtspr	SPRN_TAR, r9
+	mtxer	r10
 
 	/*
 	 * Load up PPR and DSCR values but don't put them in the actual SPRs