On 8/15/21 2:13 AM, Gavin Shan wrote:
The owner of the SDEI event, like asynchronous page fault, need
know the state of injected SDEI event. This supports SDEI events
state updating by introducing notifier mechanism. It's notable
the notifier (handler) should be capable of migration.

"owner" is not a terminology used in the SDEI spec.
/need know the state of injected/to know the state of the injected
/introducing notifier mechanism/introducing a notifier mechanism
I don't understand the last sentence ("the notifier (handler) should be
capable of migration").
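
For reference, a minimal sketch of how an in-kernel owner (the asynchronous
page fault code, for instance) might consume the notifier API added here.
The event number and helper names below are hypothetical; only
kvm_sdei_register_notifier(), the kvm_sdei_notifier typedef and the
KVM_SDEI_NOTIFY_* states come from the patch itself:

/* Hypothetical event number owned by the async PF code. */
#define APF_SDEI_EVENT_NUM      0x40200000UL

static void apf_sdei_notifier(struct kvm_vcpu *vcpu, unsigned long num,
                              unsigned int state)
{
        switch (state) {
        case KVM_SDEI_NOTIFY_DELIVERED:
                /* The event has just been injected into the vcpu. */
                break;
        case KVM_SDEI_NOTIFY_COMPLETED:
                /* The guest handler finished via COMPLETE/COMPLETE_AND_RESUME. */
                break;
        }
}

static int apf_sdei_init(struct kvm *kvm)
{
        /* -EPERM if SDEI isn't initialized, -EINVAL if the event is unknown. */
        return kvm_sdei_register_notifier(kvm, APF_SDEI_EVENT_NUM,
                                          apf_sdei_notifier);
}

With that in place, kvm_sdei_deliver() would fire the DELIVERED notification
when the event is injected into the vcpu, and kvm_sdei_hypercall_complete()
would fire COMPLETED once the guest signals completion.
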
Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
arch/arm64/include/asm/kvm_sdei.h | 12 +++++++
arch/arm64/include/uapi/asm/kvm_sdei.h | 1 +
arch/arm64/kvm/sdei.c | 45 +++++++++++++++++++++++++-
3 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_sdei.h b/arch/arm64/include/asm/kvm_sdei.h
index 7f5f5ad689e6..19f2d9b91f85 100644
--- a/arch/arm64/include/asm/kvm_sdei.h
+++ b/arch/arm64/include/asm/kvm_sdei.h
@@ -16,6 +16,16 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+struct kvm_vcpu;
+
+typedef void (*kvm_sdei_notifier)(struct kvm_vcpu *vcpu,
+ unsigned long num,
+ unsigned int state);
+enum {
+ KVM_SDEI_NOTIFY_DELIVERED,
+ KVM_SDEI_NOTIFY_COMPLETED,
+};
+
struct kvm_sdei_event {
struct kvm_sdei_event_state state;
struct kvm *kvm;
@@ -112,6 +122,8 @@ KVM_SDEI_FLAG_FUNC(enabled)
void kvm_sdei_init_vm(struct kvm *kvm);
void kvm_sdei_create_vcpu(struct kvm_vcpu *vcpu);
int kvm_sdei_hypercall(struct kvm_vcpu *vcpu);
+int kvm_sdei_register_notifier(struct kvm *kvm, unsigned long num,
+ kvm_sdei_notifier notifier);
void kvm_sdei_deliver(struct kvm_vcpu *vcpu);
void kvm_sdei_destroy_vcpu(struct kvm_vcpu *vcpu);
void kvm_sdei_destroy_vm(struct kvm *kvm);
diff --git a/arch/arm64/include/uapi/asm/kvm_sdei.h b/arch/arm64/include/uapi/asm/kvm_sdei.h
index 8928027023f6..4ef661d106fe 100644
--- a/arch/arm64/include/uapi/asm/kvm_sdei.h
+++ b/arch/arm64/include/uapi/asm/kvm_sdei.h
@@ -23,6 +23,7 @@ struct kvm_sdei_event_state {
__u8 type;
__u8 signaled;
__u8 priority;
+ __u64 notifier;

Why is the notifier attached to the exposed event and not to the
registered or even the vcpu event? This needs to be motivated.

Also, as commented earlier, I really think we first need to agree on the
uapi and get a consensus on it, as it must be right on the first shot. To
that end, maybe introduce a patch dedicated to the uapi and document it
properly, including the way the end user is supposed to use it.

Another way to proceed would be to not support migration at the moment,
mature the API, and then introduce migration support later. Would that make
sense? For instance, in the past, in-kernel ITS emulation was first
introduced without migration support.

};
struct kvm_sdei_kvm_event_state {
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 1e8e213c9d70..5f7a37dcaa77 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -314,9 +314,11 @@ static unsigned long kvm_sdei_hypercall_complete(struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+ struct kvm_sdei_event *kse = NULL;
struct kvm_sdei_kvm_event *kske = NULL;
struct kvm_sdei_vcpu_event *ksve = NULL;
struct kvm_sdei_vcpu_regs *regs;
+ kvm_sdei_notifier notifier;
unsigned long ret = SDEI_SUCCESS;
int index;
@@ -349,6 +351,13 @@ static unsigned long kvm_sdei_hypercall_complete(struct kvm_vcpu *vcpu,
*vcpu_cpsr(vcpu) = regs->pstate;
*vcpu_pc(vcpu) = regs->pc;
+ /* Notifier */
+ kske = ksve->kske;
+ kse = kske->kse;
+ notifier = (kvm_sdei_notifier)(kse->state.notifier);
+ if (notifier)
+ notifier(vcpu, kse->state.num, KVM_SDEI_NOTIFY_COMPLETED);
+
/* Inject interrupt if needed */
if (resume)
kvm_inject_irq(vcpu);
@@ -358,7 +367,6 @@ static unsigned long kvm_sdei_hypercall_complete(struct kvm_vcpu *vcpu,
* event state as it's not destroyed because of the reference
* count.
*/
- kske = ksve->kske;
ksve->state.refcount--;
kske->state.refcount--;
if (!ksve->state.refcount) {
@@ -746,6 +754,35 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
return 1;
}
+int kvm_sdei_register_notifier(struct kvm *kvm,
+ unsigned long num,
+ kvm_sdei_notifier notifier)
+{
+ struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+ struct kvm_sdei_event *kse = NULL;
+ int ret = 0;
+
+ if (!ksdei) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ spin_lock(&ksdei->lock);
+
+ kse = kvm_sdei_find_event(kvm, num);
+ if (!kse) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ kse->state.notifier = (unsigned long)notifier;
+
+unlock:
+ spin_unlock(&ksdei->lock);
+out:
+ return ret;
+}
+
void kvm_sdei_deliver(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
@@ -755,6 +792,7 @@ void kvm_sdei_deliver(struct kvm_vcpu *vcpu)
struct kvm_sdei_kvm_event *kske = NULL;
struct kvm_sdei_vcpu_event *ksve = NULL;
struct kvm_sdei_vcpu_regs *regs = NULL;
+ kvm_sdei_notifier notifier;
unsigned long pstate;
int index = 0;
@@ -826,6 +864,11 @@ void kvm_sdei_deliver(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) = pstate;
*vcpu_pc(vcpu) = kske->state.entries[index];
+ /* Notifier */
+ notifier = (kvm_sdei_notifier)(kse->state.notifier);
+ if (notifier)
+ notifier(vcpu, kse->state.num, KVM_SDEI_NOTIFY_DELIVERED);
+
unlock:
spin_unlock(&vsdei->lock);
}