[PATCH v5 07/22] KVM: arm64: Support SDEI_EVENT_UNREGISTER hypercall
From: Gavin Shan
Date: Tue Mar 22 2022 - 04:09:04 EST
This adds support for the SDEI_EVENT_UNREGISTER hypercall, which the
guest uses to unregister an SDEI event. Once unregistered, the event
is no longer raised to the guest. The event is also disabled
automatically on unregistration.
An event that is currently being handled can't be unregistered right
away. Instead, the unregistration-pending state is set on the event so
that it is unregistered once its handler completes through the
SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall.
Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
arch/arm64/kvm/sdei.c | 133 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 133 insertions(+)
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 4488d3f044f2..36eda31e0392 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -325,6 +325,135 @@ static unsigned long hypercall_context(struct kvm_vcpu *vcpu)
return ret;
}
+static unsigned long
+unregister_one_event(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ struct kvm_sdei_registered_event *registered_event)
+{
+ struct kvm_vcpu *vcpup;
+ struct kvm_sdei_vcpu *vsdei;
+ struct kvm_sdei_exposed_event *exposed_event;
+ int index;
+ bool pending;
+ unsigned long i, ret = SDEI_SUCCESS;
+
+ /*
+ * Cancel all vcpu events that have been queued but not yet
+ * dispatched. If a vcpu event has already been dispatched, mark
+ * it as pending for unregistration; the unregistration is then
+ * carried out when the event handler completes.
+ */
+ exposed_event = registered_event->exposed_event;
+ kvm_for_each_vcpu(i, vcpup, kvm) {
+ vsdei = vcpup->arch.sdei;
+ if (!vsdei)
+ continue;
+
+ /*
+ * Private vcpu events may be unregistered either on one
+ * specific vcpu or on all vcpus; a non-NULL @vcpu selects
+ * the former case. Shared vcpu events are always
+ * unregistered on all vcpus.
+ */
+ if (kvm_sdei_is_private(exposed_event->state.type) &&
+ vcpu && vcpu != vcpup)
+ continue;
+
+ if (registered_event->vcpu_event_count > 0) {
+ spin_lock(&vsdei->lock);
+ pending = remove_all_vcpu_events(vcpup,
+ registered_event->state.num);
+ spin_unlock(&vsdei->lock);
+ } else {
+ pending = false;
+ }
+
+ /*
+ * For a private event, the unregistration-pending state is
+ * tracked per vcpu and has to be updated individually here.
+ * For a shared event, that state is updated once, according
+ * to @ret, after the iteration is done.
+ */
+ ret = pending ? SDEI_PENDING : ret;
+ if (!kvm_sdei_is_private(exposed_event->state.type))
+ continue;
+
+ index = kvm_sdei_vcpu_index(vcpup, exposed_event);
+ if (pending) {
+ kvm_sdei_set_unregister_pending(registered_event,
+ index);
+ } else {
+ kvm_sdei_clear_enabled(registered_event, index);
+ kvm_sdei_clear_registered(registered_event, index);
+ }
+ }
+
+ /*
+ * Update the unregistration-pending state for the shared event
+ * in one go.
+ */
+ if (kvm_sdei_is_shared(exposed_event->state.type)) {
+ index = kvm_sdei_vcpu_index(vcpu, exposed_event);
+ if (ret == SDEI_PENDING) {
+ kvm_sdei_set_unregister_pending(registered_event,
+ index);
+ } else {
+ kvm_sdei_clear_enabled(registered_event, index);
+ kvm_sdei_clear_registered(registered_event, index);
+ }
+ }
+
+ /* Destroy the registered event instance if needed */
+ if (kvm_sdei_none_registered(registered_event))
+ remove_one_registered_event(kvm, registered_event);
+
+ return ret;
+}
+
+static unsigned long hypercall_unregister(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+ struct kvm_sdei_exposed_event *exposed_event;
+ struct kvm_sdei_registered_event *registered_event;
+ unsigned long event_num = smccc_get_arg1(vcpu);
+ int index;
+ unsigned long ret = SDEI_SUCCESS;
+
+ if (!kvm_sdei_is_supported(event_num))
+ return SDEI_INVALID_PARAMETERS;
+
+ spin_lock(&ksdei->lock);
+
+ /* Check if the registered event exists */
+ registered_event = find_registered_event(kvm, event_num);
+ if (!registered_event) {
+ ret = SDEI_INVALID_PARAMETERS;
+ goto unlock;
+ }
+
+ /* Check if the event has been registered */
+ exposed_event = registered_event->exposed_event;
+ index = kvm_sdei_vcpu_index(vcpu, exposed_event);
+ if (!kvm_sdei_is_registered(registered_event, index)) {
+ ret = SDEI_DENIED;
+ goto unlock;
+ }
+
+ /* Check if the event is already pending unregistration */
+ if (kvm_sdei_is_unregister_pending(registered_event, index)) {
+ ret = SDEI_PENDING;
+ goto unlock;
+ }
+
+ ret = unregister_one_event(kvm, vcpu, registered_event);
+
+unlock:
+ spin_unlock(&ksdei->lock);
+
+ return ret;
+}
+
int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
@@ -365,7 +494,11 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
break;
case SDEI_1_0_FN_SDEI_EVENT_COMPLETE:
case SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME:
+ ret = SDEI_NOT_SUPPORTED;
+ break;
case SDEI_1_0_FN_SDEI_EVENT_UNREGISTER:
+ ret = hypercall_unregister(vcpu);
+ break;
case SDEI_1_0_FN_SDEI_EVENT_STATUS:
case SDEI_1_0_FN_SDEI_EVENT_GET_INFO:
case SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET:
--
2.23.0