Re: [PATCH v14 1/6] KVM: s390: pv: asynchronous destroy for reboot
From: Claudio Imbrenda
Date: Tue Oct 04 2022 - 12:56:51 EST
On Tue, 4 Oct 2022 15:43:29 +0200
Steffen Eiden <seiden@xxxxxxxxxxxxx> wrote:
> Hi Claudio,
>
> LGTM, but I have some nits.
[...]
> > diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> > index b7ef0b71014d..d0027964a6f5 100644
> > --- a/arch/s390/kvm/kvm-s390.c
> > +++ b/arch/s390/kvm/kvm-s390.c
> > @@ -209,6 +209,8 @@ unsigned int diag9c_forwarding_hz;
> > module_param(diag9c_forwarding_hz, uint, 0644);
> > MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
> >
>
> IMO it would be better to initialize this variable explicitly here.
and that's how it was a few versions ago, but
> You do this later in patch [6/6].
> What's the reason not to do this at this point?
Janosch asked to split it this way :)
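For reference, what patch 6/6 then adds on top is roughly the following; the exact default value and permission bits here are from memory, so take it as a sketch rather than the final hunk:

    /* controls whether the asynchronous destroy path is used on reboot */
    static int async_destroy = 1;
    module_param(async_destroy, int, 0444);
    MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");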
>
>
> > +static int async_destroy;
> > +
> > /*
> > * For now we handle at most 16 double words as this is what the s390 base
> > * kernel handles and stores in the prefix page. If we ever need to go beyond
> > @@ -2504,9 +2506,13 @@ static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
> >
> > static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
> > {
> > + const bool needslock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
> `need_lock`? or just `lock`?
hmm, maybe need_lock
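For reference, with the rename the conditional locking then brackets the whole switch roughly like this (just a sketch, everything inside the switch stays untouched):

    static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
    {
        /* only KVM_PV_ASYNC_CLEANUP_PERFORM must run without kvm->lock */
        const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
        int r = 0;

        if (need_lock)
            mutex_lock(&kvm->lock);

        switch (cmd->cmd) {
        /* ... all existing KVM_PV_* cases, unchanged ... */
        default:
            r = -ENOTTY;
        }

        if (need_lock)
            mutex_unlock(&kvm->lock);
        return r;
    }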
> > + void __user *argp = (void __user *)cmd->data;
> > int r = 0;
> > u16 dummy;
> > - void __user *argp = (void __user *)cmd->data;
> > +
> > + if (needslock)
> > + mutex_lock(&kvm->lock);
> >
> > switch (cmd->cmd) {
> > case KVM_PV_ENABLE: {
> > @@ -2540,6 +2546,31 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
> > set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
> > break;
> > }
[...]
> > -/* this should not fail, but if it does, we must not free the donated memory */
> > -int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
> > +/**
> > + * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
> > + * @kvm: the KVM that was associated with this leftover protected VM
> > + * @leftover: details about the leftover protected VM that needs a clean up
> > + * @rc: the RC code of the Destroy Secure Configuration UVC
> > + * @rrc: the RRC code of the Destroy Secure Configuration UVC
> > + *
> > + * Destroy one leftover protected VM.
> > + * On success, kvm->mm->context.protected_count will be decremented atomically
> > + * and all other resources used by the VM will be freed.
> > + *
> > + * Return: 0 in case of success, otherwise 1
> > + */
> > +static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
> > + struct pv_vm_to_be_destroyed *leftover,
> > + u16 *rc, u16 *rrc)
> > {
> > int cc;
> >
> > - cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
> > - UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
> > - WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
> > + cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
> > + KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
> > + WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
> > + if (cc)
> > + return cc;
> maybe set the handle to zero here to be extra sure.
> `leftover` will get freed directly after this function,
> but that might change and I assume we do not want to end up with
> invalid handles hanging around.
I don't see a reason why it should change.
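Just so we are talking about the same thing, the suggestion would boil down to this right after the UVC succeeded:

    if (cc)
        return cc;
    /* defensive only: the caller frees *leftover immediately afterwards */
    leftover->handle = 0;

Since the caller frees the structure straight away, I'd rather not add it.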
> > /*
> > - * if the mm still has a mapping, make all its pages accessible
> > - * before destroying the guest
> > + * Intentionally leak unusable memory. If the UVC fails, the memory
> > + * used for the VM and its metadata is permanently unusable.
> > + * This can only happen in case of a serious KVM or hardware bug; it
> > + * is not expected to happen in normal operation.
> > */
> > - if (mmget_not_zero(kvm->mm)) {
> > - s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
> > - mmput(kvm->mm);
> > + free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
> > + free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
> > + vfree(leftover->stor_var);
> > + atomic_dec(&kvm->mm->context.protected_count);
> > + return 0;
> > +}
[...]
> > + /* Cleanup all protected VMs in the need_cleanup list */
> > + while (!list_empty(&kvm->arch.pv.need_cleanup)) {
> > + cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
> > + need_zap = true;
> > + if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
> > + cc = 1;
> > + /* do not overwrite a previous error code */
> use UVC_RC_EXECUTED
will fix (see the sketch after this hunk)
> > + if (*rc == 1) {
> > + *rc = _rc;
> > + *rrc = _rrc;
> > + }
> > + }
> > + list_del(&cur->list);
> > + kfree(cur);
> > + }
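With UVC_RC_EXECUTED (the existing success code from asm/uv.h) the check above becomes:

    /* do not overwrite a previous error code */
    if (*rc == UVC_RC_EXECUTED) {
        *rc = _rc;
        *rrc = _rrc;
    }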
> > +
> > + /*
> > + * If the mm still has a mapping, try to mark all its pages as
> > + * accessible. The counter should not reach zero before this
> > + * cleanup has been performed.
> > + */
> > + if (need_zap && mmget_not_zero(kvm->mm)) {
> > + s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
> > + mmput(kvm->mm);
> > + }
> > +
> > + /* Now the counter can safely reach 0 */
> > + atomic_dec(&kvm->mm->context.protected_count);
> > + return cc ? -EIO : 0;
> > +}
> > +
> > +/**
> > + * kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
> > + * @kvm: the VM previously associated with the protected VM
> > + * @rc: return value for the RC field of the UVCB
> > + * @rrc: return value for the RRC field of the UVCB
> > + *
> > + * Tear down the protected VM that had been previously prepared for teardown
> > + * using kvm_s390_pv_set_aside_vm. Ideally this should be called by
> > + * userspace asynchronously from a separate thread.
> > + *
> > + * Context: kvm->lock must not be held.
> > + *
> > + * Return: 0 in case of success, -EINVAL if no protected VM had been
> > + * prepared for asynchronous teardown, -EIO in case of other errors.
> > + */
> > +int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
> > +{
> > + struct pv_vm_to_be_destroyed *p;
> > + int ret = 0;
> > +
> > + lockdep_assert_not_held(&kvm->lock);
> > + mutex_lock(&kvm->lock);
> > + p = kvm->arch.pv.set_aside;
> > + kvm->arch.pv.set_aside = NULL;
> > + mutex_unlock(&kvm->lock);
> > + if (!p)
> > + return -EINVAL;
> > +
> > + /* When a fatal signal is received, stop immediately */
> > + if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
> > + goto done;
> > + if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
> > + ret = -EIO;
> > + kfree(p);
> > + p = NULL;
> > +done:
> > + /*
> > + * p is not NULL if we aborted because of a fatal signal, in which
> > + * case queue the leftover for later cleanup.
> > + */
> > + if (p) {
> > + mutex_lock(&kvm->lock);
> > + list_add(&p->list, &kvm->arch.pv.need_cleanup);
> > + mutex_unlock(&kvm->lock);
> > + /* Did not finish, but pretend things went well */
> use UVC_RC_EXECUTED
will fix as well (sketch after the function below)
> > + *rc = 1;
> > + *rrc = 42;
> > + }
> > + return ret;
> > +}
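And the same constant in the set-aside path:

    /* Did not finish, but pretend things went well */
    *rc = UVC_RC_EXECUTED;
    *rrc = 42;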
> > +
> > static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
> > struct mm_struct *mm)
> > {
> > diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> > index eed0315a77a6..02602c5c1975 100644
> > --- a/include/uapi/linux/kvm.h
> > +++ b/include/uapi/linux/kvm.h
> > @@ -1739,6 +1739,8 @@ enum pv_cmd_id {
> > KVM_PV_UNSHARE_ALL,
> > KVM_PV_INFO,
> > KVM_PV_DUMP,
> > + KVM_PV_ASYNC_CLEANUP_PREPARE,
> > + KVM_PV_ASYNC_CLEANUP_PERFORM,
> > };
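Since the kerneldoc above says the expensive PERFORM step should ideally be driven from a separate userspace thread, here is a rough sketch of how a VMM could use the two new commands on reboot; the vm_fd plumbing and error handling are assumptions, not part of this series:

    #include <stdint.h>
    #include <pthread.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* background thread: slow teardown of the set-aside protected VM */
    static void *pv_async_teardown(void *arg)
    {
        int vm_fd = (int)(intptr_t)arg;
        struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ASYNC_CLEANUP_PERFORM };

        /* may run for a long time; does not hold kvm->lock */
        ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
        return NULL;
    }

    static int pv_reboot(int vm_fd)
    {
        struct kvm_pv_cmd prepare = { .cmd = KVM_PV_ASYNC_CLEANUP_PREPARE };
        pthread_t tid;

        /* quick: set the current protected VM aside, under kvm->lock */
        if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &prepare))
            return -1;

        /* the slow cleanup happens asynchronously */
        if (pthread_create(&tid, NULL, pv_async_teardown, (void *)(intptr_t)vm_fd) == 0)
            pthread_detach(tid);

        /* ... the replacement protected guest can now be created with KVM_PV_ENABLE ... */
        return 0;
    }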
> >
> > struct kvm_pv_cmd {