// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
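
/*
 * Convert a Linux error code returned by an SBI extension handler into
 * the SBI error code that is reported back to the guest.
 */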
static int kvm_linux_err_map_sbi(int err)
{
	switch (err) {
	case 0:
		return SBI_SUCCESS;
	case -EPERM:
		return SBI_ERR_DENIED;
	case -EINVAL:
		return SBI_ERR_INVALID_PARAM;
	case -EFAULT:
		return SBI_ERR_INVALID_ADDRESS;
	case -EOPNOTSUPP:
		return SBI_ERR_NOT_SUPPORTED;
	case -EALREADY:
		return SBI_ERR_ALREADY_AVAILABLE;
	default:
		return SBI_ERR_FAILURE;
	}
}
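
/*
 * Placeholder for the SBI v0.1 (legacy) extension when it is not compiled
 * in: the NULL handler makes kvm_riscv_vcpu_sbi_ecall() treat such calls
 * as unsupported, while keeping the sbi_ext[] table below unconditional.
 */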
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif
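
/* Table of all SBI extensions emulated in-kernel, searched by extension ID. */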
static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
	&vcpu_sbi_ext_v01,
	&vcpu_sbi_ext_base,
	&vcpu_sbi_ext_time,
	&vcpu_sbi_ext_ipi,
	&vcpu_sbi_ext_rfence,
	&vcpu_sbi_ext_srst,
	&vcpu_sbi_ext_hsm,
	&vcpu_sbi_ext_experimental,
	&vcpu_sbi_ext_vendor,
};
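
/*
 * Forward an SBI call that the kernel does not handle to userspace via the
 * KVM_EXIT_RISCV_SBI exit, copying the call arguments into the run struct.
 */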
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}
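
/*
 * Power off all VCPUs and report a system-level event (shutdown or reset)
 * to userspace through KVM_EXIT_SYSTEM_EVENT.
 */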
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
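
/*
 * Complete an SBI call that was forwarded to userspace: copy the return
 * values provided by userspace back into the guest's a0/a1 and skip the
 * trapping ecall instruction.
 */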
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}
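
/* Look up the SBI extension whose ID range covers @extid, or NULL if none. */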
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
{
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i]->extid_start <= extid &&
		    sbi_ext[i]->extid_end >= extid)
			return sbi_ext[i];
	}

	return NULL;
}
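
/*
 * Top-level SBI ecall handler: dispatch the call to the matching extension,
 * then either redirect a trap to the guest, exit to userspace, or convert
 * the handler's Linux error code into an SBI error for the guest.
 */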
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	bool userspace_exit = false;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = { 0 };
	unsigned long out_val = 0;
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (utrap.scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		utrap.sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (userspace_exit) {
		next_sepc = false;
		ret = 0;
	} else {
		/*
		 * The SBI extension handler always returns a Linux error code.
		 * Convert it to the SBI-specific error code that can be
		 * propagated to the SBI caller.
		 */
		ret = kvm_linux_err_map_sbi(ret);
		cp->a0 = ret;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	if (!ext_is_v01)
		cp->a1 = out_val;

	return ret;
}