diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index c8197d60b..e95ebb9ea 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -93,6 +93,10 @@ bool is_postlaunched_vm(const struct acrn_vm *vm)
 	return (get_vm_config(vm->vm_id)->load_order == POST_LAUNCHED_VM);
 }
 
+bool is_valid_postlaunched_vmid(uint16_t vm_id)
+{
+	return ((vm_id < CONFIG_MAX_VM_NUM) && is_postlaunched_vm(get_vm_from_vmid(vm_id)));
+}
 /**
  * @pre vm != NULL
  * @pre vm->vmid < CONFIG_MAX_VM_NUM
diff --git a/hypervisor/arch/x86/guest/vmcall.c b/hypervisor/arch/x86/guest/vmcall.c
index 70d89d715..97ea61821 100644
--- a/hypervisor/arch/x86/guest/vmcall.c
+++ b/hypervisor/arch/x86/guest/vmcall.c
@@ -26,7 +26,6 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 	/* hypercall param1 is a relative vm id from SOS view */
 	uint16_t relative_vm_id = (uint16_t)param1;
 	uint16_t vm_id = rel_vmid_2_vmid(sos_vm->vm_id, relative_vm_id);
-	bool vmid_is_valid = (vm_id < CONFIG_MAX_VM_NUM) ? true : false;
 	int32_t ret = -1;
 
 	switch (hypcall_id) {
@@ -52,28 +51,28 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 
 	case HC_DESTROY_VM:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_destroy_vm(vm_id);
 		}
 		break;
 
 	case HC_START_VM:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_start_vm(vm_id);
 		}
 		break;
 
 	case HC_RESET_VM:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_reset_vm(vm_id);
 		}
 		break;
 
 	case HC_PAUSE_VM:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_pause_vm(vm_id);
 		}
 		break;
@@ -84,14 +83,14 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 
 	case HC_SET_VCPU_REGS:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_set_vcpu_regs(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_SET_IRQLINE:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_set_irqline(sos_vm, vm_id,
 					(struct acrn_irqline_ops *)&param2);
 		}
@@ -99,14 +98,14 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 
 	case HC_INJECT_MSI:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_inject_msi(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_SET_IOREQ_BUFFER:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_set_ioreq_buffer(sos_vm, vm_id, param2);
 		}
 		break;
@@ -114,7 +113,7 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 	case HC_NOTIFY_REQUEST_FINISH:
 		/* param1: relative vmid to sos, vm_id: absolute vmid
 		 * param2: vcpu_id */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_notify_ioreq_finish(vm_id,
 				(uint16_t)param2);
 		}
@@ -126,7 +125,7 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 
 	case HC_VM_WRITE_PROTECT_PAGE:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_write_protect_page(sos_vm, vm_id, param2);
 		}
 		break;
@@ -141,49 +140,49 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 
 	case HC_VM_GPA2HPA:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if ((vm_id < CONFIG_MAX_VM_NUM) && !is_prelaunched_vm(get_vm_from_vmid(vm_id))) {
 			ret = hcall_gpa_to_hpa(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_ASSIGN_PCIDEV:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_assign_pcidev(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_DEASSIGN_PCIDEV:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_deassign_pcidev(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_ASSIGN_MMIODEV:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_assign_mmiodev(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_DEASSIGN_MMIODEV:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_deassign_mmiodev(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_SET_PTDEV_INTR_INFO:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_set_ptdev_intr_info(sos_vm, vm_id, param2);
 		}
 		break;
 
 	case HC_RESET_PTDEV_INTR_INFO:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_reset_ptdev_intr_info(sos_vm, vm_id, param2);
 		}
 		break;
@@ -194,7 +193,7 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 
 	case HC_VM_INTR_MONITOR:
 		/* param1: relative vmid to sos, vm_id: absolute vmid */
-		if (vmid_is_valid) {
+		if (is_valid_postlaunched_vmid(vm_id)) {
 			ret = hcall_vm_intr_monitor(sos_vm, vm_id, param2);
 		}
 		break;
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index 13b19827e..284316da0 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -228,7 +228,7 @@ int32_t hcall_destroy_vm(uint16_t vmid)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	get_vm_lock(target_vm);
-	if (is_paused_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (is_paused_vm(target_vm)) {
 		/* TODO: check target_vm guest_flags */
 		ret = shutdown_vm(target_vm);
 	}
@@ -253,7 +253,7 @@ int32_t hcall_start_vm(uint16_t vmid)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	get_vm_lock(target_vm);
-	if ((is_created_vm(target_vm)) && (is_postlaunched_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
+	if ((is_created_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
 		/* TODO: check target_vm guest_flags */
 		start_vm(target_vm);
 		ret = 0;
@@ -280,7 +280,7 @@ int32_t hcall_pause_vm(uint16_t vmid)
 	int32_t ret = -1;
 
 	get_vm_lock(target_vm);
-	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (!is_poweroff_vm(target_vm)) {
 		/* TODO: check target_vm guest_flags */
 		pause_vm(target_vm);
 		ret = 0;
@@ -307,7 +307,7 @@ int32_t hcall_reset_vm(uint16_t vmid)
 	int32_t ret = -1;
 
 	get_vm_lock(target_vm);
-	if (is_paused_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (is_paused_vm(target_vm)) {
 		/* TODO: check target_vm guest_flags */
 		ret = reset_vm(target_vm);
 	}
@@ -339,8 +339,7 @@ int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 
 	get_vm_lock(target_vm);
 	/* Only allow setup init ctx while target_vm is inactive */
-	if ((!is_poweroff_vm(target_vm)) && (param != 0U) && (is_postlaunched_vm(target_vm)) &&
-			(target_vm->state != VM_RUNNING)) {
+	if ((!is_poweroff_vm(target_vm)) && (param != 0U) && (target_vm->state != VM_RUNNING)) {
 		if (copy_from_gpa(vm, &vcpu_regs, param, sizeof(vcpu_regs)) != 0) {
 		} else if (vcpu_regs.vcpu_id >= MAX_VCPUS_PER_VM) {
 			pr_err("%s: invalid vcpu_id for set_vcpu_regs\n", __func__);
@@ -378,7 +377,7 @@ int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid,
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 	int32_t ret = -1;
 
-	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (!is_poweroff_vm(target_vm)) {
 		if (ops->gsi < get_vm_gsicount(vm)) {
 			if (ops->gsi < vpic_pincount()) {
 				/*
@@ -467,7 +466,7 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	int32_t ret = -1;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (!is_poweroff_vm(target_vm)) {
 		struct acrn_msi_entry msi;
 
 		if (copy_from_gpa(vm, &msi, param, sizeof(msi)) == 0) {
@@ -525,7 +524,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
 	int32_t ret = -1;
 
 	get_vm_lock(target_vm);
-	if (is_created_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (is_created_vm(target_vm)) {
 		struct acrn_set_ioreq_buffer iobuf;
 
 		if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) == 0) {
@@ -569,7 +568,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
 	int32_t ret = -1;
 
 	/* make sure we have set req_buf */
-	if ((!is_poweroff_vm(target_vm)) && (is_postlaunched_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
+	if ((!is_poweroff_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
 		dev_dbg(DBG_LEVEL_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
 			vmid, vcpu_id);
 
@@ -787,7 +786,7 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 	int32_t ret = -1;
 
-	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (!is_poweroff_vm(target_vm)) {
 		struct wp_data wp;
 
 		if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) == 0) {
@@ -820,8 +819,8 @@ int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	(void)memset((void *)&v_gpa2hpa, 0U, sizeof(v_gpa2hpa));
-	if (!is_poweroff_vm(target_vm) && (!is_prelaunched_vm(target_vm))
-		&& (copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) == 0)) {
+	if (!is_poweroff_vm(target_vm) &&
+		(copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) == 0)) {
 		v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
 		if (v_gpa2hpa.hpa == INVALID_HPA) {
 			pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
@@ -854,7 +853,7 @@ int32_t hcall_assign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	/* We should only assign a device to a post-launched VM at creating time for safety, not runtime or other cases*/
-	if (is_created_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (is_created_vm(target_vm)) {
 		if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) == 0) {
 			ret = vpci_assign_pcidev(target_vm, &pcidev);
 		}
@@ -883,7 +882,7 @@ int32_t hcall_deassign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	/* We should only de-assign a device from a post-launched VM at creating/shutdown/reset time */
-	if ((is_paused_vm(target_vm) || is_created_vm(target_vm)) && is_postlaunched_vm(target_vm)) {
+	if ((is_paused_vm(target_vm) || is_created_vm(target_vm))) {
 		if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) == 0) {
 			ret = vpci_deassign_pcidev(target_vm, &pcidev);
 		}
@@ -912,7 +911,7 @@ int32_t hcall_assign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	/* We should only assign a device to a post-launched VM at creating time for safety, not runtime or other cases*/
-	if (is_created_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (is_created_vm(target_vm)) {
 		if (copy_from_gpa(vm, &mmiodev, param, sizeof(mmiodev)) == 0) {
 			ret = deassign_mmio_dev(vm, &mmiodev);
 			if (ret == 0) {
@@ -944,7 +943,7 @@ int32_t hcall_deassign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	/* We should only de-assign a device from a post-launched VM at creating/shutdown/reset time */
-	if ((is_paused_vm(target_vm) || is_created_vm(target_vm)) && is_postlaunched_vm(target_vm)) {
+	if ((is_paused_vm(target_vm) || is_created_vm(target_vm))) {
 		if (copy_from_gpa(vm, &mmiodev, param, sizeof(mmiodev)) == 0) {
 			ret = deassign_mmio_dev(target_vm, &mmiodev);
 			if (ret == 0) {
@@ -974,7 +973,7 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t pa
 	int32_t ret = -1;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (!is_poweroff_vm(target_vm)) {
 		struct hc_ptdev_irq irq;
 
 		if (copy_from_gpa(vm, &irq, param, sizeof(irq)) == 0) {
@@ -1026,7 +1025,7 @@ hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	int32_t ret = -1;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (!is_poweroff_vm(target_vm)) {
 		struct hc_ptdev_irq irq;
 
 		if (copy_from_gpa(vm, &irq, param, sizeof(irq)) == 0) {
@@ -1162,7 +1161,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	uint64_t hpa;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
+	if (!is_poweroff_vm(target_vm)) {
 		/* the param for this hypercall is page aligned */
 		hpa = gpa2hpa(vm, param);
 		if (hpa != INVALID_HPA) {
@@ -1208,14 +1207,11 @@ int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
  * @pre Pointer vm shall point to SOS_VM
  * @return 0 on success, non-zero on error.
  */
-int32_t hcall_set_callback_vector(const struct acrn_vm *vm, uint64_t param)
+int32_t hcall_set_callback_vector(__unused const struct acrn_vm *vm, uint64_t param)
 {
 	int32_t ret;
 
-	if (!is_sos_vm(vm)) {
-		pr_err("%s: Targeting to service vm", __func__);
-		ret = -EPERM;
-	} else if ((param > NR_MAX_VECTOR) || (param < VECTOR_DYNAMIC_START)) {
+	if ((param > NR_MAX_VECTOR) || (param < VECTOR_DYNAMIC_START)) {
 		pr_err("%s: Invalid passed vector\n", __func__);
 		ret = -EINVAL;
 	} else {
diff --git a/hypervisor/include/arch/x86/guest/vm.h b/hypervisor/include/arch/x86/guest/vm.h
index d4e599751..6fb832e94 100644
--- a/hypervisor/include/arch/x86/guest/vm.h
+++ b/hypervisor/include/arch/x86/guest/vm.h
@@ -225,6 +225,7 @@ bool is_created_vm(const struct acrn_vm *vm);
 bool is_paused_vm(const struct acrn_vm *vm);
 bool is_sos_vm(const struct acrn_vm *vm);
 bool is_postlaunched_vm(const struct acrn_vm *vm);
+bool is_valid_postlaunched_vmid(uint16_t vm_id);
 bool is_prelaunched_vm(const struct acrn_vm *vm);
 uint16_t get_vmid_by_uuid(const uint8_t *uuid);
 struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
diff --git a/hypervisor/include/common/hypercall.h b/hypervisor/include/common/hypercall.h
index ec30d36cb..34ff6d6eb 100644
--- a/hypervisor/include/common/hypercall.h
+++ b/hypervisor/include/common/hypercall.h
@@ -435,7 +435,7 @@ int32_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu);
  * @pre Pointer vm shall point to SOS_VM
  * @return 0 on success, non-zero on error.
  */
-int32_t hcall_set_callback_vector(const struct acrn_vm *vm, uint64_t param);
+int32_t hcall_set_callback_vector(__unused const struct acrn_vm *vm, uint64_t param);
 
 /**
  * @}