/*
 * Copyright (C) 2018 Intel Corporation.
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <hypervisor.h>

#define ACRN_DBG_IOREQUEST	6U

uint32_t acrn_vhm_vector = VECTOR_VIRT_IRQ_VHM;
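
/*
 * Signal the VHM module in the SOS that a new request is pending by
 * injecting the upcall vector, acrn_vhm_vector, into SOS vcpu 0.
 */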
static void fire_vhm_interrupt(void)
{
	/*
	 * Use vLAPIC to inject the vector to SOS vcpu 0 if vLAPIC is enabled;
	 * otherwise, send an IPI hardcoded to BOOT_CPU_ID.
	 */
	struct acrn_vm *vm0;
	struct acrn_vcpu *vcpu;

	vm0 = get_vm_from_vmid(0U);
	vcpu = vcpu_from_vid(vm0, 0U);

	vlapic_intr_edge(vcpu, acrn_vhm_vector);
}

#if defined(HV_DEBUG)
static void acrn_print_request(uint16_t vcpu_id, const struct vhm_request *req)
{
	switch (req->type) {
	case REQ_MMIO:
		dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=MMIO]", vcpu_id);
		dev_dbg(ACRN_DBG_IOREQUEST,
			"gpa=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
			req->reqs.mmio.address,
			req->reqs.mmio.direction,
			req->reqs.mmio.size,
			req->reqs.mmio.value,
			req->processed);
		break;
	case REQ_PORTIO:
		dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=PORTIO]", vcpu_id);
		dev_dbg(ACRN_DBG_IOREQUEST,
			"IO=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
			req->reqs.pio.address,
			req->reqs.pio.direction,
			req->reqs.pio.size,
			req->reqs.pio.value,
			req->processed);
		break;
	default:
		dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=%d] unsupported type",
			vcpu_id, req->type);
		break;
	}
}
#endif

/**
 * @brief Reset the status of all IO requests of the VM
 *
 * @param vm The VM whose IO requests are to be reset
 *
 * @return None
 */
void reset_vm_ioreqs(struct acrn_vm *vm)
{
	uint16_t i;

	for (i = 0U; i < VHM_REQUEST_MAX; i++) {
		set_vhm_req_state(vm, i, REQ_STATE_FREE);
	}
}
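
/* Return true if the vcpu's pending VHM request has been marked complete */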
static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
{
	return (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_COMPLETE);
}
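
/*
 * Completion of an IO request can be signalled in one of two per-VM modes,
 * selected via Kconfig (notification mode is the default):
 *
 *  - notification mode: the issuing vcpu is paused after the request is
 *    delivered; once the SOS has handled the request, it issues a hypercall
 *    and the hypervisor sends an IPI to wake the vcpu up again;
 *  - polling mode: the issuing vcpu polls the request state from idle
 *    instead, which skips the completion hypercall and wake-up IPI and
 *    saves roughly 5000 cpu cycles per request at the cost of extra power.
 */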

/**
 * @brief Deliver \p io_req to SOS and suspend \p vcpu until its completion
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 * @param io_req The I/O request holding the details of the MMIO access
 *
 * @pre vcpu != NULL && io_req != NULL
 */
int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
	union vhm_request_buffer *req_buf = NULL;
	struct vhm_request *vhm_req;
	bool is_polling = false;
	uint16_t cur;

	if (vcpu->vm->sw.io_shared_page == NULL) {
		return -EINVAL;
	}

	ASSERT(get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_FREE,
		"VHM request buffer is busy");

	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
	cur = vcpu->vcpu_id;
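
	/*
	 * The request buffer lives in a page shared with the SOS; stac()/clac()
	 * set and clear EFLAGS.AC around the access so that SMAP does not
	 * fault it.
	 */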
	stac();
	vhm_req = &req_buf->req_queue[cur];
	/* ACRN inserts the request into VHM and injects an upcall */
	vhm_req->type = io_req->type;
	(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
		&io_req->reqs, sizeof(union vhm_io_request));
	if (vcpu->vm->sw.is_completion_polling) {
		vhm_req->completion_polling = 1U;
		is_polling = true;
	}
	clac();

	/* In notification mode, pause the vcpu and wait for VHM to handle the
	 * MMIO request.
	 * TODO: when pause_vcpu is changed to switch the vcpu out directly, we
	 * should fix the race between the req.processed update and the vcpu pause.
	 */
	if (!is_polling) {
		pause_vcpu(vcpu, VCPU_PAUSED);
	}

	/* The signal must be cleared before we mark the request as pending.
	 * Once it is pending, VHM may process the request and signal us
	 * before we perform the upcall, because VHM can work in polling mode
	 * without waiting for the upcall.
	 */
	set_vhm_req_state(vcpu->vm, vcpu->vcpu_id, REQ_STATE_PENDING);

#if defined(HV_DEBUG)
	stac();
	acrn_print_request(vcpu->vcpu_id, vhm_req);
	clac();
#endif

	/* signal VHM */
	fire_vhm_interrupt();

	/* In polling mode, poll until the request completes */
	if (is_polling) {
		/*
		 * Currently the only case that schedules this vcpu out of the
		 * IO completion polling state is pause_vcpu to VCPU_ZOMBIE.
		 * In that case we cannot return to the polling state, which is
		 * acceptable since IO completion need not be handled in the
		 * zombie state.
		 */
		while (!need_reschedule(vcpu->pcpu_id)) {
			if (has_complete_ioreq(vcpu)) {
				/* the pending ioreq has completed */
				emulate_io_post(vcpu);
				break;
			}
			asm_pause();
		}
	}

	return 0;
}
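
/**
 * @brief Get the state of a VHM request
 *
 * @param vm The VM whose request state to query
 * @param vhm_req_id Slot index of the request in the shared request buffer
 *
 * @return The request state, or 0xffffffffU if the IO shared page is not set up
 */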
uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id)
{
	uint32_t state;
	union vhm_request_buffer *req_buf = NULL;
	struct vhm_request *vhm_req;

	req_buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
	if (req_buf == NULL) {
		state = 0xffffffffU;
	} else {
		stac();
		vhm_req = &req_buf->req_queue[vhm_req_id];
		state = atomic_load32(&vhm_req->processed);
		clac();
	}

	return state;
}
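
/**
 * @brief Set the state of a VHM request
 *
 * The store is a no-op if the VM's IO shared page has not been set up.
 *
 * @param vm The VM whose request state to update
 * @param vhm_req_id Slot index of the request in the shared request buffer
 * @param state The new request state
 */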
void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state)
{
	union vhm_request_buffer *req_buf = NULL;
	struct vhm_request *vhm_req;

	req_buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
	if (req_buf != NULL) {
		stac();
		vhm_req = &req_buf->req_queue[vhm_req_id];
		atomic_store32(&vhm_req->processed, state);
		clac();
	}
}