HV: io: refine state transitions of VHM requests

Instead of using two members for maintaining the state of a VHM request, this
patch replaces the transitions with a single state. Basically the lifecycle of a
VHM request shall be:

    FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...

The header comment of struct vhm_request has more details on the state
transitions and the access limitations under different states.

Also drop the set but unused member vcpu.ioreq_pending.

For backward compatibility, the obsolete 'valid' member is still kept and
maintained until SOS and DM adapt to the new state transitions.

v2 -> v3:

    * Use complete_ioreq to mark an I/O request finished in
      dm_emulate_(pio|mmio)_post.

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Junjie Mao 2018-07-27 21:24:39 +08:00 committed by lijinxia
parent 941eb9db02
commit 17771c0ac2
8 changed files with 144 additions and 51 deletions

View File

@ -117,7 +117,6 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
vcpu->launched = false; vcpu->launched = false;
vcpu->paused_cnt = 0U; vcpu->paused_cnt = 0U;
vcpu->running = 0; vcpu->running = 0;
vcpu->ioreq_pending = 0;
vcpu->arch_vcpu.nr_sipi = 0; vcpu->arch_vcpu.nr_sipi = 0;
vcpu->pending_pre_work = 0U; vcpu->pending_pre_work = 0U;
vcpu->state = VCPU_INIT; vcpu->state = VCPU_INIT;
@ -277,7 +276,6 @@ void reset_vcpu(struct vcpu *vcpu)
vcpu->launched = false; vcpu->launched = false;
vcpu->paused_cnt = 0U; vcpu->paused_cnt = 0U;
vcpu->running = 0; vcpu->running = 0;
vcpu->ioreq_pending = 0;
vcpu->arch_vcpu.nr_sipi = 0; vcpu->arch_vcpu.nr_sipi = 0;
vcpu->pending_pre_work = 0U; vcpu->pending_pre_work = 0U;

View File

@ -613,12 +613,10 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
gpa, gpa,
&data); &data);
mmio->value = (uint64_t)data; mmio->value = (uint64_t)data;
io_req->processed = REQ_STATE_SUCCESS;
} else if (mmio->direction == REQUEST_WRITE) { } else if (mmio->direction == REQUEST_WRITE) {
vioapic_mmio_write(vm, vioapic_mmio_write(vm,
gpa, gpa,
data); data);
io_req->processed = REQ_STATE_SUCCESS;
} else { } else {
/* Can never happen due to the range of direction. */ /* Can never happen due to the range of direction. */
} }
@ -627,6 +625,7 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
ret = -EINVAL; ret = -EINVAL;
} }
io_req->processed = REQ_STATE_COMPLETE;
return ret; return ret;
} }

View File

@ -2069,17 +2069,16 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
gpa, gpa,
&mmio_req->value, &mmio_req->value,
mmio_req->size); mmio_req->size);
io_req->processed = REQ_STATE_SUCCESS;
} else if (mmio_req->direction == REQUEST_WRITE) { } else if (mmio_req->direction == REQUEST_WRITE) {
ret = vlapic_write_mmio_reg(vcpu, ret = vlapic_write_mmio_reg(vcpu,
gpa, gpa,
mmio_req->value, mmio_req->value,
mmio_req->size); mmio_req->size);
io_req->processed = REQ_STATE_SUCCESS;
} else { } else {
/* Can never happen due to the range of mmio_req->direction. */ /* Can never happen due to the range of mmio_req->direction. */
} }
io_req->processed = REQ_STATE_COMPLETE;
return ret; return ret;
} }

View File

@ -9,6 +9,12 @@
#include "guest/instr_emul_wrapper.h" #include "guest/instr_emul_wrapper.h"
#include "guest/instr_emul.h" #include "guest/instr_emul.h"
static void complete_ioreq(struct vhm_request *vhm_req)
{
vhm_req->valid = 0;
atomic_store32(&vhm_req->processed, REQ_STATE_FREE);
}
/** /**
* @pre io_req->type == REQ_PORTIO * @pre io_req->type == REQ_PORTIO
*/ */
@ -19,7 +25,7 @@ emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
struct pio_request *pio_req = &io_req->reqs.pio; struct pio_request *pio_req = &io_req->reqs.pio;
uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size); uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
if (io_req->processed == REQ_STATE_SUCCESS) { if (io_req->processed == REQ_STATE_COMPLETE) {
if (pio_req->direction == REQUEST_READ) { if (pio_req->direction == REQUEST_READ) {
uint64_t value = (uint64_t)pio_req->value; uint64_t value = (uint64_t)pio_req->value;
int32_t context_idx = vcpu->arch_vcpu.cur_context; int32_t context_idx = vcpu->arch_vcpu.cur_context;
@ -53,11 +59,10 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page); req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
vhm_req = &req_buf->req_queue[cur]; vhm_req = &req_buf->req_queue[cur];
io_req->processed = vhm_req->processed;
pio_req->value = vhm_req->reqs.pio.value; pio_req->value = vhm_req->reqs.pio.value;
/* VHM emulation data already copy to req, mark to free slot now */ /* VHM emulation data already copy to req, mark to free slot now */
vhm_req->valid = 0; complete_ioreq(vhm_req);
return emulate_pio_post(vcpu, io_req); return emulate_pio_post(vcpu, io_req);
} }
@ -70,7 +75,7 @@ int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
int32_t ret; int32_t ret;
struct mmio_request *mmio_req = &io_req->reqs.mmio; struct mmio_request *mmio_req = &io_req->reqs.mmio;
if (io_req->processed == REQ_STATE_SUCCESS) { if (io_req->processed == REQ_STATE_COMPLETE) {
if (mmio_req->direction == REQUEST_READ) { if (mmio_req->direction == REQUEST_READ) {
/* Emulate instruction and update vcpu register set */ /* Emulate instruction and update vcpu register set */
ret = emulate_instruction(vcpu); ret = emulate_instruction(vcpu);
@ -99,38 +104,26 @@ int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
vhm_req = &req_buf->req_queue[cur]; vhm_req = &req_buf->req_queue[cur];
mmio_req->value = vhm_req->reqs.mmio.value; mmio_req->value = vhm_req->reqs.mmio.value;
io_req->processed = vhm_req->processed;
/* VHM emulation data already copy to req, mark to free slot now */ /* VHM emulation data already copy to req, mark to free slot now */
vhm_req->valid = 0; complete_ioreq(vhm_req);
return emulate_mmio_post(vcpu, io_req); return emulate_mmio_post(vcpu, io_req);
} }
static void complete_ioreq(struct vcpu *vcpu)
{
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
req_buf = (union vhm_request_buffer *)
vcpu->vm->sw.io_shared_page;
vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
vhm_req->valid = 0;
atomic_store32(&vcpu->ioreq_pending, 0U);
}
void emulate_io_post(struct vcpu *vcpu) void emulate_io_post(struct vcpu *vcpu)
{ {
union vhm_request_buffer *req_buf; union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req; struct vhm_request *vhm_req;
struct io_request *io_req = &vcpu->req;
req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page; req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
vhm_req = &req_buf->req_queue[vcpu->vcpu_id]; vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
io_req->processed = atomic_load32(&vhm_req->processed);
if ((vhm_req->valid == 0) || if ((vhm_req->valid == 0) ||
((vhm_req->processed != REQ_STATE_SUCCESS) && ((io_req->processed != REQ_STATE_COMPLETE) &&
(vhm_req->processed != REQ_STATE_FAILED))) { (io_req->processed != REQ_STATE_FAILED))) {
return; return;
} }
@ -139,7 +132,7 @@ void emulate_io_post(struct vcpu *vcpu)
* mark ioreq done and don't resume vcpu. * mark ioreq done and don't resume vcpu.
*/ */
if (vcpu->state == VCPU_ZOMBIE) { if (vcpu->state == VCPU_ZOMBIE) {
complete_ioreq(vcpu); complete_ioreq(vhm_req);
return; return;
} }
@ -162,7 +155,7 @@ void emulate_io_post(struct vcpu *vcpu)
default: default:
/* REQ_WP can only be triggered on writes which do not need /* REQ_WP can only be triggered on writes which do not need
* post-work. Just mark the ioreq done. */ * post-work. Just mark the ioreq done. */
complete_ioreq(vcpu); complete_ioreq(vhm_req);
break; break;
} }
@ -222,7 +215,7 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
} }
/* TODO: failures in the handlers should be reflected /* TODO: failures in the handlers should be reflected
* here. */ * here. */
io_req->processed = REQ_STATE_SUCCESS; io_req->processed = REQ_STATE_COMPLETE;
status = 0; status = 0;
break; break;
} }

View File

@ -334,6 +334,8 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
uint64_t hpa = 0UL; uint64_t hpa = 0UL;
struct acrn_set_ioreq_buffer iobuf; struct acrn_set_ioreq_buffer iobuf;
struct vm *target_vm = get_vm_from_vmid(vmid); struct vm *target_vm = get_vm_from_vmid(vmid);
union vhm_request_buffer *req_buf;
uint16_t i;
if (target_vm == NULL) { if (target_vm == NULL) {
return -1; return -1;
@ -358,6 +360,11 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
target_vm->sw.io_shared_page = HPA2HVA(hpa); target_vm->sw.io_shared_page = HPA2HVA(hpa);
req_buf = target_vm->sw.io_shared_page;
for (i = 0U; i < VHM_REQUEST_MAX; i++) {
atomic_store32(&req_buf->req_queue[i].processed, REQ_STATE_FREE);
}
return ret; return ret;
} }

View File

@ -72,12 +72,14 @@ acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req)
} }
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page); req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
/* ACRN insert request to VHM and inject upcall */
cur = vcpu->vcpu_id; cur = vcpu->vcpu_id;
vhm_req = &req_buf->req_queue[cur]; vhm_req = &req_buf->req_queue[cur];
ASSERT(atomic_load32(&vhm_req->processed) == REQ_STATE_FREE,
"VHM request buffer is busy");
/* ACRN insert request to VHM and inject upcall */
vhm_req->type = io_req->type; vhm_req->type = io_req->type;
vhm_req->processed = io_req->processed;
(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request), (void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
&io_req->reqs, sizeof(union vhm_io_request)); &io_req->reqs, sizeof(union vhm_io_request));
@ -85,15 +87,15 @@ acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req)
* TODO: when pause_vcpu changed to switch vcpu out directlly, we * TODO: when pause_vcpu changed to switch vcpu out directlly, we
* should fix the race issue between req.valid = true and vcpu pause * should fix the race issue between req.valid = true and vcpu pause
*/ */
atomic_store32(&vcpu->ioreq_pending, 1U);
pause_vcpu(vcpu, VCPU_PAUSED); pause_vcpu(vcpu, VCPU_PAUSED);
/* Must clear the signal before we mark req valid /* Must clear the signal before we mark req as pending
* Once we mark to valid, VHM may process req and signal us * Once we mark it pending, VHM may process req and signal us
* before we perform upcall. * before we perform upcall.
 * because VHM can work in polling mode without waiting for upcall * because VHM can work in polling mode without waiting for upcall
*/ */
vhm_req->valid = 1; vhm_req->valid = 1;
atomic_store32(&vhm_req->processed, REQ_STATE_PENDING);
acrn_print_request(vcpu->vcpu_id, vhm_req); acrn_print_request(vcpu->vcpu_id, vhm_req);
@ -140,7 +142,7 @@ static void _get_req_info_(struct vhm_request *req, int *id, char *type,
} }
switch (req->processed) { switch (req->processed) {
case REQ_STATE_SUCCESS: case REQ_STATE_COMPLETE:
(void)strcpy_s(state, 16U, "SUCCESS"); (void)strcpy_s(state, 16U, "SUCCESS");
break; break;
case REQ_STATE_PENDING: case REQ_STATE_PENDING:

View File

@ -248,7 +248,6 @@ struct vcpu {
bool launched; /* Whether the vcpu is launched on target pcpu */ bool launched; /* Whether the vcpu is launched on target pcpu */
uint32_t paused_cnt; /* how many times vcpu is paused */ uint32_t paused_cnt; /* how many times vcpu is paused */
uint32_t running; /* vcpu is picked up and run? */ uint32_t running; /* vcpu is picked up and run? */
uint32_t ioreq_pending; /* ioreq is ongoing or not? */
struct io_request req; /* used by io/ept emulation */ struct io_request req; /* used by io/ept emulation */

View File

@ -26,8 +26,9 @@
*/ */
#define VHM_REQUEST_MAX 16U #define VHM_REQUEST_MAX 16U
#define REQ_STATE_FREE 3
#define REQ_STATE_PENDING 0 #define REQ_STATE_PENDING 0
#define REQ_STATE_SUCCESS 1 #define REQ_STATE_COMPLETE 1
#define REQ_STATE_PROCESSING 2 #define REQ_STATE_PROCESSING 2
#define REQ_STATE_FAILED -1 #define REQ_STATE_FAILED -1
@ -90,27 +91,122 @@ union vhm_io_request {
int64_t reserved1[8]; int64_t reserved1[8];
}; };
/* vhm_request are 256Bytes aligned */ /**
* @brief 256-byte VHM requests
*
* The state transitions of a VHM request are:
*
* FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
* \ /
* +--> FAILED -+
*
* When a request is in COMPLETE or FREE state, the request is owned by the
* hypervisor. SOS (VHM or DM) shall not read or write the internals of the
* request except the state.
*
* When a request is in PENDING or PROCESSING state, the request is owned by
* SOS. The hypervisor shall not read or write the request other than the state.
*
 * Based on the rules above, a typical VHM request lifecycle should look like
* the following.
*
* (assume the initial state is FREE)
*
* SOS vCPU 0 SOS vCPU x UOS vCPU y
*
* hypervisor:
* fill in type, addr, etc.
* pause UOS vcpu y
* set state to PENDING (a)
* fire upcall to SOS vCPU 0
*
* VHM:
* scan for pending requests
* set state to PROCESSING (b)
* assign requests to clients (c)
*
* client:
* scan for assigned requests
* handle the requests (d)
* set state to COMPLETE
* notify the hypervisor
*
* hypervisor:
* resume UOS vcpu y (e)
*
* hypervisor:
* post-work (f)
* set state to FREE
*
* Note that the following shall hold.
*
* 1. (a) happens before (b)
* 2. (c) happens before (d)
* 3. (e) happens before (f)
* 4. One vCPU cannot trigger another I/O request before the previous one has
* completed (i.e. the state switched to FREE)
*
* Accesses to the state of a vhm_request shall be atomic and proper barriers
* are needed to ensure that:
*
* 1. Setting state to PENDING is the last operation when issuing a request in
* the hypervisor, as the hypervisor shall not access the request any more.
*
* 2. Due to similar reasons, setting state to COMPLETE is the last operation
* of request handling in VHM or clients in SOS.
*
* The state FAILED is an obsolete state to indicate that the I/O request cannot
* be handled. In such cases the mediators and DM should switch the state to
 * COMPLETE with the value set to all 1s for reads, and skip the request for
* writes. This state WILL BE REMOVED after the mediators and DM are updated to
* follow this rule.
*/
struct vhm_request { struct vhm_request {
/* offset: 0bytes - 63bytes */ /**
* Type of this request.
*
* Byte offset: 0.
*/
uint32_t type; uint32_t type;
int32_t reserved0[15];
/* offset: 64bytes-127bytes */ /**
* Reserved.
*
* Byte offset: 4.
*/
uint32_t reserved0[15];
/**
* Details about this request. For REQ_PORTIO, this has type
* pio_request. For REQ_MMIO and REQ_WP, this has type mmio_request. For
* REQ_PCICFG, this has type pci_request.
*
* Byte offset: 64.
*/
union vhm_io_request reqs; union vhm_io_request reqs;
/* True: valid req which need VHM to process. /**
* ACRN write, VHM read only * Whether this request is valid for processing. ACRN write, VHM read
**/ * only.
*
 * Warning: this field is obsolete and will be removed soon.
*
* Byte offset: 128.
*/
int32_t valid; int32_t valid;
/* the client which is distributed to handle this request */ /**
* The client which is distributed to handle this request. Accessed by
* VHM only.
*
* Byte offset: 132.
*/
int32_t client; int32_t client;
/* 1: VHM had processed and success /**
* 0: VHM had not yet processed * The status of this request, taking REQ_STATE_xxx as values.
* -1: VHM failed to process. Invalid request *
* VHM write, ACRN read only * Byte offset: 136.
*/ */
int32_t processed; int32_t processed;
} __aligned(256); } __aligned(256);