diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index 49d60a633..b216dc5e9 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -117,7 +117,6 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->ioreq_pending = 0;
 	vcpu->arch_vcpu.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 	vcpu->state = VCPU_INIT;
@@ -277,7 +276,6 @@ void reset_vcpu(struct vcpu *vcpu)
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->ioreq_pending = 0;
 	vcpu->arch_vcpu.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 
diff --git a/hypervisor/arch/x86/guest/vioapic.c b/hypervisor/arch/x86/guest/vioapic.c
index 7ef2bd810..58a6a6fbe 100644
--- a/hypervisor/arch/x86/guest/vioapic.c
+++ b/hypervisor/arch/x86/guest/vioapic.c
@@ -613,12 +613,10 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 					gpa,
 					&data);
 			mmio->value = (uint64_t)data;
-			io_req->processed = REQ_STATE_SUCCESS;
 		} else if (mmio->direction == REQUEST_WRITE) {
 			vioapic_mmio_write(vm,
 					gpa,
 					data);
-			io_req->processed = REQ_STATE_SUCCESS;
 		} else {
 			/* Can never happen due to the range of direction. */
 		}
@@ -627,6 +625,7 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 		ret = -EINVAL;
 	}
 
+	io_req->processed = REQ_STATE_COMPLETE;
 	return ret;
 }
 
diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index a5326acd6..88c378448 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -2069,17 +2069,16 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 				gpa,
 				&mmio_req->value,
 				mmio_req->size);
-		io_req->processed = REQ_STATE_SUCCESS;
 	} else if (mmio_req->direction == REQUEST_WRITE) {
 		ret = vlapic_write_mmio_reg(vcpu,
 				gpa,
 				mmio_req->value,
 				mmio_req->size);
-		io_req->processed = REQ_STATE_SUCCESS;
 	} else {
 		/* Can never happen due to the range of mmio_req->direction. */
 	}
 
+	io_req->processed = REQ_STATE_COMPLETE;
 	return ret;
 }
 
diff --git a/hypervisor/arch/x86/io.c b/hypervisor/arch/x86/io.c
index 0ecef8abf..397c3f12c 100644
--- a/hypervisor/arch/x86/io.c
+++ b/hypervisor/arch/x86/io.c
@@ -9,6 +9,12 @@
 #include "guest/instr_emul_wrapper.h"
 #include "guest/instr_emul.h"
 
+static void complete_ioreq(struct vhm_request *vhm_req)
+{
+	vhm_req->valid = 0;
+	atomic_store32(&vhm_req->processed, REQ_STATE_FREE);
+}
+
 /**
  * @pre io_req->type == REQ_PORTIO
  */
@@ -19,7 +25,7 @@ emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
 	struct pio_request *pio_req = &io_req->reqs.pio;
 	uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
 
-	if (io_req->processed == REQ_STATE_SUCCESS) {
+	if (io_req->processed == REQ_STATE_COMPLETE) {
 		if (pio_req->direction == REQUEST_READ) {
 			uint64_t value = (uint64_t)pio_req->value;
 			int32_t context_idx = vcpu->arch_vcpu.cur_context;
@@ -53,11 +59,10 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
 	vhm_req = &req_buf->req_queue[cur];
 
-	io_req->processed = vhm_req->processed;
 	pio_req->value = vhm_req->reqs.pio.value;
 
 	/* VHM emulation data already copy to req, mark to free slot now */
-	vhm_req->valid = 0;
+	complete_ioreq(vhm_req);
 
 	return emulate_pio_post(vcpu, io_req);
 }
@@ -70,7 +75,7 @@ int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
 	int32_t ret;
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;
 
-	if (io_req->processed == REQ_STATE_SUCCESS) {
+	if (io_req->processed == REQ_STATE_COMPLETE) {
 		if (mmio_req->direction == REQUEST_READ) {
 			/* Emulate instruction and update vcpu register set */
 			ret = emulate_instruction(vcpu);
@@ -99,38 +104,26 @@ int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
 	vhm_req = &req_buf->req_queue[cur];
 
 	mmio_req->value = vhm_req->reqs.mmio.value;
-	io_req->processed = vhm_req->processed;
 
 	/* VHM emulation data already copy to req, mark to free slot now */
-	vhm_req->valid = 0;
+	complete_ioreq(vhm_req);
 
 	return emulate_mmio_post(vcpu, io_req);
 }
 
-static void complete_ioreq(struct vcpu *vcpu)
-{
-	union vhm_request_buffer *req_buf;
-	struct vhm_request *vhm_req;
-
-	req_buf = (union vhm_request_buffer *)
-		vcpu->vm->sw.io_shared_page;
-	vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
-
-	vhm_req->valid = 0;
-	atomic_store32(&vcpu->ioreq_pending, 0U);
-}
-
 void emulate_io_post(struct vcpu *vcpu)
 {
 	union vhm_request_buffer *req_buf;
 	struct vhm_request *vhm_req;
+	struct io_request *io_req = &vcpu->req;
 
 	req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
 	vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
+	io_req->processed = atomic_load32(&vhm_req->processed);
 
 	if ((vhm_req->valid == 0) ||
-	    ((vhm_req->processed != REQ_STATE_SUCCESS) &&
-	     (vhm_req->processed != REQ_STATE_FAILED))) {
+	    ((io_req->processed != REQ_STATE_COMPLETE) &&
+	     (io_req->processed != REQ_STATE_FAILED))) {
 		return;
 	}
 
@@ -139,7 +132,7 @@ void emulate_io_post(struct vcpu *vcpu)
 	 * mark ioreq done and don't resume vcpu.
 	 */
 	if (vcpu->state == VCPU_ZOMBIE) {
-		complete_ioreq(vcpu);
+		complete_ioreq(vhm_req);
 		return;
 	}
 
@@ -162,7 +155,7 @@ void emulate_io_post(struct vcpu *vcpu)
 	default:
 		/* REQ_WP can only be triggered on writes which do not need
 		 * post-work. Just mark the ioreq done. */
-		complete_ioreq(vcpu);
+		complete_ioreq(vhm_req);
 		break;
 	}
 
@@ -222,7 +215,7 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
 		}
 		/* TODO: failures in the handlers should be reflected
 		 * here. */
-		io_req->processed = REQ_STATE_SUCCESS;
+		io_req->processed = REQ_STATE_COMPLETE;
 		status = 0;
 		break;
 	}
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index 21f784aff..07a5c0fbd 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -334,6 +334,8 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
 	uint64_t hpa = 0UL;
 	struct acrn_set_ioreq_buffer iobuf;
 	struct vm *target_vm = get_vm_from_vmid(vmid);
+	union vhm_request_buffer *req_buf;
+	uint16_t i;
 	if (target_vm == NULL) {
 		return -1;
 	}
@@ -358,6 +360,11 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
 
 	target_vm->sw.io_shared_page = HPA2HVA(hpa);
 
+	req_buf = target_vm->sw.io_shared_page;
+	for (i = 0U; i < VHM_REQUEST_MAX; i++) {
+		atomic_store32(&req_buf->req_queue[i].processed, REQ_STATE_FREE);
+	}
+
 	return ret;
 }
 
diff --git a/hypervisor/common/io_request.c b/hypervisor/common/io_request.c
index 0a213ff50..d6f084e9a 100644
--- a/hypervisor/common/io_request.c
+++ b/hypervisor/common/io_request.c
@@ -72,12 +72,14 @@ acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req)
 	}
 
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
-
-	/* ACRN insert request to VHM and inject upcall */
 	cur = vcpu->vcpu_id;
 	vhm_req = &req_buf->req_queue[cur];
+
+	ASSERT(atomic_load32(&vhm_req->processed) == REQ_STATE_FREE,
+		"VHM request buffer is busy");
+
+	/* ACRN insert request to VHM and inject upcall */
 	vhm_req->type = io_req->type;
-	vhm_req->processed = io_req->processed;
 	(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
 		&io_req->reqs, sizeof(union vhm_io_request));
 
@@ -85,15 +87,15 @@ acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req)
 	 * TODO: when pause_vcpu changed to switch vcpu out directlly, we
 	 * should fix the race issue between req.valid = true and vcpu pause
 	 */
-	atomic_store32(&vcpu->ioreq_pending, 1U);
 	pause_vcpu(vcpu, VCPU_PAUSED);
 
-	/* Must clear the signal before we mark req valid
-	 * Once we mark to valid, VHM may process req and signal us
+	/* Must clear the signal before we mark req as pending
+	 * Once we mark it pending, VHM may process req and signal us
 	 * before we perform upcall.
 	 * because VHM can work in pulling mode without wait for upcall
 	 */
 	vhm_req->valid = 1;
+	atomic_store32(&vhm_req->processed, REQ_STATE_PENDING);
 
 	acrn_print_request(vcpu->vcpu_id, vhm_req);
 
@@ -140,7 +142,7 @@ static void _get_req_info_(struct vhm_request *req, int *id, char *type,
 	}
 
 	switch (req->processed) {
-	case REQ_STATE_SUCCESS:
+	case REQ_STATE_COMPLETE:
 		(void)strcpy_s(state, 16U, "SUCCESS");
 		break;
 	case REQ_STATE_PENDING:
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 5e4d6d2da..d912a29cb 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -248,7 +248,6 @@ struct vcpu {
 	bool launched; /* Whether the vcpu is launched on target pcpu */
 	uint32_t paused_cnt; /* how many times vcpu is paused */
 	uint32_t running; /* vcpu is picked up and run? */
-	uint32_t ioreq_pending; /* ioreq is ongoing or not? */
 
 	struct io_request req; /* used by io/ept emulation */
 
diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h
index 074a2761a..df00de8c3 100644
--- a/hypervisor/include/public/acrn_common.h
+++ b/hypervisor/include/public/acrn_common.h
@@ -26,8 +26,9 @@
  */
 #define VHM_REQUEST_MAX 16U
 
+#define REQ_STATE_FREE 3
 #define REQ_STATE_PENDING 0
-#define REQ_STATE_SUCCESS 1
+#define REQ_STATE_COMPLETE 1
 #define REQ_STATE_PROCESSING 2
 #define REQ_STATE_FAILED -1
 
@@ -90,27 +91,122 @@ union vhm_io_request {
 	int64_t reserved1[8];
 };
 
-/* vhm_request are 256Bytes aligned */
+/**
+ * @brief 256-byte VHM requests
+ *
+ * The state transitions of a VHM request are:
+ *
+ *    FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
+ *                                \               /
+ *                                 +--> FAILED -+
+ *
+ * When a request is in COMPLETE or FREE state, the request is owned by the
+ * hypervisor. SOS (VHM or DM) shall not read or write the internals of the
+ * request except the state.
+ *
+ * When a request is in PENDING or PROCESSING state, the request is owned by
+ * SOS. The hypervisor shall not read or write the request other than the state.
+ *
+ * Based on the rules above, a typical VHM request lifecycle should look like
+ * the following.
+ *
+ * (assume the initial state is FREE)
+ *
+ *       SOS vCPU 0                SOS vCPU x                    UOS vCPU y
+ *
+ *                                                 hypervisor:
+ *                                                     fill in type, addr, etc.
+ *                                                     pause UOS vcpu y
+ *                                                     set state to PENDING (a)
+ *                                                     fire upcall to SOS vCPU 0
+ *
+ *  VHM:
+ *      scan for pending requests
+ *      set state to PROCESSING (b)
+ *      assign requests to clients (c)
+ *
+ *                            client:
+ *                                scan for assigned requests
+ *                                handle the requests (d)
+ *                                set state to COMPLETE
+ *                                notify the hypervisor
+ *
+ *                            hypervisor:
+ *                                resume UOS vcpu y (e)
+ *
+ *                                                 hypervisor:
+ *                                                     post-work (f)
+ *                                                     set state to FREE
+ *
+ * Note that the following shall hold.
+ *
+ *   1. (a) happens before (b)
+ *   2. (c) happens before (d)
+ *   3. (e) happens before (f)
+ *   4. One vCPU cannot trigger another I/O request before the previous one has
+ *      completed (i.e. the state switched to FREE)
+ *
+ * Accesses to the state of a vhm_request shall be atomic and proper barriers
+ * are needed to ensure that:
+ *
+ *   1. Setting state to PENDING is the last operation when issuing a request in
+ *      the hypervisor, as the hypervisor shall not access the request any more.
+ *
+ *   2. Due to similar reasons, setting state to COMPLETE is the last operation
+ *      of request handling in VHM or clients in SOS.
+ *
+ * The state FAILED is an obsolete state to indicate that the I/O request cannot
+ * be handled. In such cases the mediators and DM should switch the state to
+ * COMPLETE with the value set to all 1s for read, and skip the request for
+ * writes. This state WILL BE REMOVED after the mediators and DM are updated to
+ * follow this rule.
+ */
 struct vhm_request {
-	/* offset: 0bytes - 63bytes */
+	/**
+	 * Type of this request.
+	 *
+	 * Byte offset: 0.
+	 */
 	uint32_t type;
-	int32_t reserved0[15];
 
-	/* offset: 64bytes-127bytes */
+	/**
+	 * Reserved.
+	 *
+	 * Byte offset: 4.
+	 */
+	uint32_t reserved0[15];
+
+	/**
+	 * Details about this request. For REQ_PORTIO, this has type
+	 * pio_request. For REQ_MMIO and REQ_WP, this has type mmio_request. For
+	 * REQ_PCICFG, this has type pci_request.
+	 *
+	 * Byte offset: 64.
+	 */
 	union vhm_io_request reqs;
 
-	/* True: valid req which need VHM to process.
-	 * ACRN write, VHM read only
-	 **/
+	/**
+	 * Whether this request is valid for processing. ACRN write, VHM read
	 * only.
+	 *
+	 * Warning: this field is obsolete and will be removed soon.
+	 *
+	 * Byte offset: 128.
+	 */
 	int32_t valid;
 
-	/* the client which is distributed to handle this request */
+	/**
+	 * The client which is distributed to handle this request. Accessed by
+	 * VHM only.
+	 *
+	 * Byte offset: 132.
+	 */
 	int32_t client;
 
-	/* 1: VHM had processed and success
-	 * 0: VHM had not yet processed
-	 * -1: VHM failed to process. Invalid request
-	 * VHM write, ACRN read only
+	/**
+	 * The status of this request, taking REQ_STATE_xxx as values.
+	 *
+	 * Byte offset: 136.
 	 */
 	int32_t processed;
 } __aligned(256);
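The vhm_request documentation added in acrn_common.h boils down to an ownership handshake: whichever side owns a request fills in its payload first and publishes it by writing the state last, and the other side reads the state (with acquire semantics) before touching anything else. Below is a minimal, self-contained C11 sketch of that handshake; it is illustrative only and not part of the patch. The names (toy_request, issue_request, serve_request, retire_request) are hypothetical, and <stdatomic.h> stands in for the hypervisor's atomic_store32()/atomic_load32() helpers and the SOS-side VHM/DM code paths, which this patch does not touch.

#include <stdatomic.h>
#include <stdint.h>

enum {
	TOY_STATE_PENDING    = 0,	/* owned by SOS, waiting to be picked up */
	TOY_STATE_COMPLETE   = 1,	/* owned by the hypervisor, result ready */
	TOY_STATE_PROCESSING = 2,	/* owned by SOS, being handled */
	TOY_STATE_FREE       = 3	/* owned by the hypervisor, slot reusable */
};

struct toy_request {
	uint32_t type;
	uint64_t addr;
	uint64_t value;
	_Atomic int32_t processed;	/* analogue of vhm_request.processed */
};

/* Hypervisor side: fill in the payload, then publish it by storing PENDING
 * last, with release semantics, so the other side never sees a half-written
 * request. */
static void issue_request(struct toy_request *req, uint32_t type,
			  uint64_t addr, uint64_t value)
{
	req->type = type;
	req->addr = addr;
	req->value = value;
	atomic_store_explicit(&req->processed, TOY_STATE_PENDING,
			      memory_order_release);
}

/* SOS client side: claim a pending request, emulate it, then hand ownership
 * back by storing COMPLETE last. */
static void serve_request(struct toy_request *req)
{
	int32_t expected = TOY_STATE_PENDING;

	if (!atomic_compare_exchange_strong(&req->processed, &expected,
					    TOY_STATE_PROCESSING)) {
		return;	/* not pending: nothing to do */
	}

	/* ... emulate the access described by type/addr, fill in value ... */

	atomic_store_explicit(&req->processed, TOY_STATE_COMPLETE,
			      memory_order_release);
}

/* Hypervisor side, on notification: consume the result, then release the
 * slot by storing FREE, in the spirit of complete_ioreq() above. */
static void retire_request(struct toy_request *req)
{
	if (atomic_load_explicit(&req->processed, memory_order_acquire)
			!= TOY_STATE_COMPLETE) {
		return;
	}

	/* ... post-work: copy req->value back into the guest's context ... */

	atomic_store_explicit(&req->processed, TOY_STATE_FREE,
			      memory_order_release);
}

int main(void)
{
	struct toy_request req;

	atomic_init(&req.processed, TOY_STATE_FREE);

	issue_request(&req, 0U, 0x3f8U, 0U);	/* guest vCPU trapped on an access */
	serve_request(&req);			/* SOS client handles it */
	retire_request(&req);			/* post-work done, slot freed */
	return 0;
}

The same ordering is what the patch enforces in ACRN proper: acrn_insert_request_wait() stores REQ_STATE_PENDING only after the payload and the valid flag are written, and emulate_io_post() loads the state first and releases the slot to REQ_STATE_FREE (via complete_ioreq()) only after post-work has consumed the result.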