HV: io: refactoring vmexit handler on EPT violation

This is the counterpart to the PIO emulation side.

1. ept_violation_vmexit_handler (entry point for handling vmexit on EPT violation):

    Extract the MMIO address, size, direction and value (for writes only), fill
    in an I/O request, invoke emulate_io to handle it and emulate_mmio_post for
    post-processing.
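
    A condensed sketch of the resulting handler flow (taken from the new
    function body in this patch; GPA extraction, decoding and error paths
    omitted):

        exit_qual = vcpu->arch_vcpu.exit_qualification;
        io_req->type = REQ_MMIO;
        io_req->processed = REQ_STATE_PENDING;

        if ((exit_qual & 0x2UL) != 0UL) {
                mmio_req->direction = REQUEST_WRITE;
                /* write access on an EPT RX mapping is a write-protect fault */
                if ((exit_qual & 0x38UL) == 0x28UL) {
                        io_req->type = REQ_WP;
                }
        } else {
                mmio_req->direction = REQUEST_READ;
        }

        status = emulate_io(vcpu, io_req);
        if ((status == 0) && (io_req->processed != REQ_STATE_PENDING)) {
                status = emulate_mmio_post(vcpu, io_req);
        }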

2. emulate_io

    Handle the given I/O request, either completed by registered MMIO handlers
    or sent to VHM.
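
    emulate_io now dispatches on the request type; MMIO and write-protect
    requests go through the new hv_emulate_mmio, and -ENODEV still means
    "no hypervisor handler found, forward to VHM" (sketch condensed from the
    hunk below):

        switch (io_req->type) {
        case REQ_PORTIO:
                status = hv_emulate_pio(vcpu, io_req);
                break;
        case REQ_MMIO:
        case REQ_WP:
                status = hv_emulate_mmio(vcpu, io_req);
                break;
        default:
                /* Unknown I/O request type */
                status = -EINVAL;
                io_req->processed = REQ_STATE_FAILED;
                break;
        }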

3. emulate_mmio_post:

    Update guest registers after the emulation is done.
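
    Only a successfully completed MMIO read needs post-processing: the value
    fetched into the request still has to be committed to the guest register
    set via instruction emulation (condensed from the new emulate_mmio_post
    below):

        if ((io_req->processed == REQ_STATE_SUCCESS) &&
            (mmio_req->direction == REQUEST_READ)) {
                /* write mmio_req->value back to the destination register */
                ret = emulate_instruction(vcpu);
        }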

v2 -> v3:

    * Rename: emulate_mmio_by_handler -> hv_emulate_mmio.
    * Inline the original hv_emulate_mmio.
    * No longer check alignment. The handlers are responsible for handling
      unaligned accesses.

v1 -> v2:

    * Rename: do_io -> emulate_io.

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Junjie Mao
Date:   2018-07-25 05:16:40 +08:00
Committed by: lijinxia
commit b21b172347 (parent 50e4bc1758)
3 changed files with 142 additions and 129 deletions


@@ -11,7 +11,6 @@
#define ACRN_DBG_EPT 6U
static uint64_t find_next_table(uint32_t table_offset, void *table_base)
{
uint64_t table_entry;
@@ -190,22 +189,6 @@ bool is_ept_supported(void)
return status;
}
static int
hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req,
struct mem_io_node *mmio_handler)
{
struct mmio_request *mmio_req = &io_req->reqs.mmio;
if ((mmio_req->address % mmio_req->size) != 0UL) {
pr_err("access size not align with paddr");
return -EINVAL;
}
/* Handle this MMIO operation */
return mmio_handler->read_write(vcpu, io_req,
mmio_handler->handler_private_data);
}
int register_mmio_emulation_handler(struct vm *vm,
hv_mem_io_handler_t read_write, uint64_t start,
uint64_t end, void *handler_private_data)
@@ -275,82 +258,30 @@ void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
}
}
int dm_emulate_mmio_post(struct vcpu *vcpu)
{
int ret = 0;
uint16_t cur = vcpu->vcpu_id;
struct io_request *io_req = &vcpu->req;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
vhm_req = &req_buf->req_queue[cur];
mmio_req->value = vhm_req->reqs.mmio.value;
io_req->processed = vhm_req->processed;
/* VHM emulation data has already been copied to the request; mark the slot free now */
vhm_req->valid = 0;
if (io_req->processed != REQ_STATE_SUCCESS) {
goto out;
}
if (mmio_req->direction == REQUEST_READ) {
/* Emulate instruction and update vcpu register set */
ret = emulate_instruction(vcpu);
if (ret != 0) {
goto out;
}
}
out:
return ret;
}
static int
dm_emulate_mmio_pre(struct vcpu *vcpu, uint64_t exit_qual __unused)
{
int status;
struct io_request *io_req = &vcpu->req;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
if (mmio_req->direction == REQUEST_WRITE) {
status = emulate_instruction(vcpu);
if (status != 0) {
return status;
}
/* XXX: write access while EPT perm RX -> WP */
if ((exit_qual & 0x38UL) == 0x28UL) {
io_req->type = REQ_WP;
}
}
return 0;
}
int ept_violation_vmexit_handler(struct vcpu *vcpu)
{
int status = -EINVAL, ret;
uint64_t exit_qual;
uint64_t gpa;
struct list_head *pos;
struct io_request *io_req = &vcpu->req;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
struct mem_io_node *mmio_handler = NULL;
io_req->type = REQ_MMIO;
io_req->processed = REQ_STATE_PENDING;
/* Handle page fault from guest */
exit_qual = vcpu->arch_vcpu.exit_qualification;
io_req->type = REQ_MMIO;
io_req->processed = REQ_STATE_PENDING;
/* Specify if read or write operation */
if ((exit_qual & 0x2UL) != 0UL) {
/* Write operation */
mmio_req->direction = REQUEST_WRITE;
mmio_req->value = 0UL;
/* XXX: write access while EPT perm RX -> WP */
if ((exit_qual & 0x38UL) == 0x28UL) {
io_req->type = REQ_WP;
}
} else {
/* Read operation */
mmio_req->direction = REQUEST_READ;
@@ -380,60 +311,32 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
goto out;
}
list_for_each(pos, &vcpu->vm->mmio_list) {
mmio_handler = list_entry(pos, struct mem_io_node, list);
if (((mmio_req->address + mmio_req->size) <=
mmio_handler->range_start) ||
(mmio_req->address >= mmio_handler->range_end)) {
continue;
}
else if (!((mmio_req->address >= mmio_handler->range_start) &&
((mmio_req->address + mmio_req->size) <=
mmio_handler->range_end))) {
pr_fatal("Err MMIO, addr:0x%llx, size:%x",
mmio_req->address, mmio_req->size);
return -EIO;
}
if (mmio_req->direction == REQUEST_WRITE) {
if (emulate_instruction(vcpu) != 0) {
goto out;
}
}
/*
* For MMIO write, ask DM to run MMIO emulation after
* instruction emulation. For MMIO read, ask DM to run MMIO
* emulation at first.
*/
/* Call generic memory emulation handler
* For MMIO write, call hv_emulate_mmio after
* instruction emulation. For MMIO read,
* call hv_emulate_mmio at first.
*/
hv_emulate_mmio(vcpu, io_req, mmio_handler);
if (mmio_req->direction == REQUEST_READ) {
/* Emulate instruction and update vcpu register set */
if (emulate_instruction(vcpu) != 0) {
goto out;
}
}
status = 0;
break;
}
if (status != 0) {
/*
* No mmio handler from HV side, search from VHM in Dom0
*
* ACRN insert request to VHM and inject upcall
* For MMIO write, ask DM to run MMIO emulation after
* instruction emulation. For MMIO read, ask DM to run MMIO
* emulation at first.
*/
if (dm_emulate_mmio_pre(vcpu, exit_qual) != 0) {
/* Determine value being written. */
if (mmio_req->direction == REQUEST_WRITE) {
status = emulate_instruction(vcpu);
if (status != 0) {
goto out;
}
status = acrn_insert_request_wait(vcpu, &vcpu->req);
}
status = emulate_io(vcpu, io_req);
/* io_req is hypervisor-private. For requests sent to VHM,
* io_req->processed will be PENDING till dm_emulate_mmio_post() is
* called on vcpu resume. */
if (status == 0) {
if (io_req->processed != REQ_STATE_PENDING) {
status = emulate_mmio_post(vcpu, io_req);
}
}
return status;
out:


@@ -6,6 +6,9 @@
#include <hypervisor.h>
#include "guest/instr_emul_wrapper.h"
#include "guest/instr_emul.h"
/**
* @pre io_req->type == REQ_PORTIO
*/
@@ -59,6 +62,51 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
return emulate_pio_post(vcpu, io_req);
}
/**
* @pre vcpu->req.type == REQ_MMIO
*/
int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
{
int32_t ret;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
if (io_req->processed == REQ_STATE_SUCCESS) {
if (mmio_req->direction == REQUEST_READ) {
/* Emulate instruction and update vcpu register set */
ret = emulate_instruction(vcpu);
} else {
ret = 0;
}
} else {
ret = 0;
}
return ret;
}
/**
* @pre vcpu->req.type == REQ_MMIO
*/
int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
{
uint16_t cur = vcpu->vcpu_id;
struct io_request *io_req = &vcpu->req;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
vhm_req = &req_buf->req_queue[cur];
mmio_req->value = vhm_req->reqs.mmio.value;
io_req->processed = vhm_req->processed;
/* VHM emulation data has already been copied to the request; mark the slot free now */
vhm_req->valid = 0;
return emulate_mmio_post(vcpu, io_req);
}
/**
* Try handling the given request by any port I/O handler registered in the
* hypervisor.
@@ -94,6 +142,7 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
pr_fatal("Err:IO, port 0x%04x, size=%hu spans devices",
port, size);
status = -EIO;
io_req->processed = REQ_STATE_FAILED;
break;
} else {
if (pio_req->direction == REQUEST_WRITE) {
@@ -120,12 +169,57 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
return status;
}
/**
* Use registered MMIO handlers on the given request if it falls in the range of
* any of them.
*
* @pre io_req->type == REQ_MMIO
*
* @return 0 - Successfully emulated by registered handlers.
* @return -ENODEV - No proper handler found.
* @return -EIO - The request spans multiple devices and cannot be emulated.
*/
static int32_t
hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
{
int status = -ENODEV;
uint64_t address, size;
struct list_head *pos;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
struct mem_io_node *mmio_handler = NULL;
address = mmio_req->address;
size = mmio_req->size;
list_for_each(pos, &vcpu->vm->mmio_list) {
uint64_t base, end;
mmio_handler = list_entry(pos, struct mem_io_node, list);
base = mmio_handler->range_start;
end = mmio_handler->range_end;
if ((address + size <= base) || (address >= end)) {
continue;
} else if (!((address >= base) && (address + size <= end))) {
pr_fatal("Err MMIO, address:0x%llx, size:%x",
address, size);
io_req->processed = REQ_STATE_FAILED;
return -EIO;
} else {
/* Handle this MMIO operation */
status = mmio_handler->read_write(vcpu, io_req,
mmio_handler->handler_private_data);
break;
}
}
return status;
}
/**
* Handle an I/O request by either invoking a hypervisor-internal handler or
* delivering it to VHM.
*
* @pre io_req->type == REQ_PORTIO || io_req->type == REQ_MMIO || io_req->type == REQ_WP
*
* @return 0 - Successfully emulated by registered handlers.
* @return -EIO - The request spans multiple devices and cannot be emulated.
* @return Negative on other errors during emulation.
@@ -135,7 +229,20 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
{
int32_t status;
status = hv_emulate_pio(vcpu, io_req);
switch (io_req->type) {
case REQ_PORTIO:
status = hv_emulate_pio(vcpu, io_req);
break;
case REQ_MMIO:
case REQ_WP:
status = hv_emulate_mmio(vcpu, io_req);
break;
default:
/* Unknown I/O request type */
status = -EINVAL;
io_req->processed = REQ_STATE_FAILED;
break;
}
if (status == -ENODEV) {
/*


@@ -122,7 +122,10 @@ int register_mmio_emulation_handler(struct vm *vm,
uint64_t end, void *handler_private_data);
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
uint64_t end);
int dm_emulate_mmio_post(struct vcpu *vcpu);
int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
int32_t dm_emulate_mmio_post(struct vcpu *vcpu);
int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *req);