DM: adapt to the new VHM request state transitions

This is the counterpart in DM to the VHM request state update in the
hypervisor. Major changes include:

    * Remove accesses to the obsolete 'valid' member.
    * Access the 'processed' member using atomic operations.
    * Sync the documentation on vhm_request.

In addition, the new state transitions also require that a VHM request always be
handled properly, as there is no 'FAILED' state any more. Instead of crashing the
device model (and thus the UOS as well), the device model now returns all 1s for
loads from invalid addresses and silently ignores stores to invalid addresses.
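
For example, the error path of the PIO exit handler now looks roughly like the
following (reassembled from the hunks below for readability; the emulation call
that sets 'error' is not shown in the diff):

    if (error) {
        fprintf(stderr, "Unhandled %s%c 0x%04x\n",
            in ? "in" : "out",
            bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
            port);

        if (in) {
            /* failed load: let the guest read all 1s */
            vhm_req->reqs.pio_request.value = VHM_REQ_PIO_INVAL;
        }
        /* a failed store is silently dropped */
    }
    /* the request is then marked REQ_STATE_COMPLETE in either case */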

Note: there is an issue in vm_system_reset() and vm_suspend_resume() where
completed VHM requests are not properly notified, causing the hypervisor to
complain as it sees uncompleted requests while trying to create a new one. This
issue will be resolved in a separate patch.

v1 -> v2:

    * Use macro-defined constants for the default values for invalid PIO/MMIO
      reads.
    * Change the return type of vmexit_handler_t in DM to void as the return
      values are no longer necessary.
    * Remove the VM_EXITCODE values that are no longer used.

Tracked-On: #875
Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Yu Wang <yu1.wang@intel.com>
Author:    Junjie Mao <junjie.mao@intel.com>
Committer: lijinxia
Date:      2018-08-01 20:10:21 +08:00
Commit:    638d7141d2 (parent ea137589e6)

4 changed files with 129 additions and 111 deletions

@@ -60,10 +60,15 @@
 #include "monitor.h"
 #include "ioc.h"
 #include "pm.h"
+#include "atomic.h"
 #define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */
-typedef int (*vmexit_handler_t)(struct vmctx *,
+/* Values returned for reads on invalid I/O requests. */
+#define VHM_REQ_PIO_INVAL (~0U)
+#define VHM_REQ_MMIO_INVAL (~0UL)
+typedef void (*vmexit_handler_t)(struct vmctx *,
 struct vhm_request *, int *vcpu);
 char *vmname;
@@ -287,7 +292,7 @@ delete_cpu(struct vmctx *ctx, int vcpu)
 return CPU_EMPTY(&cpumask);
 }
-static int
+static void
 vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 {
 int error;
@@ -303,13 +308,14 @@ vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 in ? "in" : "out",
 bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
 port);
-return VMEXIT_ABORT;
-} else {
-return VMEXIT_CONTINUE;
+if (in) {
+vhm_req->reqs.pio_request.value = VHM_REQ_PIO_INVAL;
+}
 }
 }
-static int
+static void
 vmexit_mmio_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 {
 int err;
@@ -326,14 +332,14 @@ vmexit_mmio_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 fprintf(stderr, "mmio address 0x%lx, size %ld",
 vhm_req->reqs.mmio_request.address,
 vhm_req->reqs.mmio_request.size);
-vhm_req->processed = REQ_STATE_FAILED;
-return VMEXIT_ABORT;
+if (vhm_req->reqs.mmio_request.direction == REQUEST_READ) {
+vhm_req->reqs.mmio_request.value = VHM_REQ_MMIO_INVAL;
+}
 }
-vhm_req->processed = REQ_STATE_SUCCESS;
-return VMEXIT_CONTINUE;
 }
-static int
+static void
 vmexit_pci_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 {
 int err, in = (vhm_req->reqs.pci_request.direction == REQUEST_READ);
@@ -351,11 +357,11 @@ vmexit_pci_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 vhm_req->reqs.pci_request.dev,
 vhm_req->reqs.pci_request.func,
 vhm_req->reqs.pci_request.reg);
-return VMEXIT_ABORT;
-}
-vhm_req->processed = REQ_STATE_SUCCESS;
-return VMEXIT_CONTINUE;
+if (in) {
+vhm_req->reqs.pio_request.value = VHM_REQ_PIO_INVAL;
+}
+}
 }
 #define DEBUG_EPT_MISCONFIG
@@ -368,66 +374,15 @@ vmexit_pci_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 #endif /* #ifdef DEBUG_EPT_MISCONFIG */
-static int
-vmexit_bogus(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
-{
-stats.vmexit_bogus++;
-return VMEXIT_CONTINUE;
-}
-static int
-vmexit_reqidle(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
-{
-stats.vmexit_reqidle++;
-return VMEXIT_CONTINUE;
-}
-static int
-vmexit_hlt(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
-{
-stats.vmexit_hlt++;
-/*
- * Just continue execution with the next instruction. We use
- * the HLT VM exit as a way to be friendly with the host
- * scheduler.
- */
-return VMEXIT_CONTINUE;
-}
-static int
-vmexit_pause(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
-{
-stats.vmexit_pause++;
-return VMEXIT_CONTINUE;
-}
-static int
-vmexit_mtrap(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
-{
-stats.vmexit_mtrap++;
-return VMEXIT_CONTINUE;
-}
 static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
 [VM_EXITCODE_INOUT] = vmexit_inout,
 [VM_EXITCODE_MMIO_EMUL] = vmexit_mmio_emul,
 [VM_EXITCODE_PCI_CFG] = vmexit_pci_emul,
-[VM_EXITCODE_BOGUS] = vmexit_bogus,
-[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
-[VM_EXITCODE_MTRAP] = vmexit_mtrap,
-[VM_EXITCODE_HLT] = vmexit_hlt,
-[VM_EXITCODE_PAUSE] = vmexit_pause,
 };
 static void
 handle_vmexit(struct vmctx *ctx, struct vhm_request *vhm_req, int vcpu)
 {
-int rc;
 enum vm_exitcode exitcode;
 exitcode = vhm_req->type;
@@ -437,17 +392,8 @@ handle_vmexit(struct vmctx *ctx, struct vhm_request *vhm_req, int vcpu)
 exit(1);
 }
-rc = (*handler[exitcode])(ctx, vhm_req, &vcpu);
-switch (rc) {
-case VMEXIT_CONTINUE:
-vhm_req->processed = REQ_STATE_SUCCESS;
-break;
-case VMEXIT_ABORT:
-vhm_req->processed = REQ_STATE_FAILED;
-abort();
-default:
-exit(1);
-}
+(*handler[exitcode])(ctx, vhm_req, &vcpu);
+atomic_store(&vhm_req->processed, REQ_STATE_COMPLETE);
 /* If UOS is not in suspend or system reset mode, we don't
 * need to notify request done.
@@ -580,8 +526,7 @@ vm_system_reset(struct vmctx *ctx)
 struct vhm_request *vhm_req;
 vhm_req = &vhm_req_buf[vcpu_id];
-if (vhm_req->valid &&
-(vhm_req->processed == REQ_STATE_PROCESSING) &&
+if ((atomic_load(&vhm_req->processed) == REQ_STATE_PROCESSING) &&
 (vhm_req->client == ctx->ioreq_client))
 vm_notify_request_done(ctx, vcpu_id);
 }
@@ -613,8 +558,7 @@ vm_suspend_resume(struct vmctx *ctx)
 struct vhm_request *vhm_req;
 vhm_req = &vhm_req_buf[vcpu_id];
-if (vhm_req->valid &&
-(vhm_req->processed == REQ_STATE_PROCESSING) &&
+if ((atomic_load(&vhm_req->processed) == REQ_STATE_PROCESSING) &&
 (vhm_req->client == ctx->ioreq_client))
 vm_notify_request_done(ctx, vcpu_id);
 }
@@ -648,8 +592,7 @@ vm_loop(struct vmctx *ctx)
 for (vcpu_id = 0; vcpu_id < 4; vcpu_id++) {
 vhm_req = &vhm_req_buf[vcpu_id];
-if (vhm_req->valid
-&& (vhm_req->processed == REQ_STATE_PROCESSING)
+if ((atomic_load(&vhm_req->processed) == REQ_STATE_PROCESSING)
 && (vhm_req->client == ctx->ioreq_client))
 handle_vmexit(ctx, vhm_req, vcpu_id);
 }

@@ -29,8 +29,6 @@
 #ifndef _DM_H_
 #define _DM_H_
-#define VMEXIT_CONTINUE (0)
-#define VMEXIT_ABORT (-1)
 #include <stdbool.h>
 #include "types.h"
 #include "vmm.h"

@@ -46,9 +46,9 @@
 #define VHM_REQUEST_MAX 16U
 #define REQ_STATE_PENDING 0
-#define REQ_STATE_SUCCESS 1
+#define REQ_STATE_COMPLETE 1
 #define REQ_STATE_PROCESSING 2
-#define REQ_STATE_FAILED -1
+#define REQ_STATE_FREE 3
 #define REQ_PORTIO 0U
 #define REQ_MMIO 1U
@@ -102,13 +102,92 @@ struct pci_request {
 int32_t reg;
 } __aligned(8);
-/* vhm_request are 256Bytes aligned */
+/**
+ * @brief 256-byte VHM requests
+ *
+ * The state transitions of a VHM request are:
+ *
+ * FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
+ *
+ * When a request is in COMPLETE or FREE state, the request is owned by the
+ * hypervisor. SOS (VHM or DM) shall not read or write the internals of the
+ * request except the state.
+ *
+ * When a request is in PENDING or PROCESSING state, the request is owned by
+ * SOS. The hypervisor shall not read or write the request other than the state.
+ *
+ * Based on the rules above, a typical VHM request lifecycle should looks like
+ * the following.
+ *
+ * (assume the initial state is FREE)
+ *
+ * SOS vCPU 0 SOS vCPU x UOS vCPU y
+ *
+ * hypervisor:
+ * fill in type, addr, etc.
+ * pause UOS vcpu y
+ * set state to PENDING (a)
+ * fire upcall to SOS vCPU 0
+ *
+ * VHM:
+ * scan for pending requests
+ * set state to PROCESSING (b)
+ * assign requests to clients (c)
+ *
+ * client:
+ * scan for assigned requests
+ * handle the requests (d)
+ * set state to COMPLETE
+ * notify the hypervisor
+ *
+ * hypervisor:
+ * resume UOS vcpu y (e)
+ *
+ * hypervisor:
+ * post-work (f)
+ * set state to FREE
+ *
+ * Note that the following shall hold.
+ *
+ * 1. (a) happens before (b)
+ * 2. (c) happens before (d)
+ * 3. (e) happens before (f)
+ * 4. One vCPU cannot trigger another I/O request before the previous one has
+ * completed (i.e. the state switched to FREE)
+ *
+ * Accesses to the state of a vhm_request shall be atomic and proper barriers
+ * are needed to ensure that:
+ *
+ * 1. Setting state to PENDING is the last operation when issuing a request in
+ * the hypervisor, as the hypervisor shall not access the request any more.
+ *
+ * 2. Due to similar reasons, setting state to COMPLETE is the last operation
+ * of request handling in VHM or clients in SOS.
+ */
 struct vhm_request {
-/* offset: 0bytes - 63bytes */
+/**
+ * @brief Type of this request.
+ *
+ * Byte offset: 0.
+ */
 uint32_t type;
-int32_t reserved0[15];
-/* offset: 64bytes-127bytes */
+/**
+ * @brief Reserved.
+ *
+ * Byte offset: 4.
+ */
+uint32_t reserved0[15];
+/**
+ * @brief Details about this request.
+ *
+ * For REQ_PORTIO, this has type pio_request. For REQ_MMIO and REQ_WP,
+ * this has type mmio_request. For REQ_PCICFG, this has type
+ * pci_request.
+ *
+ * Byte offset: 64.
+ */
 union {
 struct pio_request pio_request;
 struct pci_request pci_request;
@@ -116,18 +195,28 @@ struct vhm_request {
 int64_t reserved1[8];
 } reqs;
-/* True: valid req which need VHM to process.
- * ACRN write, VHM read only
+/**
+ * @brief Reserved.
+ *
+ * Byte offset: 132.
+ **/
-int32_t valid;
+uint32_t reserved1;
-/* the client which is distributed to handle this request */
+/**
+ * @brief The client which is distributed to handle this request.
+ *
+ * Accessed by VHM only.
+ *
+ * Byte offset: 132.
+ */
 int32_t client;
-/* 1: VHM had processed and success
- * 0: VHM had not yet processed
- * -1: VHM failed to process. Invalid request
- * VHM write, ACRN read only
+/**
+ * @brief The status of this request.
+ *
+ * Taking REQ_STATE_xxx as values.
+ *
+ * Byte offset: 136.
+ */
 int32_t processed;
 } __aligned(256);
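
Following the comment above, the DM side of this protocol reduces to the loop
below (a sketch assembled from the main.c changes earlier in this commit, shown
here only to tie the state machine to the code; it is not additional new code):

    for (vcpu_id = 0; vcpu_id < 4; vcpu_id++) {
        vhm_req = &vhm_req_buf[vcpu_id];
        /* only pick up requests already marked PROCESSING by the VHM
         * and assigned to this DM's ioreq client */
        if ((atomic_load(&vhm_req->processed) == REQ_STATE_PROCESSING)
            && (vhm_req->client == ctx->ioreq_client))
            handle_vmexit(ctx, vhm_req, vcpu_id);
    }

    /* handle_vmexit() dispatches to the per-type handler, then: */
    atomic_store(&vhm_req->processed, REQ_STATE_COMPLETE);

The hypervisor then performs its post-work and moves the slot back to
REQ_STATE_FREE, as described in steps (e) and (f) of the comment.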

@@ -237,18 +237,6 @@ enum vm_exitcode {
 VM_EXITCODE_INOUT = 0,
 VM_EXITCODE_MMIO_EMUL,
 VM_EXITCODE_PCI_CFG,
-VM_EXITCODE_BOGUS,
-VM_EXITCODE_HLT,
-VM_EXITCODE_MTRAP,
-VM_EXITCODE_PAUSE,
-VM_EXITCODE_PAGING,
-VM_EXITCODE_DEPRECATED1, /* used to be SPINDOWN_CPU */
-VM_EXITCODE_RENDEZVOUS,
-VM_EXITCODE_IOAPIC_EOI,
-VM_EXITCODE_INOUT_STR,
-VM_EXITCODE_MONITOR,
-VM_EXITCODE_MWAIT,
-VM_EXITCODE_REQIDLE,
 VM_EXITCODE_MAX
 };