acrn-hypervisor/hypervisor/include/arch/x86/ioreq.h


/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef IOREQ_H
#define IOREQ_H
#include <types.h>
#include <acrn_common.h>
/* The return value of emulate_io() indicating that the I/O request has been
 * delivered to VHM but is not completed yet. */
#define IOREQ_PENDING 1
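/* Illustrative sketch (not part of this header) of how a caller such as a
 * port-I/O vmexit handler might interpret emulate_io()'s return value. The
 * control flow is an assumption drawn from the comment above; the exact
 * post-work calls are omitted:
 *
 *   int32_t status = emulate_io(vcpu, io_req);
 *
 *   if (status == IOREQ_PENDING) {
 *       // Delivered to VHM/DM: the vcpu is paused and post-work
 *       // (e.g. emulate_io_post()) runs once the request completes.
 *   } else if (status < 0) {
 *       // Emulation error.
 *   } else {
 *       // Completed within the hypervisor: post-work can run right away.
 *   }
 */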
/* Internal representation of an I/O request. */
struct io_request {
    /** Type of the request (PIO, MMIO, etc.). Refer to vhm_request. */
    uint32_t type;

    /** Details of this request in the same format as vhm_request. */
    union vhm_io_request reqs;
};
/* Definition of an IO port range */
struct vm_io_range {
    uint16_t base;   /* IO port base */
    uint16_t len;    /* IO port range length */
    uint32_t flags;  /* IO port attributes */
};
struct vm_io_handler;
struct vm;
struct vcpu;
typedef uint32_t (*io_read_fn_t)(struct vm *vm, uint16_t port, size_t size);

typedef void (*io_write_fn_t)(struct vm *vm, uint16_t port, size_t size,
                              uint32_t val);
/* Describes a single IO handler entry. */
struct vm_io_handler_desc {
    /** The base address of the IO range covered by this descriptor. */
    uint16_t addr;

    /** The number of bytes covered by this descriptor. */
    size_t len;
    /** A pointer to the "read" function.
     *
     * The read function is called from the hypervisor whenever
     * a read access to the described port range occurs.
     * The arguments to the callback are:
     *
     *  - The address of the port to read from.
     *  - The width of the read operation (1, 2 or 4 bytes).
     *
     * The implementation must return the port's content as a
     * byte, word or doubleword (depending on the width).
     *
     * If the pointer is null, a read of all 1's is assumed.
     */
    io_read_fn_t io_read;
    /** A pointer to the "write" function.
     *
     * The write function is called from the hypervisor code
     * whenever a write access to the described port range
     * occurs. The arguments to the callback are:
     *
     *  - The address of the port to write to.
     *  - The width of the write operation (1, 2 or 4 bytes).
     *  - The value to write as a byte, word or doubleword
     *    (depending on the width).
     *
     * The implementation must write the value to the port.
     *
     * If the pointer is null, the write access is ignored.
     */
    io_write_fn_t io_write;
};
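/* A minimal sketch of PIO handlers matching io_read_fn_t/io_write_fn_t. The
 * device name "foo", its backing variable and the truncation policy are made
 * up for illustration; only the signatures and the documented semantics
 * (return the port content on read, consume the value on write) come from
 * this header:
 *
 *   static uint32_t foo_dev_data;
 *
 *   static uint32_t foo_pio_read(struct vm *vm, uint16_t port, size_t size)
 *   {
 *       // Return the current content, truncated to the access width.
 *       return foo_dev_data & ((size == 1U) ? 0xFFU :
 *                              (size == 2U) ? 0xFFFFU : 0xFFFFFFFFU);
 *   }
 *
 *   static void foo_pio_write(struct vm *vm, uint16_t port, size_t size,
 *                             uint32_t val)
 *   {
 *       // Store the written value; a real device model would decode it.
 *       foo_dev_data = val;
 *   }
 */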
struct vm_io_handler {
    struct vm_io_handler *next;
    struct vm_io_handler_desc desc;
};
#define IO_ATTR_R 0U
#define IO_ATTR_RW 1U
#define IO_ATTR_NO_ACCESS 2U
/* Typedef for MMIO handler and range check routine */
struct mmio_request;
typedef int (*hv_mem_io_handler_t)(struct vcpu *vcpu,
                                   struct io_request *io_req);
/* Structure for MMIO handler node */
struct mem_io_node {
    hv_mem_io_handler_t read_write;
    void *handler_private_data;
    struct list_head list;
    uint64_t range_start;
    uint64_t range_end;
};
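/* A minimal sketch of an MMIO handler with the hv_mem_io_handler_t signature.
 * The "mmio" member of union vhm_io_request, its direction/value fields and
 * the REQUEST_READ constant are assumed to follow acrn_common.h; the device
 * behavior is made up for illustration:
 *
 *   static int bar_mmio_handler(struct vcpu *vcpu, struct io_request *io_req)
 *   {
 *       struct mmio_request *mmio = &io_req->reqs.mmio;  // assumed layout
 *
 *       if (mmio->direction == REQUEST_READ) {
 *           mmio->value = 0UL;   // reads return a fixed value in this sketch
 *       } else {
 *           // writes are accepted and dropped in this sketch
 *       }
 *       return 0;                // 0: request handled
 *   }
 */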
/* External Interfaces */
int32_t pio_instr_vmexit_handler(struct vcpu *vcpu);
void setup_io_bitmap(struct vm *vm);
void free_io_emulation_resource(struct vm *vm);
void allow_guest_pio_access(struct vm *vm, uint16_t port_address,
                            uint32_t nbytes);
void register_io_emulation_handler(struct vm *vm, struct vm_io_range *range,
                                   io_read_fn_t io_read_fn_ptr,
                                   io_write_fn_t io_write_fn_ptr);
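/* Registering the foo_pio_* handlers sketched above for a hypothetical 8-byte
 * port window at 0x2F8; the base, length and flags are example values, not an
 * existing ACRN device:
 *
 *   struct vm_io_range foo_range = {
 *       .base  = 0x2F8U,
 *       .len   = 8U,
 *       .flags = IO_ATTR_RW,
 *   };
 *
 *   register_io_emulation_handler(vm, &foo_range,
 *                                 foo_pio_read, foo_pio_write);
 */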
int register_mmio_emulation_handler(struct vm *vm,
                                    hv_mem_io_handler_t read_write,
                                    uint64_t start, uint64_t end,
                                    void *handler_private_data);
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
                                       uint64_t end);
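/* Registering and later removing the bar_mmio_handler sketched above for a
 * hypothetical 4 KiB guest-physical range; the addresses are illustrative
 * only:
 *
 *   if (register_mmio_emulation_handler(vm, bar_mmio_handler,
 *                                       0xA0000000UL, 0xA0001000UL,
 *                                       NULL) != 0) {
 *       // non-zero: registration failed
 *   }
 *
 *   unregister_mmio_emulation_handler(vm, 0xA0000000UL, 0xA0001000UL);
 */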
void emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
void dm_emulate_mmio_post(struct vcpu *vcpu);
int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
void emulate_io_post(struct vcpu *vcpu);
/*
 * @pre vcpu != NULL && io_req != NULL
 */
int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req);
#endif /* IOREQ_H */