dm: vm_event: init vm_event

This patch adds vm_event sbuf and notification initialization.
We have 2 types of event source: DM and HV, and they are slightly
different:
 - Sbuf for DM event source is a memory page shared between threads.
   Event notifications are delivered by userspace eventfd.
 - While for hv event source, sbuf is a memory page shared with HV. Its
   address(GPA) is shared to HV through hypercall. Its notifications
   are generated by HV upcall, then delivered by kernel/userspace eventfd.

A sbuf message path acts like a one way ‘tunnel’, so a data structure
‘vm_event_tunnel’ is created to organize those sbufs.

Tracked-On: #8547
Signed-off-by: Wu Zhou <wu.zhou@intel.com>
Reviewed-by: Jian Jun Chen <jian.jun.chen@intel.com>
This commit is contained in:
Wu Zhou 2023-08-04 15:38:27 +08:00 committed by acrnsi-robot
parent b23145b677
commit e83ae3e664
5 changed files with 203 additions and 0 deletions

View File

@ -188,6 +188,7 @@ SRCS += core/cmd_monitor/command.c
SRCS += core/cmd_monitor/command_handler.c
SRCS += core/cmd_monitor/cmd_monitor.c
SRCS += core/sbuf.c
SRCS += core/vm_event.c
# arch
SRCS += arch/x86/pm.c

View File

@ -71,6 +71,7 @@
#include "cmd_monitor.h"
#include "vdisplay.h"
#include "iothread.h"
#include "vm_event.h"
#define VM_MAXCPU 16 /* maximum virtual cpus */
@ -1144,6 +1145,12 @@ main(int argc, char *argv[])
goto dev_fail;
}
pr_notice("vm setup vm event\n");
error = vm_event_init(ctx);
if (error) {
pr_warn("VM_EVENT is not supported by kernel or hypervisor!\n");
}
/*
* build the guest tables, MP etc.
*/
@ -1198,6 +1205,7 @@ main(int argc, char *argv[])
break;
}
vm_event_deinit();
vm_deinit_vdevs(ctx);
mevent_deinit();
iothread_deinit();
@ -1210,6 +1218,8 @@ main(int argc, char *argv[])
}
vm_fail:
vm_event_deinit();
vm_deinit_vdevs(ctx);
if (ssram)
clean_vssram_configs();

170
devicemodel/core/vm_event.c Normal file
View File

@ -0,0 +1,170 @@
/*
* Copyright (C) 2018-2023 Intel Corporation.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <acrn_common.h>
#include "vm_event.h"
#include "hsm_ioctl_defs.h"
#include "sbuf.h"
#include "log.h"
/* Size of one sbuf element; each element carries a single struct vm_event. */
#define VM_EVENT_ELE_SIZE (sizeof(struct vm_event))
/* Indices into ve_tunnel[]: slot 0 is the HV-sourced tunnel, slot 1 the DM-sourced one. */
#define HV_VM_EVENT_TUNNEL 0
#define DM_VM_EVENT_TUNNEL 1
#define MAX_VM_EVENT_TUNNELS 2
/* One epoll entry per tunnel (each tunnel contributes exactly one kick_fd). */
#define MAX_EPOLL_EVENTS MAX_VM_EVENT_TUNNELS
/* Callback signature for consuming a received vm_event (no handler is registered in this file). */
typedef void (*vm_event_handler)(struct vmctx *ctx, struct vm_event *event);
/* epoll instance that multiplexes the tunnels' kick_fds; created in vm_event_init(). */
static int epoll_fd;
/* Set to true only after vm_event_init() fully succeeds; gates vm_event_deinit(). */
static bool started = false;
/* Backing storage for the two sbuf rings. Page-sized and page-aligned because the
 * HV page's guest-physical address is handed to the hypervisor via ioctl. */
static char hv_vm_event_page[4096] __aligned(4096);
static char dm_vm_event_page[4096] __aligned(4096);
enum event_source_type {
EVENT_SOURCE_TYPE_HV,
EVENT_SOURCE_TYPE_DM,
};
/* One-way event "tunnel": an sbuf ring plus the eventfd used to signal readers. */
struct vm_event_tunnel {
enum event_source_type type;
struct shared_buf *sbuf;
uint32_t sbuf_size;
int kick_fd;
pthread_mutex_t mtx;
bool enabled;
};
/* Static tunnel table; create_event_tunnel() fills in kick_fd/mtx and flips enabled. */
static struct vm_event_tunnel ve_tunnel[MAX_VM_EVENT_TUNNELS] = {
{
.type = EVENT_SOURCE_TYPE_HV,
.sbuf = (struct shared_buf *)hv_vm_event_page,
.sbuf_size = 4096,
.enabled = false,
},
{
.type = EVENT_SOURCE_TYPE_DM,
.sbuf = (struct shared_buf *)dm_vm_event_page,
.sbuf_size = 4096,
.enabled = false,
},
};
/*
 * Bring one tunnel online: initialize its sbuf ring, create the eventfd used
 * to signal readers, and register that fd with the given epoll instance.
 * For HV-sourced tunnels the sbuf address and the eventfd are additionally
 * handed to the kernel/hypervisor via ioctls, so HV upcalls can kick us.
 *
 * Returns 0 on success, -1 on any failure (the eventfd, if created, is closed).
 * NOTE(review): on the HV path a failure after ACRN_IOCTL_SETUP_VM_EVENT_RING
 * leaves the ring registered with the kernel — presumably torn down on VM
 * destroy; confirm against the HSM driver.
 */
static int create_event_tunnel(struct vmctx *ctx, struct vm_event_tunnel *tunnel, int epoll_fd)
{
struct epoll_event ev;
enum event_source_type type = tunnel->type;
struct shared_buf *sbuf = tunnel->sbuf;
int kick_fd = -1;
int error;
/* Lay out the ring header/element geometry inside the shared page. */
sbuf_init(sbuf, tunnel->sbuf_size, VM_EVENT_ELE_SIZE);
if (type == EVENT_SOURCE_TYPE_HV) {
/* Tell the hypervisor (through HSM) where the HV event ring lives. */
error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_VM_EVENT_RING, sbuf);
if (error) {
pr_err("%s: Setting vm_event ring failed %d\n", __func__, error);
goto out;
}
}
/* Non-blocking so epoll-driven reads never stall; CLOEXEC so children don't inherit it. */
kick_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (kick_fd < 0) {
pr_err("%s: eventfd failed %d\n", __func__, errno);
goto out;
}
if (type == EVENT_SOURCE_TYPE_HV) {
/* Let the kernel signal this eventfd when an HV upcall arrives. */
error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_VM_EVENT_FD, kick_fd);
if (error) {
pr_err("%s: Setting vm_event fd failed %d\n", __func__, error);
goto out;
}
}
/* data.ptr carries the tunnel so the poller can tell which sbuf to drain. */
ev.events = EPOLLIN;
ev.data.ptr = tunnel;
error = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, kick_fd, &ev);
if (error < 0) {
pr_err("%s: failed to add fd, error is %d\n", __func__, errno);
goto out;
}
/* Commit state only after every step succeeded. */
tunnel->kick_fd = kick_fd;
pthread_mutex_init(&tunnel->mtx, NULL);
tunnel->enabled = true;
return 0;
out:
if (kick_fd >= 0) {
close(kick_fd);
}
return -1;
}
/*
 * Tear down one tunnel: close its kick eventfd and destroy its mutex.
 * Safe to call repeatedly — a tunnel that is not enabled is left untouched.
 * (The name's "destory" spelling is kept as-is: it is the function's
 * external identifier and callers depend on it.)
 */
void destory_event_tunnel(struct vm_event_tunnel *tunnel)
{
	if (!tunnel->enabled)
		return;

	close(tunnel->kick_fd);
	tunnel->enabled = false;
	pthread_mutex_destroy(&tunnel->mtx);
}
int vm_event_init(struct vmctx *ctx)
{
int error;
epoll_fd = epoll_create1(0);
if (epoll_fd < 0) {
pr_err("%s: failed to create epoll %d\n", __func__, errno);
goto out;
}
error = create_event_tunnel(ctx, &ve_tunnel[HV_VM_EVENT_TUNNEL], epoll_fd);
if (error) {
goto out;
}
error = create_event_tunnel(ctx, &ve_tunnel[DM_VM_EVENT_TUNNEL], epoll_fd);
if (error) {
goto out;
}
started = true;
return 0;
out:
if (epoll_fd >= 0) {
close(epoll_fd);
}
destory_event_tunnel(&ve_tunnel[HV_VM_EVENT_TUNNEL]);
destory_event_tunnel(&ve_tunnel[DM_VM_EVENT_TUNNEL]);
return -1;
}
int vm_event_deinit(void)
{
if (started) {
close(epoll_fd);
destory_event_tunnel(&ve_tunnel[HV_VM_EVENT_TUNNEL]);
destory_event_tunnel(&ve_tunnel[DM_VM_EVENT_TUNNEL]);
started = false;
}
return 0;
}

View File

@ -140,6 +140,12 @@
#define ACRN_IOCTL_SETUP_ASYNCIO \
_IOW(ACRN_IOCTL_TYPE, 0x90, __u64)
/* VM EVENT */
/* Pass the vm_event sbuf ring address to the kernel/hypervisor. */
#define ACRN_IOCTL_SETUP_VM_EVENT_RING \
_IOW(ACRN_IOCTL_TYPE, 0xa0, __u64)
/* Register the eventfd the kernel signals when an HV vm_event arrives. */
#define ACRN_IOCTL_SETUP_VM_EVENT_FD \
_IOW(ACRN_IOCTL_TYPE, 0xa1, int)
#define ACRN_MEM_ACCESS_RIGHT_MASK 0x00000007U
#define ACRN_MEM_ACCESS_READ 0x00000001U
#define ACRN_MEM_ACCESS_WRITE 0x00000002U

View File

@ -0,0 +1,16 @@
/*
* Copyright (C) 2019-2023 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VM_EVENT_H
#define VM_EVENT_H
#include <types.h>
#include "vmmapi.h"
/* Initialize vm_event sbufs/eventfds for the given VM context.
 * Returns 0 on success, -1 if the kernel/hypervisor lacks support or setup fails. */
int vm_event_init(struct vmctx *ctx);
/* Tear down vm_event resources set up by vm_event_init(); always returns 0. */
int vm_event_deinit(void);
#endif /* VM_EVENT_H */