From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Zhao Yakui <yakui.zhao@intel.com>
Date: Fri, 12 Apr 2019 14:54:52 +0800
Subject: [PATCH] acrn/vhm: Use the mempool to allocate the buffer for
hypercall parameter

Currently the buffer for the hypercall parameter resides on the kernel stack,
and virt_to_phys is then used to get the guest physical address. This no
longer works when VMAP_STACK is enabled, because virt_to_phys cannot return
the correct physical address for a stack allocated with vmalloc.

Use kmalloc to allocate the required buffer for the hypercall and then use
virt_to_phys to get its physical address. The physical address is used as
the parameter of copy_from/to_gpa in the hypervisor.
To avoid kmalloc failures, allocate the buffers from a mempool.

v1->v2: Use GFP_ATOMIC in inject_msi, which runs with IRQs disabled
        Fix the incorrect pm_info state (from Victor)
        Minor refinement related to IC_GET_PLATFORM_INFO
Tracked-On: projectacrn/acrn-hypervisor#1318
Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
---
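
For reference, the per-call pattern this diff applies at each hypercall site
looks roughly like the sketch below. It is illustrative only: it mirrors the
vhm_inject_msi() change in drivers/vhm/vhm_vm_mngt.c and uses the helpers the
patch adds in drivers/vhm/vhm_mempool.c; the NULL check is an extra
precaution that the patch itself does not add.

	struct acrn_msi_entry *msi;
	int ret;

	/* kmalloc-backed buffer, so virt_to_phys() stays valid with CONFIG_VMAP_STACK=y */
	msi = acrn_mempool_alloc(GFP_KERNEL);	/* GFP_ATOMIC in IRQ-disabled paths */
	if (!msi)
		return -ENOMEM;

	msi->msi_addr = msi_addr;
	msi->msi_data = msi_data;
	ret = hcall_inject_msi(vmid, virt_to_phys(msi));
	acrn_mempool_free(msi);

The pool itself is created once in vhm_init() via acrn_mempool_init(16, 512),
i.e. 16 pre-allocated 512-byte elements, so small parameter buffers remain
available under memory pressure, and is torn down by acrn_mempool_deinit()
in vhm_exit().
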
drivers/acrn/hv_npk_log.c | 49 ++++++-----
drivers/acrn/sbuf.c | 17 ++--
drivers/char/vhm/vhm_dev.c | 140 ++++++++++++++++++++++----------
drivers/vhm/Makefile | 3 +-
drivers/vhm/vhm_hugetlb.c | 33 ++++----
drivers/vhm/vhm_ioreq.c | 9 +-
drivers/vhm/vhm_mempool.c | 53 ++++++++++++
drivers/vhm/vhm_mm.c | 61 ++++++++------
drivers/vhm/vhm_vm_mngt.c | 30 +++++--
include/linux/vhm/acrn_vhm_mm.h | 5 ++
10 files changed, 275 insertions(+), 125 deletions(-)
create mode 100644 drivers/vhm/vhm_mempool.c
diff --git a/drivers/acrn/hv_npk_log.c b/drivers/acrn/hv_npk_log.c
index 2303b9a72a3a..adb342883674 100644
--- a/drivers/acrn/hv_npk_log.c
+++ b/drivers/acrn/hv_npk_log.c
@@ -91,6 +91,7 @@
#include <linux/pci.h>
#include <linux/vhm/acrn_hv_defs.h>
#include <linux/vhm/vhm_hypercall.h>
+#include <linux/vhm/acrn_vhm_mm.h>
#include "hv_npk_log.h"
#define HV_NPK_LOG_USAGE \
@@ -254,7 +255,7 @@ static int hv_npk_log_conf_set(const char *val, const struct kernel_param *kp)
{
char **argv;
int i, argc, ret = -EINVAL;
- struct hv_npk_log_param cmd;
+ struct hv_npk_log_param *cmd = NULL;
unsigned int args[HV_NPK_LOG_MAX_PARAM];
if (!hnl_conf && load_npk_conf() < 0)
@@ -270,47 +271,49 @@ static int hv_npk_log_conf_set(const char *val, const struct kernel_param *kp)
if (kstrtouint(argv[i], 10, &args[i]) < 0)
goto out;
- memset(&cmd, 0, sizeof(struct hv_npk_log_param));
- cmd.loglevel = 0xffffU;
- cmd.cmd = HV_NPK_LOG_CMD_INVALID;
+ cmd = acrn_mempool_alloc(GFP_KERNEL);
+ memset(cmd, 0, sizeof(struct hv_npk_log_param));
+ cmd->loglevel = 0xffffU;
+ cmd->cmd = HV_NPK_LOG_CMD_INVALID;
switch (tolower(argv[0][0])) {
case 'e': /* enable */
case 'c': /* configure */
if (!strncasecmp(argv[0], "enable", strlen(argv[0]))) {
- cmd.cmd = HV_NPK_LOG_CMD_ENABLE;
+ cmd->cmd = HV_NPK_LOG_CMD_ENABLE;
} else if (!strncasecmp(argv[0], "configure", strlen(argv[0]))
&& argc != 1) {
- cmd.cmd = HV_NPK_LOG_CMD_CONF;
+ cmd->cmd = HV_NPK_LOG_CMD_CONF;
} else
break;
if (argc <= 2) {
- cmd.loglevel = argc == 2 ? args[1] : 0xffffU;
+ cmd->loglevel = argc == 2 ? args[1] : 0xffffU;
if (hnl_conf->master == HV_NPK_LOG_UNKNOWN)
- mc2addr(&cmd.mmio_addr, HV_NPK_LOG_DFT_MASTER,
+ mc2addr(&cmd->mmio_addr, HV_NPK_LOG_DFT_MASTER,
HV_NPK_LOG_DFT_CHANNEL);
- } else if (argc > 2 && !mc2addr(&cmd.mmio_addr,
+ } else if (argc > 2 && !mc2addr(&cmd->mmio_addr,
args[1], args[2])) {
- cmd.loglevel = argc == 4 ? args[3] : 0xffffU;
+ cmd->loglevel = argc == 4 ? args[3] : 0xffffU;
}
break;
case 'd': /* disable */
if (!strncasecmp(argv[0], "disable", strlen(argv[0]))
&& argc == 1)
- cmd.cmd = HV_NPK_LOG_CMD_DISABLE;
+ cmd->cmd = HV_NPK_LOG_CMD_DISABLE;
break;
default:
pr_err("Unsupported command : %s\n", argv[0]);
break;
}
- if (cmd.cmd != HV_NPK_LOG_CMD_INVALID) {
- ret = hcall_setup_hv_npk_log(virt_to_phys(&cmd));
- ret = (ret < 0 || cmd.res == HV_NPK_LOG_RES_KO) ? -EINVAL : 0;
+ if (cmd->cmd != HV_NPK_LOG_CMD_INVALID) {
+ ret = hcall_setup_hv_npk_log(virt_to_phys(cmd));
+ ret = (ret < 0 || cmd->res == HV_NPK_LOG_RES_KO) ? -EINVAL : 0;
}
out:
argv_free(argv);
+ acrn_mempool_free(cmd);
if (ret < 0)
pr_err("Unsupported configuration : %s\n", val);
return ret;
@@ -320,27 +323,29 @@ static int hv_npk_log_conf_set(const char *val, const struct kernel_param *kp)
static int hv_npk_log_conf_get(char *buffer, const struct kernel_param *kp)
{
long ret;
- struct hv_npk_log_param query;
+ struct hv_npk_log_param *query;
if (!hnl_conf && load_npk_conf() < 0)
return sprintf(buffer, "%s\n",
"Failed to init the configuration.");
- memset(&query, 0, sizeof(struct hv_npk_log_param));
- query.cmd = HV_NPK_LOG_CMD_QUERY;
- ret = hcall_setup_hv_npk_log(virt_to_phys(&query));
- if (ret < 0 || query.res == HV_NPK_LOG_RES_KO)
+ query = acrn_mempool_alloc(GFP_KERNEL);
+ memset(query, 0, sizeof(struct hv_npk_log_param));
+ query->cmd = HV_NPK_LOG_CMD_QUERY;
+ ret = hcall_setup_hv_npk_log(virt_to_phys(query));
+ if (ret < 0 || query->res == HV_NPK_LOG_RES_KO)
return sprintf(buffer, "%s\n", "Failed to invoke the hcall.");
- if (!addr2mc(query.mmio_addr, &hnl_conf->master, &hnl_conf->channel)) {
- hnl_conf->status = query.res == HV_NPK_LOG_RES_ENABLED ?
+ if (!addr2mc(query->mmio_addr, &hnl_conf->master, &hnl_conf->channel)) {
+ hnl_conf->status = query->res == HV_NPK_LOG_RES_ENABLED ?
HV_NPK_LOG_ENABLED : HV_NPK_LOG_DISABLED;
} else {
hnl_conf->status = HV_NPK_LOG_UNKNOWN;
hnl_conf->master = HV_NPK_LOG_UNKNOWN;
hnl_conf->channel = HV_NPK_LOG_UNKNOWN;
}
- hnl_conf->loglevel = query.loglevel;
+ hnl_conf->loglevel = query->loglevel;
+ acrn_mempool_free(query);
return scnprintf(buffer, PAGE_SIZE, "Master(SW:%d~%d FW:%d~%d):%d "
"Channel(0~%d):%d Status:%d Log Level: %d\n%s\n",
diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c
index 018e1f4df72b..4b6754885201 100644
--- a/drivers/acrn/sbuf.c
+++ b/drivers/acrn/sbuf.c
@@ -60,6 +60,7 @@
#include <asm/hypervisor.h>
#include <linux/vhm/acrn_hv_defs.h>
#include <linux/vhm/vhm_hypercall.h>
+#include <linux/vhm/acrn_vhm_mm.h>
#include "sbuf.h"
static inline bool sbuf_is_empty(shared_buf_t *sbuf)
@@ -169,18 +170,22 @@ EXPORT_SYMBOL(sbuf_get);
int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, uint64_t gpa)
{
- struct sbuf_setup_param ssp;
+ struct sbuf_setup_param *ssp;
+ int ret;
if (x86_hyper_type != X86_HYPER_ACRN)
return -ENODEV;
- ssp.pcpu_id = pcpu_id;
- ssp.sbuf_id = sbuf_id;
+ ssp = acrn_mempool_alloc(GFP_KERNEL);
+ ssp->pcpu_id = pcpu_id;
+ ssp->sbuf_id = sbuf_id;
- ssp.gpa = gpa;
- pr_info("setup phys add = 0x%llx\n", ssp.gpa);
+ ssp->gpa = gpa;
+ pr_info("setup phys add = 0x%llx\n", gpa);
- return hcall_setup_sbuf(virt_to_phys(&ssp));
+ ret = hcall_setup_sbuf(virt_to_phys(ssp));
+ acrn_mempool_free(ssp);
+ return ret;
}
EXPORT_SYMBOL(sbuf_share_setup);
diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c
index 2c6437439d1c..768d510e0909 100644
--- a/drivers/char/vhm/vhm_dev.c
+++ b/drivers/char/vhm/vhm_dev.c
@@ -160,7 +160,6 @@ static long vhm_dev_ioctl(struct file *filep,
long ret = 0;
struct vhm_vm *vm;
struct ic_ptdev_irq ic_pt_irq;
- struct hc_ptdev_irq hc_pt_irq;
pr_debug("[%s] ioctl_num=0x%x\n", __func__, ioctl_num);
@@ -176,20 +175,25 @@ static long vhm_dev_ioctl(struct file *filep,
return 0;
} else if (ioctl_num == IC_GET_PLATFORM_INFO) {
- struct hc_platform_info platform_info;
+ struct hc_platform_info *platform_info;
- ret = hcall_get_platform_info(virt_to_phys(&platform_info));
- if (ret < 0)
+ platform_info = acrn_mempool_alloc(GFP_KERNEL);
+ ret = hcall_get_platform_info(virt_to_phys(platform_info));
+ if (ret < 0) {
+ acrn_mempool_free(platform_info);
return -EFAULT;
+ }
if (copy_to_user((void *)ioctl_param,
- &platform_info, sizeof(platform_info)))
+ platform_info, sizeof(*platform_info))) {
+ acrn_mempool_free(platform_info);
return -EFAULT;
+ }
+ acrn_mempool_free(platform_info);
return 0;
}
- memset(&hc_pt_irq, 0, sizeof(hc_pt_irq));
memset(&ic_pt_irq, 0, sizeof(ic_pt_irq));
vm = (struct vhm_vm *)filep->private_data;
if (vm == NULL) {
@@ -203,40 +207,47 @@ static long vhm_dev_ioctl(struct file *filep,
switch (ioctl_num) {
case IC_CREATE_VM: {
- struct acrn_create_vm created_vm;
+ struct acrn_create_vm *created_vm;
+
+ created_vm = acrn_mempool_alloc(GFP_KERNEL);
- if (copy_from_user(&created_vm, (void *)ioctl_param,
- sizeof(struct acrn_create_vm)))
+ if (copy_from_user(created_vm, (void *)ioctl_param,
+ sizeof(struct acrn_create_vm))) {
+ acrn_mempool_free(created_vm);
return -EFAULT;
+ }
- ret = hcall_create_vm(virt_to_phys(&created_vm));
+ ret = hcall_create_vm(virt_to_phys(created_vm));
if ((ret < 0) ||
- (created_vm.vmid == ACRN_INVALID_VMID)) {
+ (created_vm->vmid == ACRN_INVALID_VMID)) {
pr_err("vhm: failed to create VM from Hypervisor !\n");
+ acrn_mempool_free(created_vm);
return -EFAULT;
}
- if (copy_to_user((void *)ioctl_param, &created_vm,
+ if (copy_to_user((void *)ioctl_param, created_vm,
sizeof(struct acrn_create_vm))) {
ret = -EFAULT;
goto create_vm_fail;
}
- vm->vmid = created_vm.vmid;
+ vm->vmid = created_vm->vmid;
- if (created_vm.req_buf) {
- ret = acrn_ioreq_init(vm, created_vm.req_buf);
+ if (created_vm->req_buf) {
+ ret = acrn_ioreq_init(vm, created_vm->req_buf);
if (ret < 0)
goto create_vm_fail;
}
acrn_ioeventfd_init(vm->vmid);
acrn_irqfd_init(vm->vmid);
+ acrn_mempool_free(created_vm);
- pr_info("vhm: VM %d created\n", created_vm.vmid);
+ pr_info("vhm: VM %ld created\n", vm->vmid);
break;
create_vm_fail:
- hcall_destroy_vm(created_vm.vmid);
+ hcall_destroy_vm(created_vm->vmid);
+ acrn_mempool_free(created_vm);
vm->vmid = ACRN_INVALID_VMID;
break;
@@ -283,31 +294,40 @@ static long vhm_dev_ioctl(struct file *filep,
}
case IC_CREATE_VCPU: {
- struct acrn_create_vcpu cv;
+ struct acrn_create_vcpu *cv;
- if (copy_from_user(&cv, (void *)ioctl_param,
- sizeof(struct acrn_create_vcpu)))
+ cv = acrn_mempool_alloc(GFP_KERNEL);
+ if (copy_from_user(cv, (void *)ioctl_param,
+ sizeof(struct acrn_create_vcpu))) {
+ acrn_mempool_free(cv);
return -EFAULT;
+ }
ret = acrn_hypercall2(HC_CREATE_VCPU, vm->vmid,
- virt_to_phys(&cv));
+ virt_to_phys(cv));
if (ret < 0) {
- pr_err("vhm: failed to create vcpu %d!\n", cv.vcpu_id);
+ pr_err("vhm: failed to create vcpu %d!\n", cv->vcpu_id);
+ acrn_mempool_free(cv);
return -EFAULT;
}
atomic_inc(&vm->vcpu_num);
+ acrn_mempool_free(cv);
return ret;
}
case IC_SET_VCPU_REGS: {
- struct acrn_set_vcpu_regs asvr;
+ struct acrn_set_vcpu_regs *asvr;
- if (copy_from_user(&asvr, (void *)ioctl_param, sizeof(asvr)))
+ asvr = acrn_mempool_alloc(GFP_KERNEL);
+ if (copy_from_user(asvr, (void *)ioctl_param, sizeof(*asvr))) {
+ acrn_mempool_free(asvr);
return -EFAULT;
+ }
ret = acrn_hypercall2(HC_SET_VCPU_REGS, vm->vmid,
- virt_to_phys(&asvr));
+ virt_to_phys(asvr));
+ acrn_mempool_free(asvr);
if (ret < 0) {
pr_err("vhm: failed to set bsp state of vm %ld!\n",
vm->vmid);
@@ -412,12 +432,17 @@ static long vhm_dev_ioctl(struct file *filep,
}
case IC_INJECT_MSI: {
- struct acrn_msi_entry msi;
+ struct acrn_msi_entry *msi;
- if (copy_from_user(&msi, (void *)ioctl_param, sizeof(msi)))
+ msi = acrn_mempool_alloc(GFP_KERNEL);
+
+ if (copy_from_user(msi, (void *)ioctl_param, sizeof(*msi))) {
+ acrn_mempool_free(msi);
return -EFAULT;
+ }
- ret = hcall_inject_msi(vm->vmid, virt_to_phys(&msi));
+ ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
+ acrn_mempool_free(msi);
if (ret < 0) {
pr_err("vhm: failed to inject!\n");
return -EFAULT;
@@ -495,19 +520,24 @@ static long vhm_dev_ioctl(struct file *filep,
case IC_SET_PTDEV_INTR_INFO: {
struct table_iomems *new;
+ struct hc_ptdev_irq *hc_pt_irq;
if (copy_from_user(&ic_pt_irq,
(void *)ioctl_param, sizeof(ic_pt_irq)))
return -EFAULT;
- memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq));
+ hc_pt_irq = acrn_mempool_alloc(GFP_KERNEL);
+
+ memcpy(hc_pt_irq, &ic_pt_irq, sizeof(*hc_pt_irq));
ret = hcall_set_ptdev_intr_info(vm->vmid,
- virt_to_phys(&hc_pt_irq));
+ virt_to_phys(hc_pt_irq));
if (ret < 0) {
pr_err("vhm: failed to set intr info for ptdev!\n");
+ acrn_mempool_free(hc_pt_irq);
return -EFAULT;
}
+ acrn_mempool_free(hc_pt_irq);
if ((ic_pt_irq.type == IRQ_MSIX) &&
ic_pt_irq.msix.table_paddr) {
@@ -528,21 +558,25 @@ static long vhm_dev_ioctl(struct file *filep,
}
case IC_RESET_PTDEV_INTR_INFO: {
struct table_iomems *ptr;
+ struct hc_ptdev_irq *hc_pt_irq;
int dev_found = 0;
if (copy_from_user(&ic_pt_irq,
(void *)ioctl_param, sizeof(ic_pt_irq)))
return -EFAULT;
- memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq));
+ hc_pt_irq = acrn_mempool_alloc(GFP_KERNEL);
+ memcpy(hc_pt_irq, &ic_pt_irq, sizeof(*hc_pt_irq));
ret = hcall_reset_ptdev_intr_info(vm->vmid,
- virt_to_phys(&hc_pt_irq));
+ virt_to_phys(hc_pt_irq));
if (ret < 0) {
pr_err("vhm: failed to reset intr info for ptdev!\n");
+ acrn_mempool_free(hc_pt_irq);
return -EFAULT;
}
+ acrn_mempool_free(hc_pt_irq);
if (ic_pt_irq.type == IRQ_MSIX) {
mutex_lock(&table_iomems_lock);
list_for_each_entry(ptr, &table_iomems_list, list) {
@@ -619,40 +653,52 @@ static long vhm_dev_ioctl(struct file *filep,
switch (cmd & PMCMD_TYPE_MASK) {
case PMCMD_GET_PX_CNT:
case PMCMD_GET_CX_CNT: {
- uint64_t pm_info;
+ uint64_t *pm_info;
- ret = hcall_get_cpu_state(cmd, virt_to_phys(&pm_info));
- if (ret < 0)
+ pm_info = acrn_mempool_alloc(GFP_KERNEL);
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
+ if (ret < 0) {
+ acrn_mempool_free(pm_info);
return -EFAULT;
+ }
if (copy_to_user((void *)ioctl_param,
- &pm_info, sizeof(pm_info)))
+ pm_info, sizeof(pm_info)))
ret = -EFAULT;
-
+ acrn_mempool_free(pm_info);
break;
}
case PMCMD_GET_PX_DATA: {
- struct cpu_px_data px_data;
+ struct cpu_px_data *px_data;
- ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_data));
- if (ret < 0)
+ px_data = acrn_mempool_alloc(GFP_KERNEL);
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
+ if (ret < 0) {
+ acrn_mempool_free(px_data);
return -EFAULT;
+ }
if (copy_to_user((void *)ioctl_param,
- &px_data, sizeof(px_data)))
+ px_data, sizeof(*px_data)))
ret = -EFAULT;
+ acrn_mempool_free(px_data);
break;
}
case PMCMD_GET_CX_DATA: {
- struct cpu_cx_data cx_data;
+ struct cpu_cx_data *cx_data;
- ret = hcall_get_cpu_state(cmd, virt_to_phys(&cx_data));
- if (ret < 0)
+ cx_data = acrn_mempool_alloc(GFP_KERNEL);
+
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
+ if (ret < 0) {
+ acrn_mempool_free(cx_data);
return -EFAULT;
+ }
if (copy_to_user((void *)ioctl_param,
- &cx_data, sizeof(cx_data)))
+ cx_data, sizeof(*cx_data)))
ret = -EFAULT;
+ acrn_mempool_free(cx_data);
break;
}
default:
@@ -861,6 +907,8 @@ static int __init vhm_init(void)
}
acrn_ioreq_driver_init();
+ /* initialize memory pool with 16 elements and 512 bytes element size */
+ acrn_mempool_init(16, 512);
pr_info("vhm: Virtio & Hypervisor service module initialized\n");
return 0;
}
@@ -873,6 +921,8 @@ static void __exit vhm_exit(void)
class_destroy(vhm_class);
unregister_chrdev(major, DEVICE_NAME);
sysfs_remove_group(&vhm_device->kobj, &vhm_attr_group);
+
+ acrn_mempool_deinit();
pr_info("vhm: exit\n");
}
diff --git a/drivers/vhm/Makefile b/drivers/vhm/Makefile
index a1520388766a..229811310d33 100644
--- a/drivers/vhm/Makefile
+++ b/drivers/vhm/Makefile
@@ -1,2 +1,3 @@
subdir-ccflags-$(CONFIG_ACRN_VHM) := -Werror
-obj-y += vhm_mm.o vhm_hugetlb.o vhm_ioreq.o vhm_vm_mngt.o vhm_hypercall.o vhm_ioeventfd.o vhm_irqfd.o
+obj-y += vhm_mm.o vhm_hugetlb.o vhm_ioreq.o vhm_vm_mngt.o vhm_hypercall.o vhm_ioeventfd.o vhm_irqfd.o \
+ vhm_mempool.o
diff --git a/drivers/vhm/vhm_hugetlb.c b/drivers/vhm/vhm_hugetlb.c
index 72bbef13f062..f3c035bb1028 100644
--- a/drivers/vhm/vhm_hugetlb.c
+++ b/drivers/vhm/vhm_hugetlb.c
@@ -123,13 +123,14 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap)
struct page *page = NULL, *regions_buf_pg = NULL;
unsigned long len, guest_gpa, vma;
struct vm_memory_region *region_array;
- struct set_regions regions;
+ struct set_regions *regions = NULL;
int max_size = PAGE_SIZE/sizeof(struct vm_memory_region);
int ret;
if (vm == NULL || memmap == NULL)
return -EINVAL;
+ regions = acrn_mempool_alloc(GFP_KERNEL);
len = memmap->len;
vma = memmap->vma_base;
guest_gpa = memmap->gpa;
@@ -138,9 +139,9 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap)
regions_buf_pg = alloc_page(GFP_KERNEL);
if (regions_buf_pg == NULL)
return -ENOMEM;
- regions.mr_num = 0;
- regions.vmid = vm->vmid;
- regions.regions_gpa = page_to_phys(regions_buf_pg);
+ regions->mr_num = 0;
+ regions->vmid = vm->vmid;
+ regions->regions_gpa = page_to_phys(regions_buf_pg);
region_array = page_to_virt(regions_buf_pg);
while (len > 0) {
@@ -163,22 +164,22 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap)
}
/* fill each memory region into region_array */
- region_array[regions.mr_num].type = MR_ADD;
- region_array[regions.mr_num].gpa = guest_gpa;
- region_array[regions.mr_num].vm0_gpa = vm0_gpa;
- region_array[regions.mr_num].size = pagesize;
- region_array[regions.mr_num].prot =
+ region_array[regions->mr_num].type = MR_ADD;
+ region_array[regions->mr_num].gpa = guest_gpa;
+ region_array[regions->mr_num].vm0_gpa = vm0_gpa;
+ region_array[regions->mr_num].size = pagesize;
+ region_array[regions->mr_num].prot =
(MEM_TYPE_WB & MEM_TYPE_MASK) |
(memmap->prot & MEM_ACCESS_RIGHT_MASK);
- regions.mr_num++;
- if (regions.mr_num == max_size) {
+ regions->mr_num++;
+ if (regions->mr_num == max_size) {
pr_info("region buffer full, set & renew regions!\n");
- ret = set_memory_regions(&regions);
+ ret = set_memory_regions(regions);
if (ret < 0) {
pr_err("failed to set regions,ret=%d!\n", ret);
goto err;
}
- regions.mr_num = 0;
+ regions->mr_num = 0;
}
len -= pagesize;
@@ -186,20 +187,22 @@ int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap)
guest_gpa += pagesize;
}
- ret = set_memory_regions(&regions);
+ ret = set_memory_regions(regions);
if (ret < 0) {
pr_err("failed to set regions, ret=%d!\n", ret);
goto err;
}
__free_page(regions_buf_pg);
-
+ acrn_mempool_free(regions);
return 0;
err:
if (regions_buf_pg)
__free_page(regions_buf_pg);
if (page)
put_page(page);
+ acrn_mempool_free(regions);
+
return ret;
}
diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c
index 9deb720062c8..454ca9d1edcd 100644
--- a/drivers/vhm/vhm_ioreq.c
+++ b/drivers/vhm/vhm_ioreq.c
@@ -65,6 +65,7 @@
#include <linux/vhm/acrn_vhm_ioreq.h>
#include <linux/vhm/vhm_vm_mngt.h>
#include <linux/vhm/vhm_hypercall.h>
+#include <linux/vhm/acrn_vhm_mm.h>
#include <linux/idr.h>
static DEFINE_SPINLOCK(client_lock);
@@ -1011,7 +1012,7 @@ unsigned int vhm_dev_poll(struct file *filep, poll_table *wait)
int acrn_ioreq_init(struct vhm_vm *vm, unsigned long vma)
{
- struct acrn_set_ioreq_buffer set_buffer;
+ struct acrn_set_ioreq_buffer *set_buffer;
struct page *page;
int ret;
@@ -1024,12 +1025,14 @@ int acrn_ioreq_init(struct vhm_vm *vm, unsigned long vma)
return -ENOMEM;
}
+ set_buffer = acrn_mempool_alloc(GFP_KERNEL);
vm->req_buf = page_address(page);
vm->pg = page;
- set_buffer.req_buf = page_to_phys(page);
+ set_buffer->req_buf = page_to_phys(page);
- ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(&set_buffer));
+ ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
+ acrn_mempool_free(set_buffer);
if (ret < 0) {
pr_err("vhm-ioreq: failed to set request buffer !\n");
return -EFAULT;
diff --git a/drivers/vhm/vhm_mempool.c b/drivers/vhm/vhm_mempool.c
new file mode 100644
index 000000000000..f6101c682610
--- /dev/null
+++ b/drivers/vhm/vhm_mempool.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * virtio and hypervisor service module (VHM): VHM mempool
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mempool.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+
+#include <linux/vhm/acrn_vhm_mm.h>
+
+static mempool_t *acrn_mempool;
+
+int acrn_mempool_init(int min_nr, int buf_size)
+{
+ if (acrn_mempool)
+ return 0;
+
+ acrn_mempool = mempool_create_kmalloc_pool(min_nr, buf_size);
+
+ if (acrn_mempool == 0) {
+ pr_err("Failed to initialize the memory poll\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void acrn_mempool_deinit(void)
+{
+ mempool_destroy(acrn_mempool);
+ acrn_mempool = NULL;
+}
+
+void *acrn_mempool_alloc(gfp_t gfp_flag)
+{
+ if (acrn_mempool == NULL)
+ return NULL;
+
+ return mempool_alloc(acrn_mempool, gfp_flag);
+}
+EXPORT_SYMBOL_GPL(acrn_mempool_alloc);
+
+void acrn_mempool_free(void *buf_ptr)
+{
+ if ((buf_ptr == NULL) || (acrn_mempool == NULL))
+ return;
+
+ mempool_free(buf_ptr, acrn_mempool);
+}
+EXPORT_SYMBOL_GPL(acrn_mempool_free);
diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c
index 494461b05490..ebca471548d7 100644
--- a/drivers/vhm/vhm_mm.c
+++ b/drivers/vhm/vhm_mm.c
@@ -65,17 +65,19 @@
static int set_memory_region(unsigned long vmid,
struct vm_memory_region *region)
{
- struct set_regions regions;
+ struct set_regions *regions;
- regions.vmid = vmid;
- regions.mr_num = 1;
- regions.regions_gpa = virt_to_phys(region);
+ regions = acrn_mempool_alloc(GFP_KERNEL);
+ regions->vmid = vmid;
+ regions->mr_num = 1;
+ regions->regions_gpa = virt_to_phys(region);
- if (set_memory_regions(&regions) < 0) {
+ if (set_memory_regions(regions) < 0) {
+ acrn_mempool_free(regions);
pr_err("vhm: failed to set memory region for vm[%ld]!\n", vmid);
return -EFAULT;
}
-
+ acrn_mempool_free(regions);
return 0;
}
@@ -83,30 +85,38 @@ int add_memory_region(unsigned long vmid, unsigned long gpa,
unsigned long host_gpa, unsigned long size,
unsigned int mem_type, unsigned mem_access_right)
{
- struct vm_memory_region region;
+ struct vm_memory_region *region;
+ int ret;
- region.type = MR_ADD;
- region.gpa = gpa;
- region.vm0_gpa = host_gpa;
- region.size = size;
- region.prot = ((mem_type & MEM_TYPE_MASK) |
+ region = acrn_mempool_alloc(GFP_KERNEL);
+ region->type = MR_ADD;
+ region->gpa = gpa;
+ region->vm0_gpa = host_gpa;
+ region->size = size;
+ region->prot = ((mem_type & MEM_TYPE_MASK) |
(mem_access_right & MEM_ACCESS_RIGHT_MASK));
- return set_memory_region(vmid, &region);
+ ret = set_memory_region(vmid, region);
+ acrn_mempool_free(region);
+ return ret;
}
EXPORT_SYMBOL_GPL(add_memory_region);
int del_memory_region(unsigned long vmid, unsigned long gpa,
unsigned long size)
{
- struct vm_memory_region region;
+ struct vm_memory_region *region;
+ int ret;
- region.type = MR_DEL;
- region.gpa = gpa;
- region.vm0_gpa = 0;
- region.size = size;
- region.prot = 0;
+ region = acrn_mempool_alloc(GFP_KERNEL);
+ region->type = MR_DEL;
+ region->gpa = gpa;
+ region->vm0_gpa = 0;
+ region->size = size;
+ region->prot = 0;
- return set_memory_region(vmid, &region);
+ ret = set_memory_region(vmid, region);
+ acrn_mempool_free(region);
+ return ret;
}
EXPORT_SYMBOL_GPL(del_memory_region);
@@ -131,18 +141,21 @@ int set_memory_regions(struct set_regions *regions)
int write_protect_page(unsigned long vmid,
unsigned long gpa, unsigned char set)
{
- struct wp_data wp;
+ struct wp_data *wp;
- wp.set = set;
- wp.gpa = gpa;
+ wp = acrn_mempool_alloc(GFP_KERNEL);
+ wp->set = set;
+ wp->gpa = gpa;
if (hcall_write_protect_page(vmid,
- virt_to_phys(&wp)) < 0) {
+ virt_to_phys(wp)) < 0) {
+ acrn_mempool_free(wp);
pr_err("vhm: vm[%ld] %s failed !\n", vmid, __func__);
return -EFAULT;
}
pr_debug("VHM: %s, gpa: 0x%lx, set: %d\n", __func__, gpa, set);
+ acrn_mempool_free(wp);
return 0;
}
diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c
index 61207106d5de..f75692719924 100644
--- a/drivers/vhm/vhm_vm_mngt.c
+++ b/drivers/vhm/vhm_vm_mngt.c
@@ -127,14 +127,21 @@ EXPORT_SYMBOL_GPL(vhm_get_vm_info);
int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr,
unsigned long msi_data)
{
- struct acrn_msi_entry msi;
+ struct acrn_msi_entry *msi;
int ret;
+ /* vhm_inject_msi is called in vhm_irqfd_inject from eventfd_signal
+ * and the interrupt is disabled.
+ * So the GFP_ATOMIC should be used instead of GFP_KERNEL to
+ * avoid the sleeping with interrupt disabled.
+ */
+ msi = acrn_mempool_alloc(GFP_ATOMIC);
/* msi_addr: addr[19:12] with dest vcpu id */
/* msi_data: data[7:0] with vector */
- msi.msi_addr = msi_addr;
- msi.msi_data = msi_data;
- ret = hcall_inject_msi(vmid, virt_to_phys(&msi));
+ msi->msi_addr = msi_addr;
+ msi->msi_data = msi_data;
+ ret = hcall_inject_msi(vmid, virt_to_phys(msi));
+ acrn_mempool_free(msi);
if (ret < 0) {
pr_err("vhm: failed to inject!\n");
return -EFAULT;
@@ -145,18 +152,23 @@ EXPORT_SYMBOL_GPL(vhm_inject_msi);
unsigned long vhm_vm_gpa2hpa(unsigned long vmid, unsigned long gpa)
{
- struct vm_gpa2hpa gpa2hpa;
+ struct vm_gpa2hpa *gpa2hpa;
int ret;
+ unsigned long hpa;
- gpa2hpa.gpa = gpa;
- gpa2hpa.hpa = -1UL; /* Init value as invalid gpa */
- ret = hcall_vm_gpa2hpa(vmid, virt_to_phys(&gpa2hpa));
+ gpa2hpa = acrn_mempool_alloc(GFP_KERNEL);
+ gpa2hpa->gpa = gpa;
+ gpa2hpa->hpa = -1UL; /* Init value as invalid gpa */
+ ret = hcall_vm_gpa2hpa(vmid, virt_to_phys(gpa2hpa));
if (ret < 0) {
pr_err("vhm: failed to inject!\n");
+ acrn_mempool_free(gpa2hpa);
return -EFAULT;
}
+ hpa = gpa2hpa->hpa;
mb();
- return gpa2hpa.hpa;
+ acrn_mempool_free(gpa2hpa);
+ return hpa;
}
EXPORT_SYMBOL_GPL(vhm_vm_gpa2hpa);
diff --git a/include/linux/vhm/acrn_vhm_mm.h b/include/linux/vhm/acrn_vhm_mm.h
index 49c429921e05..c5635ed88b3e 100644
--- a/include/linux/vhm/acrn_vhm_mm.h
+++ b/include/linux/vhm/acrn_vhm_mm.h
@@ -193,4 +193,9 @@ int hugepage_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys);
* Return: 0 on success, <0 for error.
*/
int set_memory_regions(struct set_regions *regions);
+
+int acrn_mempool_init(int pool_size, int buf_size);
+void acrn_mempool_deinit(void);
+void *acrn_mempool_alloc(gfp_t gfp_flag);
+void acrn_mempool_free(void *buf);
#endif
--
https://clearlinux.org