clear-pkgs-linux-iot-lts2018/0757-vhm-support-polling-mo...

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Shuo Liu <shuo.a.liu@intel.com>
Date: Tue, 30 Oct 2018 18:07:25 +0800
Subject: [PATCH] vhm: support polling mode of ioreq completion

The hypervisor can poll for ioreq completion from its CPU idle loop. In
that polling mode we must not notify the hypervisor, otherwise the
notification could race with a newly arriving ioreq.

VHM works in polling mode if the hypervisor advertises this capability;
otherwise it keeps working in notification mode.
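
In code, the completion path looks roughly like the sketch below. It
mirrors the ioreq_complete_request() helper added by this patch; the
function name here is only illustrative and error logging is trimmed:

    static int complete_one_ioreq(unsigned long vmid, int vcpu,
                                  struct vhm_request *req)
    {
            /*
             * Read the flag before marking the request complete; once it
             * is REQ_STATE_COMPLETE the hypervisor may act on the slot,
             * so reading the flag afterwards would race with it.
             */
            bool polling_mode = req->completion_polling;

            smp_mb();
            atomic_set(&req->processed, REQ_STATE_COMPLETE);

            /*
             * In polling mode the hypervisor observes REQ_STATE_COMPLETE
             * from its idle loop, so no notification hypercall is needed.
             */
            if (!polling_mode && hcall_notify_req_finish(vmid, vcpu) < 0)
                    return -EFAULT;

            return 0;
    }

The new completion_polling flag takes over the first previously reserved
uint32_t in struct vhm_request (byte offset 4), so reserved0 shrinks from
15 to 14 entries and the overall layout size is unchanged.
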
Tracked-On: PKT-1592
Tracked-On: projectacrn/acrn-hypervisor#1821
Signed-off-by: Shuo Liu <shuo.a.liu@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Reviewed-by: Zhao, Yakui <yakui.zhao@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
---
drivers/vhm/vhm_ioreq.c | 52 ++++++++++++++++++++-------------
include/linux/vhm/acrn_common.h | 12 ++++++--
2 files changed, 41 insertions(+), 23 deletions(-)
diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c
index 9c50d94f0565..7e067ac4923d 100644
--- a/drivers/vhm/vhm_ioreq.c
+++ b/drivers/vhm/vhm_ioreq.c
@@ -659,6 +659,32 @@ static void acrn_ioreq_notify_client(struct ioreq_client *client)
wake_up_interruptible(&client->wq);
}
+static int ioreq_complete_request(unsigned long vmid, int vcpu,
+ struct vhm_request *vhm_req)
+{
+ bool polling_mode;
+
+ polling_mode = vhm_req->completion_polling;
+ smp_mb();
+ atomic_set(&vhm_req->processed, REQ_STATE_COMPLETE);
+ /*
+ * In polling mode, the hypervisor polls for ioreq completion.
+ * Once the ioreq is marked REQ_STATE_COMPLETE, the hypervisor
+ * can pick up the result and continue the I/O flow, so no
+ * notification hypercall is needed.
+ * Note: completion_polling must be read before the request is
+ * marked complete, or we would race with the hypervisor.
+ */
+ if (!polling_mode) {
+ if (hcall_notify_req_finish(vmid, vcpu) < 0) {
+ pr_err("vhm-ioreq: notify request complete failed!\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
static bool req_in_range(struct ioreq_range *range, struct vhm_request *req)
{
bool ret = false;
@@ -718,6 +744,7 @@ static int cached_enable;
static int handle_cf8cfc(struct vhm_vm *vm, struct vhm_request *req, int vcpu)
{
int req_handled = 0;
+ int err = 0;
/*XXX: like DM, assume cfg address write is size 4 */
if (is_cfg_addr(req)) {
@@ -762,17 +789,10 @@ static int handle_cf8cfc(struct vhm_vm *vm, struct vhm_request *req, int vcpu)
}
}
- if (req_handled) {
- smp_mb();
- atomic_set(&req->processed, REQ_STATE_COMPLETE);
- if (hcall_notify_req_finish(vm->vmid, vcpu) < 0) {
- pr_err("vhm-ioreq: failed to "
- "notify request finished !\n");
- return -EFAULT;
- }
- }
+ if (req_handled)
+ err = ioreq_complete_request(vm->vmid, vcpu, req);
- return req_handled;
+ return err ? err : req_handled;
}
static bool bdf_match(struct ioreq_client *client)
@@ -880,7 +900,6 @@ int acrn_ioreq_complete_request(int client_id, uint64_t vcpu,
struct vhm_request *vhm_req)
{
struct ioreq_client *client;
- int ret;
if (client_id < 0 || client_id >= MAX_CLIENT) {
pr_err("vhm-ioreq: no client for id %d\n", client_id);
@@ -898,16 +917,7 @@ int acrn_ioreq_complete_request(int client_id, uint64_t vcpu,
vhm_req += vcpu;
}
- smp_mb();
- atomic_set(&vhm_req->processed, REQ_STATE_COMPLETE);
-
- ret = hcall_notify_req_finish(client->vmid, vcpu);
- if (ret < 0) {
- pr_err("vhm-ioreq: failed to notify request finished !\n");
- return -EFAULT;
- }
-
- return 0;
+ return ioreq_complete_request(client->vmid, vcpu, vhm_req);
}
EXPORT_SYMBOL_GPL(acrn_ioreq_complete_request);
diff --git a/include/linux/vhm/acrn_common.h b/include/linux/vhm/acrn_common.h
index d28ce51ff271..330af7110a42 100644
--- a/include/linux/vhm/acrn_common.h
+++ b/include/linux/vhm/acrn_common.h
@@ -184,9 +184,17 @@ struct vhm_request {
uint32_t type;
/**
- * @reserved0: Reserved fields. Byte offset: 4.
+ * @completion_polling: If set, the hypervisor will poll for completion.
+ *
+ * Byte offset: 4.
+ */
+ uint32_t completion_polling;
+
+
+ /**
+ * @reserved0: Reserved fields. Byte offset: 8.
*/
- uint32_t reserved0[15];
+ uint32_t reserved0[14];
/**
* @reqs: Details about this request.
--
https://clearlinux.org