clear-pkgs-linux-iot-lts2018/1235-Revert-drm-i915-gvt-re...

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Liu Xinyun <xinyun.liu@intel.com>
Date: Thu, 19 Sep 2019 00:31:20 +0800
Subject: [PATCH] Revert "drm/i915/gvt: removed save/store registers"

Bring back this feature to improve stability for WaaG (Windows as a
Guest).

This reverts commit 7b25f1946a757a5c67ed45a773ff1f98e0889a97.

Tracked-On: projectacrn/acrn-hypervisor#3830
Signed-off-by: Liu Xinyun <xinyun.liu@intel.com>
Reviewed-by: Zhao Yakui <yakui.zhao@intel.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 12 ++++++++
 drivers/gpu/drm/i915/gvt/scheduler.c    | 40 +++++++++++++++++++++++++++--------
 2 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 4fac40d26549..c32e7d5e8629 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -444,7 +444,9 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
+	int ring_id;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	if (!vgpu_data->active)
 		return;
@@ -463,5 +465,15 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->current_vgpu = NULL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&vgpu->gvt->sched_lock);
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 7ebbcd22c024..37207c12e196 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -249,7 +249,6 @@ static inline bool is_gvt_request(struct i915_request *req)
 	return i915_gem_context_force_single_submission(req->gem_context);
 }
 
-/*
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -263,7 +262,6 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
 	reg = RING_ACTHD_UDW(ring_base);
 	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
 }
-*/
 
 static void active_hp_work(struct work_struct *work)
 {
@@ -291,9 +289,21 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id ring_id = req->engine->id;
 	struct intel_vgpu_workload *workload;
+	unsigned long flags;
+
+	if (!is_gvt_request(req)) {
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
+		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
+		    scheduler->engine_owner[ring_id]) {
+			/* Switch ring from vGPU to host. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
-	if (!is_gvt_request(req))
 		return NOTIFY_OK;
+	}
 
 	workload = scheduler->current_workload[ring_id];
 	if (unlikely(!workload))
@@ -301,14 +311,27 @@ static int shadow_context_status_change(struct notifier_block *nb,
 
 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
+		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
+			/* Switch ring from host to vGPU or vGPU to vGPU. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      workload->vgpu, ring_id);
+			scheduler->engine_owner[ring_id] = workload->vgpu;
+		} else
+			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
+				      ring_id, workload->vgpu->id);
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
+
 		schedule_work(&gvt->active_hp_work);
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
+		save_ring_hw_state(workload->vgpu, ring_id);
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
-		return NOTIFY_OK;
+		save_ring_hw_state(workload->vgpu, ring_id);
+		break;
 	default:
 		WARN_ON(1);
 		return NOTIFY_OK;
@@ -1031,7 +1054,6 @@ static int workload_thread(void *priv)
 	struct intel_vgpu_workload *workload = NULL;
 	struct intel_vgpu *vgpu = NULL;
 	int ret;
-	long lret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
 		|| IS_KABYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -1077,13 +1099,7 @@ static int workload_thread(void *priv)
 		gvt_dbg_sched("ring id %d wait workload %p\n",
 				workload->ring_id, workload);
-		lret = i915_request_wait(workload->req, 0,
-					MAX_SCHEDULE_TIMEOUT);
-
-		gvt_dbg_sched("i915_wait_request %p returns %ld\n",
-				workload, lret);
-		if (lret >= 0 && workload->status == -EINPROGRESS)
-			workload->status = 0;
+		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 		/*
 		 * increased guilty_count means that this request triggerred
--
https://clearlinux.org