clear-pkgs-linux-iot-lts2018/0452-drm-i915-gvt-ivi-lazy-...

From 644733c826a3596a5193fd7dd65039db264dc717 Mon Sep 17 00:00:00 2001
From: Ping Gao <ping.a.gao@intel.com>
Date: Fri, 2 Jun 2017 09:03:19 +0800
Subject: [PATCH 452/550] drm/i915/gvt: ivi: lazy shadow context
Shadowing the guest context by copying all of its pages is a
significant overhead. For performance, this patch introduces lazy
context shadowing: only the first page of the context is copied,
while the GGTT entries of the remaining shadow context pages are
pointed at the corresponding pages of the guest context.
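
To illustrate the idea, here is a minimal user-space sketch. The names
(guest_ctx, shadow_pte, shadow_lazy, ...) are hypothetical; this is not
the GVT-g code in the diff below, which rewrites real GGTT PTEs:

/* Toy model of eager vs. lazy context shadowing.  Illustration only:
 * the names and data structures here are made up and are NOT the
 * GVT-g implementation changed by this patch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTX_PAGES 20           /* pages in one logical ring context */
#define PAGE_SIZE 4096

static uint8_t guest_ctx[CTX_PAGES][PAGE_SIZE];  /* guest-owned pages */
static uint8_t shadow_ctx[CTX_PAGES][PAGE_SIZE]; /* host-owned copies */
static uint8_t *shadow_pte[CTX_PAGES];           /* stand-in for the GGTT
                                                  * entries the HW reads */

/* Eager shadowing: copy every guest page into a private shadow page. */
static void shadow_eager(void)
{
	for (int i = 0; i < CTX_PAGES; i++) {
		memcpy(shadow_ctx[i], guest_ctx[i], PAGE_SIZE);
		shadow_pte[i] = shadow_ctx[i];
	}
}

/* Lazy shadowing: copy only the first page; point the remaining
 * "GGTT entries" straight at the guest's own pages, skipping the copy. */
static void shadow_lazy(void)
{
	memcpy(shadow_ctx[0], guest_ctx[0], PAGE_SIZE);
	shadow_pte[0] = shadow_ctx[0];
	for (int i = 1; i < CTX_PAGES; i++)
		shadow_pte[i] = guest_ctx[i];
}

int main(void)
{
	shadow_eager();
	shadow_lazy();
	printf("page 1 aliases the guest page: %s\n",
	       shadow_pte[1] == guest_ctx[1] ? "yes" : "no");
	return 0;
}
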
Change-Id: I6b806da29fa75eff73122d0328b1a277780eabe1
Signed-off-by: Ping Gao <ping.a.gao@intel.com>
Acknowledged-by: Singh, Satyeshwar <satyeshwar.singh@intel.com>
Reviewed-on:
Reviewed-by: He, Min <min.he@intel.com>
Reviewed-by: Jiang, Fei <fei.jiang@intel.com>
Reviewed-by: Dong, Eddie <eddie.dong@intel.com>
Tested-by: Dong, Eddie <eddie.dong@intel.com>
---
drivers/gpu/drm/i915/gvt/scheduler.c | 92 +++++++++++++++++++++-------
1 file changed, 69 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e7514bdf2fe9..f0f0ada705cd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -119,6 +119,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
}
}
+static bool enable_lazy_shadow_ctx = true;
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
@@ -130,6 +131,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct page *page;
void *dst;
unsigned long context_gpa, context_page_num;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct i915_ggtt *ggtt = &gvt->dev_priv->ggtt;
+ dma_addr_t addr;
+ gen8_pte_t __iomem *pte;
int i;
gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
@@ -143,6 +148,18 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = 19;
i = 2;
+#ifdef CONFIG_INTEL_IOMMU
+ /*
+ * In case IOMMU for graphics is turned on, we don't want to
+ * turn on lazy shadow context feature because it will touch
+ * GGTT entries which require a BKL and since this is a
+ * performance enhancement feature, we will end up negating
+ * the performance.
+ */
+ if (intel_iommu_gfx_mapped) {
+ enable_lazy_shadow_ctx = false;
+ }
+#endif
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -153,14 +170,41 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
- dst = kmap(page);
- intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+ if (!enable_lazy_shadow_ctx) {
+ page = i915_gem_object_get_page(ctx_obj,
+ LRC_PPHWSP_PN + i);
+ dst = kmap(page);
+ intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
I915_GTT_PAGE_SIZE);
- kunmap(page);
+ kunmap(page);
+ } else {
+ unsigned long mfn;
+ struct i915_gem_context *shadow_ctx =
+ workload->vgpu->submission.shadow_ctx;
+
+ addr = i915_ggtt_offset(
+ shadow_ctx->__engine[ring_id].state) +
+ (LRC_PPHWSP_PN + i) * PAGE_SIZE;
+ pte = (gen8_pte_t __iomem *)ggtt->gsm +
+ (addr >> PAGE_SHIFT);
+
+ mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu,
+ context_gpa >> 12);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("fail to translate gfn during context shadow\n");
+ return -ENXIO;
+ }
+
+ mfn <<= 12;
+ mfn |= _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED;
+ writeq(mfn, pte);
+ }
i++;
}
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap(page);
@@ -712,29 +756,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);
- context_page_num = rq->engine->context_size;
- context_page_num = context_page_num >> PAGE_SHIFT;
+ if (!enable_lazy_shadow_ctx) {
+ context_page_num = rq->engine->context_size;
+ context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
- context_page_num = 19;
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+ context_page_num = 19;
- i = 2;
+ i = 2;
- while (i < context_page_num) {
- context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- (u32)((workload->ctx_desc.lrca + i) <<
- I915_GTT_PAGE_SHIFT));
- if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid guest context descriptor\n");
- return;
- }
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ I915_GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid guest context descriptor\n");
+ return;
+ }
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
- src = kmap(page);
- intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
+ page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ src = kmap(page);
+ intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
+ I915_GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
--
2.19.1