clear-pkgs-linux-iot-lts2018/0591-drm-i915-gvt-show-pid-...

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Min He <min.he@intel.com>
Date: Thu, 4 Jan 2018 21:48:47 +0800
Subject: [PATCH] drm/i915/gvt: show pid/hw_id of current DomU process in
debugfs
v1: Show the PID and HW ID of the current DomU process when printing
shadow context status. This allows us to identify which process a DomU
workload came from.

v2: Expose the HW context ID to debugfs, so that VTune can match this
context ID against the one exposed by the Metrics Discovery (MD) API.

v3: When storing the DomU PID and HW ID in the HWS page, offset the
writes by the vGPU ID. Previously, when multiple DomUs were running,
they would all write their PID to the same address in the HWS page, so
i915_context_status showed the same current PID and HW ID for every
shadow context. Offsetting each write by the DomU's vGPU ID lets every
shadow context report its own details. This fixes defect 201282.
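
A minimal sketch of the slot arithmetic (constants are taken from the
intel_ringbuffer.h hunk below; MI_STORE_DWORD_INDEX_SHIFT is 2 in
i915, turning a dword index into a byte offset into the status page):

    #include <stdio.h>

    #define MI_STORE_DWORD_INDEX_SHIFT 2    /* dword index -> byte offset */
    #define I915_GEM_HWS_PID_INDEX 0x50
    #define I915_GEM_HWS_CID_INDEX 0x58

    int main(void)
    {
            unsigned int id;

            /* Each vGPU gets its own dword slot: base index + vgpu id. */
            for (id = 1; id <= 4; id++) {
                    unsigned int pid_off = (I915_GEM_HWS_PID_INDEX + id)
                                           << MI_STORE_DWORD_INDEX_SHIFT;
                    unsigned int cid_off = (I915_GEM_HWS_CID_INDEX + id)
                                           << MI_STORE_DWORD_INDEX_SHIFT;

                    printf("vGPU %u: PID slot at byte 0x%03x, CID slot at byte 0x%03x\n",
                           id, pid_off, cid_off);
            }
            return 0;
    }

Because the CID base sits eight dwords above the PID base, the scheme
has room for eight vGPU ids (0 through 7) before the PID slots would
run into the CID slots.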
Change-Id: I106fae75af5963f043286acd604d3bab02b87c17
Signed-off-by: Min He <min.he@intel.com>
Signed-off-by: Daniel van der Wath <danielx.j.van.der.wath@intel.com>
Signed-off-by: Fei Jiang <fei.jiang@intel.com>
Reviewed-by: Singh, Satyeshwar <satyeshwar.singh@intel.com>
Reviewed-by: Abes, Brahim <brahimx.abes@intel.com>
Reviewed-by: He, Min <min.he@intel.com>
Reviewed-on:
Reviewed-by: Dong, Eddie <eddie.dong@intel.com>
Tested-by: Dong, Eddie <eddie.dong@intel.com>
---
drivers/gpu/drm/i915/gvt/scheduler.c | 38 +++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_debugfs.c | 26 ++++++++++++++++-
drivers/gpu/drm/i915/intel_lrc.c | 8 ++++++
drivers/gpu/drm/i915/intel_ringbuffer.h | 5 ++++
4 files changed, 76 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1d25ee3c1277..c28bc9a2fffa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -435,6 +435,38 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
return ret;
}
+static void gen8_shadow_pid_cid(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	u32 *cs;
+
+	/* Copy the PID and CID from the guest's HWS page to the host's one */
+	cs = intel_ring_begin(workload->req, 16);
+	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+	*cs++ = i915_mmio_reg_offset(NOPID);
+	*cs++ = (workload->ctx_desc.lrca << I915_GTT_PAGE_SHIFT) +
+		I915_GEM_HWS_PID_ADDR;
+	*cs++ = 0;
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+	*cs++ = i915_mmio_reg_offset(NOPID);
+	*cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_PID_ADDR +
+		(workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT);
+	*cs++ = 0;
+	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+	*cs++ = i915_mmio_reg_offset(NOPID);
+	*cs++ = (workload->ctx_desc.lrca << I915_GTT_PAGE_SHIFT) +
+		I915_GEM_HWS_CID_ADDR;
+	*cs++ = 0;
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+	*cs++ = i915_mmio_reg_offset(NOPID);
+	*cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_CID_ADDR +
+		(workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT);
+	*cs++ = 0;
+	intel_ring_advance(workload->req, cs);
+}
+
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -633,6 +665,8 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
+	gen8_shadow_pid_cid(workload);
+
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
@@ -1180,6 +1214,10 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
+	if (!s->shadow_ctx->name)
+		s->shadow_ctx->name = kasprintf(GFP_KERNEL,
+						"Shadow Context %d", vgpu->id);
+
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
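
The sixteen dwords emitted by gen8_shadow_pid_cid() above form four
MI_LOAD_REGISTER_MEM/MI_STORE_REGISTER_MEM pairs: the command streamer
has no memory-to-memory move, so each value bounces through a scratch
MMIO register (NOPID) on its way from the guest's per-process HWS page
to the host engine's status page. A hypothetical helper, not part of
the patch, that captures the idiom:

    /*
     * Emit one GGTT dword-to-dword copy through a scratch MMIO
     * register. Each LRM/SRM packet is 4 dwords: opcode, register,
     * address low, address high.
     */
    static u32 *emit_gtt_dword_copy(u32 *cs, u32 scratch_reg_offset,
                                    u32 src_ggtt, u32 dst_ggtt)
    {
            *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
            *cs++ = scratch_reg_offset; /* e.g. i915_mmio_reg_offset(NOPID) */
            *cs++ = src_ggtt;
            *cs++ = 0;                  /* upper 32 bits of the address */
            *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
            *cs++ = scratch_reg_offset;
            *cs++ = dst_ggtt;
            *cs++ = 0;
            return cs;
    }

With such a helper the function would collapse to two calls, one for
the PID slot and one for the CID slot. Note that intel_ring_begin()
can return an ERR_PTR on failure, which the hunk above does not check.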
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b2e0c2348882..4a1330a42a28 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1943,6 +1943,19 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
ring->space, ring->head, ring->tail, ring->emit);
}
+static void describe_ctx_ring_shadowed(struct seq_file *m,
+		struct i915_gem_context *ctx, struct intel_ring *ring,
+		struct intel_engine_cs *engine)
+{
+	int pid, cid, vgt_id;
+
+	sscanf(ctx->name, "Shadow Context %d", &vgt_id);
+	pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id);
+	cid = intel_read_status_page(engine, I915_GEM_HWS_CID_INDEX + vgt_id);
+	seq_printf(m, " (Current DomU Process PID: %d, CID: %d)",
+		   pid, cid);
+}
+
static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1957,6 +1970,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+		bool is_shadow_context = false;
seq_printf(m, "HW context %u ", ctx->hw_id);
if (ctx->pid) {
struct task_struct *task;
@@ -1967,6 +1981,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
task->comm, task->pid);
put_task_struct(task);
}
+		} else if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) {
+			seq_puts(m, "DomU Shadow Context ");
+			is_shadow_context = true;
} else if (IS_ERR(ctx->file_priv)) {
seq_puts(m, "(deleted) ");
} else {
@@ -1979,12 +1996,19 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce =
to_intel_context(ctx, engine);
+			u64 lrc_desc = ce->lrc_desc;
+			seq_printf(m, "ctx id 0x%x ", (uint32_t)((lrc_desc >> 12) &
+				   0xFFFFF));
seq_printf(m, "%s: ", engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
-			if (ce->ring)
+			if (ce->ring) {
 				describe_ctx_ring(m, ce->ring);
+				if (is_shadow_context)
+					describe_ctx_ring_shadowed(m, ctx,
+							ce->ring, engine);
+			}
seq_putc(m, '\n');
}
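
With these hooks in place, a shadow context entry in
/sys/kernel/debug/dri/0/i915_context_status would read roughly like
this (all values illustrative):

    HW context 5 DomU Shadow Context
    ctx id 0x00005 rcs0: <object and ring details> (Current DomU Process PID: 1823, CID: 5)

The vGPU id is recovered with sscanf() from the "Shadow Context %d"
name assigned in intel_vgpu_setup_submission(), then used to index the
per-vGPU PID/CID slots in the engine's status page.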
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c8d5324f2d38..adfe6901b8d5 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2750,6 +2750,14 @@ populate_lr_context(struct i915_gem_context *ctx,
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+	/* Write the context's PID and HW ID (CID) to the per-context HWS page */
+	if (intel_vgpu_active(engine->i915) && pid_nr(ctx->pid)) {
+		*(u32 *)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE +
+			 I915_GEM_HWS_PID_ADDR) = pid_nr(ctx->pid) & 0x3fffff;
+		*(u32 *)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE +
+			 I915_GEM_HWS_CID_ADDR) = ctx->hw_id & 0x3fffff;
+	}
+
err_unpin_ctx:
i915_gem_object_unpin_map(ctx_obj);
return ret;
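
This is the guest-side half: in a DomU's i915, populate_lr_context()
seeds the process PID and context hw_id into the context image's own
per-process HWSP, which the LRM commands in gen8_shadow_pid_cid()
later read back on the host. A user-space model of the write (the
value of LRC_PPHWSP_PN is an assumption for illustration; the slot
offsets mirror the hunk above):

    #include <stdint.h>

    #define PAGE_SIZE      4096
    #define LRC_PPHWSP_PN  1           /* assumed per-process HWSP page */
    #define HWS_PID_ADDR   (0x50 << 2) /* I915_GEM_HWS_PID_ADDR */
    #define HWS_CID_ADDR   (0x58 << 2) /* I915_GEM_HWS_CID_ADDR */

    static void seed_pid_cid(uint8_t *ctx_image, uint32_t pid, uint32_t hw_id)
    {
            /* Both values are masked to 22 bits, as in the patch. */
            *(uint32_t *)(ctx_image + LRC_PPHWSP_PN * PAGE_SIZE + HWS_PID_ADDR)
                    = pid & 0x3fffff;
            *(uint32_t *)(ctx_image + LRC_PPHWSP_PN * PAGE_SIZE + HWS_CID_ADDR)
                    = hw_id & 0x3fffff;
    }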
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 4f39f6b5d1e0..83833ce2a4f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -797,6 +797,11 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_GEM_HWS_PID_INDEX 0x50
+#define I915_GEM_HWS_PID_ADDR (I915_GEM_HWS_PID_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_GEM_HWS_CID_INDEX 0x58
+#define I915_GEM_HWS_CID_ADDR (I915_GEM_HWS_CID_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
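
For reference, the resulting dword-index map of the hardware status
page (existing indices from this header; the eight-slot spans follow
from the 0x50/0x58 spacing):

    /*
     * HWS page layout, dword indices:
     *   0x10        CSB buffer start (write pointer at 0x1f; 0x2f on CNL)
     *   0x40        scratch
     *   0x50..0x57  DomU PID, one dword per vGPU (base + vgpu id)
     *   0x58..0x5f  DomU CID, one dword per vGPU (base + vgpu id)
     */

None of the new slots collide with the existing scratch or CSB
indices.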
--
https://clearlinux.org