From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Zhipeng Gong <zhipeng.gong@intel.com>
Date: Fri, 14 Sep 2018 16:10:21 +0800
Subject: [PATCH] drm/i915/gvt: handle ppgtt update from g2v

This patch handles the PPGTT update from a g2v notification.
It reads the PPGTT PTE entries out of the guest page-table
pages and converts them to host pfns.
It creates local PPGTT tables and inserts the content pages
into them directly, which avoids tracking the usage of the
guest page tables and removes the write-protection cost of
the original shadow page mechanism.
This patch is only for the SOS (Service OS).
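
A rough sketch of the flow (the host-side names below are from this
patch; the guest-side trigger through the PVMMIO shared page is
assumed):

  guest i915                         GVT-g (SOS)
  ----------                         -----------
  fill pv_ppgtt in shared page
  notify VGT_G2V_PPGTT_L4_INSERT --> handle_g2v_notification()
                                       intel_vgpu_g2v_pv_ppgtt_insert_4lvl()
                                         walk_pml4_range()  /* read guest PTEs */
                                         gfn -> mfn for each page
                                         ppgtt->vm.insert_entries()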

v3:
- Do not pass pd pages; let GVT-g read them from guest memory instead.
v4:
- Fix a page-walk error.
- Remove insert_4lvl_sg.
v5:
- Use the cache_level provided by the guest.

Tracked-On: #874
Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
Reviewed-by: He, Min <min.he@intel.com>
Reviewed-by: Zhao Yakui <yakui.zhao@intel.com>
---
drivers/gpu/drm/i915/gvt/gtt.c | 354 ++++++++++++++++++++++++++++
drivers/gpu/drm/i915/gvt/gtt.h | 11 +-
drivers/gpu/drm/i915/gvt/handlers.c | 6 +
3 files changed, 370 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 3fbce20..0c7160a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1757,6 +1757,32 @@ static int ppgtt_handle_guest_write_page_table_bytes(
return 0;
}
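+/*
+ * PV path: guest page tables are not shadowed, so tearing down the mm
+ * only requires dropping the host ppgtt and clearing the root entry.
+ */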
+static void invalidate_mm_pv(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_gvt_gtt_entry se;
+
+ if (WARN_ON(mm->ppgtt_mm.root_entry_type !=
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY))
+ return;
+
+ i915_ppgtt_close(&mm->ppgtt_mm.ppgtt->vm);
+ i915_ppgtt_put(mm->ppgtt_mm.ppgtt);
+
+ ppgtt_get_shadow_root_entry(mm, &se, 0);
+ if (!ops->test_present(&se))
+ return;
+ trace_spt_guest_change(vgpu->id, "destroy root pointer",
+ NULL, se.type, se.val64, 0);
+ se.val64 = 0;
+ ppgtt_set_shadow_root_entry(mm, &se, 0);
+
+ mm->ppgtt_mm.shadowed = false;
+}
+
static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
@@ -1769,6 +1795,11 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
if (!mm->ppgtt_mm.shadowed)
return;
+ if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE) {
+ invalidate_mm_pv(mm);
+ return;
+ }
+
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
ppgtt_get_shadow_root_entry(mm, &se, index);
@@ -1786,6 +1817,33 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
mm->ppgtt_mm.shadowed = false;
}
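+/*
+ * PV path: instead of shadowing the guest tables, back the mm with a
+ * host-allocated 4-level ppgtt; its pml4 becomes the shadow root entry.
+ */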
+static int shadow_mm_pv(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_entry se;
+
+ if (WARN_ON(mm->ppgtt_mm.root_entry_type !=
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY))
+ return -EINVAL;
+
+ mm->ppgtt_mm.ppgtt = i915_ppgtt_create(gvt->dev_priv, NULL);
+ if (IS_ERR(mm->ppgtt_mm.ppgtt)) {
+ gvt_vgpu_err("fail to create ppgtt, err %ld\n",
+ PTR_ERR(mm->ppgtt_mm.ppgtt));
+ return PTR_ERR(mm->ppgtt_mm.ppgtt);
+ }
+
+ se.type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ se.val64 = px_dma(&mm->ppgtt_mm.ppgtt->pml4);
+ ppgtt_set_shadow_root_entry(mm, &se, 0);
+
+ trace_spt_guest_change(vgpu->id, "populate root pointer",
+ NULL, se.type, se.val64, 0);
+ mm->ppgtt_mm.shadowed = true;
+
+ return 0;
+}
static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
@@ -1800,6 +1858,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
if (mm->ppgtt_mm.shadowed)
return 0;
+ if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE)
+ return shadow_mm_pv(mm);
+
mm->ppgtt_mm.shadowed = true;
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
@@ -2788,3 +2849,296 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
intel_vgpu_destroy_all_ppgtt_mm(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
}
+
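+/*
+ * g2v handler: allocate page-table structures for [start, start+length)
+ * in the host ppgtt backing the guest mm identified by pv_ppgtt->pdp.
+ */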
+int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt;
+ struct intel_vgpu_mm *mm;
+ u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0};
+ int ret = 0;
+
+ if (WARN_ON(page_table_level != 4))
+ return -EINVAL;
+
+ gvt_dbg_mm("alloc_4lvl pdp=%llx start=%llx length=%llx\n",
+ pv_ppgtt->pdp, pv_ppgtt->start,
+ pv_ppgtt->length);
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+ if (!mm) {
+ gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdps[0]);
+ ret = -EINVAL;
+ } else {
+ ret = mm->ppgtt_mm.ppgtt->vm.allocate_va_range(
+ &mm->ppgtt_mm.ppgtt->vm,
+ pv_ppgtt->start, pv_ppgtt->length);
+ if (ret)
+ gvt_vgpu_err("failed to alloc for pdp %llx\n", pdps[0]);
+ }
+
+ return ret;
+}
+
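+/*
+ * g2v handler: unmap [start, start+length) in the host ppgtt backing
+ * the guest mm identified by pv_ppgtt->pdp.
+ */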
+int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt;
+ struct intel_vgpu_mm *mm;
+ u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0};
+ int ret = 0;
+
+ if (WARN_ON(page_table_level != 4))
+ return -EINVAL;
+
+ gvt_dbg_mm("clear_4lvl pdp=%llx start=%llx length=%llx\n",
+ pv_ppgtt->pdp, pv_ppgtt->start,
+ pv_ppgtt->length);
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+ if (!mm) {
+ gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdps[0]);
+ ret = -EINVAL;
+ } else {
+ mm->ppgtt_mm.ppgtt->vm.clear_range(
+ &mm->ppgtt_mm.ppgtt->vm,
+ pv_ppgtt->start, pv_ppgtt->length);
+ }
+
+ return ret;
+}
+
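+/*
+ * Boundary helpers: clamp a walk to the end of the current pml4e/pdpe/pde
+ * span, following the pgd_addr_end() pattern used by the core mm code.
+ */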
+#define GEN8_PML4E_SIZE (1UL << GEN8_PML4E_SHIFT)
+#define GEN8_PML4E_SIZE_MASK (~(GEN8_PML4E_SIZE - 1))
+#define GEN8_PDPE_SIZE (1UL << GEN8_PDPE_SHIFT)
+#define GEN8_PDPE_SIZE_MASK (~(GEN8_PDPE_SIZE - 1))
+#define GEN8_PDE_SIZE (1UL << GEN8_PDE_SHIFT)
+#define GEN8_PDE_SIZE_MASK (~(GEN8_PDE_SIZE - 1))
+
+#define pml4_addr_end(addr, end) \
+({ unsigned long __boundary = \
+ ((addr) + GEN8_PML4E_SIZE) & GEN8_PML4E_SIZE_MASK; \
+ (__boundary < (end)) ? __boundary : (end); \
+})
+
+#define pdp_addr_end(addr, end) \
+({ unsigned long __boundary = \
+ ((addr) + GEN8_PDPE_SIZE) & GEN8_PDPE_SIZE_MASK; \
+ (__boundary < (end)) ? __boundary : (end); \
+})
+
+#define pd_addr_end(addr, end) \
+({ unsigned long __boundary = \
+ ((addr) + GEN8_PDE_SIZE) & GEN8_PDE_SIZE_MASK; \
+ (__boundary < (end)) ? __boundary : (end); \
+})
+
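+/*
+ * State carried through the guest page-table walk: the collected host
+ * addresses and a scratch page for reading one guest table at a time.
+ */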
+struct ppgtt_walk {
+ unsigned long *mfns;
+ int mfn_index;
+ unsigned long *pt;
+};
+
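+/*
+ * Lowest level: copy the guest PTEs covering [start, end) into walk->pt,
+ * then translate each gfn to an mfn and record the host address.
+ */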
+static int walk_pt_range(struct intel_vgpu *vgpu, u64 pt,
+ u64 start, u64 end, struct ppgtt_walk *walk)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops;
+ unsigned long start_index, end_index;
+ int ret;
+ int i;
+ unsigned long mfn, gfn;
+
+ start_index = gma_ops->gma_to_pte_index(start);
+ end_index = ((end - start) >> PAGE_SHIFT) + start_index;
+
+ gvt_dbg_mm("%s: %llx start=%llx end=%llx start_index=%lx end_index=%lx mfn_index=%x\n",
+ __func__, pt, start, end,
+ start_index, end_index, walk->mfn_index);
+ ret = intel_gvt_hypervisor_read_gpa(vgpu,
+ (pt & PAGE_MASK) + (start_index << info->gtt_entry_size_shift),
+ walk->pt + start_index,
+ (end_index - start_index) << info->gtt_entry_size_shift);
+ if (ret) {
+ gvt_vgpu_err("fail to read gpa %llx\n", pt);
+ return ret;
+ }
+
+ for (i = start_index; i < end_index; i++) {
+ gfn = walk->pt[i] >> PAGE_SHIFT;
+ mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
+ return -ENXIO;
+ }
+ walk->mfns[walk->mfn_index++] = mfn << PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
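+/* Walk one page directory: read each guest PDE, then descend into its PT. */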
+static int walk_pd_range(struct intel_vgpu *vgpu, u64 pd,
+ u64 start, u64 end, struct ppgtt_walk *walk)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops;
+ unsigned long index;
+ u64 pt, next;
+ int ret = 0;
+
+ do {
+ index = gma_ops->gma_to_pde_index(start);
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu,
+ (pd & PAGE_MASK) + (index <<
+ info->gtt_entry_size_shift), &pt, 8);
+ if (ret)
+ return ret;
+ next = pd_addr_end(start, end);
+ gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n",
+ __func__, pd, start, end, next);
+ ret = walk_pt_range(vgpu, pt, start, next, walk);
+ if (ret)
+ return ret;
+
+ start = next;
+ } while (start != end);
+
+ return ret;
+}
+
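+/* Walk one PDP: read each guest PDPE, then descend into its PD. */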
+static int walk_pdp_range(struct intel_vgpu *vgpu, u64 pdp,
+ u64 start, u64 end, struct ppgtt_walk *walk)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops;
+ unsigned long index;
+ u64 pd, next;
+ int ret = 0;
+
+ do {
+ index = gma_ops->gma_to_l4_pdp_index(start);
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu,
+ (pdp & PAGE_MASK) + (index <<
+ info->gtt_entry_size_shift), &pd, 8);
+ if (ret)
+ return ret;
+ next = pdp_addr_end(start, end);
+ gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n",
+ __func__, pdp, start, end, next);
+
+ ret = walk_pd_range(vgpu, pd, start, next, walk);
+ if (ret)
+ return ret;
+ start = next;
+ } while (start != end);
+
+ return ret;
+}
+
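+/* Top level: read each guest PML4E, then descend into its PDP. */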
+static int walk_pml4_range(struct intel_vgpu *vgpu, u64 pml4,
+ u64 start, u64 end, struct ppgtt_walk *walk)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops;
+ unsigned long index;
+ u64 pdp, next;
+ int ret = 0;
+
+ do {
+ index = gma_ops->gma_to_pml4_index(start);
+ ret = intel_gvt_hypervisor_read_gpa(vgpu,
+ (pml4 & PAGE_MASK) + (index <<
+ info->gtt_entry_size_shift), &pdp, 8);
+ if (ret)
+ return ret;
+ next = pml4_addr_end(start, end);
+ gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n",
+ __func__, pml4, start, end, next);
+
+ ret = walk_pdp_range(vgpu, pdp, start, next, walk);
+ if (ret)
+ return ret;
+ start = next;
+ } while (start != end);
+
+ return ret;
+}
+
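+/*
+ * g2v handler: walk the guest page tables for [start, start+length),
+ * build an sg table of the backing host pages and map it into the host
+ * ppgtt with the guest-provided cache level.
+ */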
+int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt;
+ struct intel_vgpu_mm *mm;
+ u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0};
+ int ret = 0;
+ u64 start = pv_ppgtt->start;
+ u64 length = pv_ppgtt->length;
+ struct sg_table st;
+ struct scatterlist *sg = NULL;
+ int num_pages = length >> PAGE_SHIFT;
+ struct i915_vma vma;
+ struct ppgtt_walk walk;
+ int i;
+
+ if (WARN_ON(page_table_level != 4))
+ return -EINVAL;
+
+ gvt_dbg_mm("insert_4lvl pml4=%llx start=%llx length=%llx cache=%x\n",
+ pv_ppgtt->pdp, start, length, pv_ppgtt->cache_level);
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+ if (!mm) {
+ gvt_vgpu_err("fail to find mm for pml4 0x%llx\n", pdps[0]);
+ return -EINVAL;
+ }
+
+ walk.mfn_index = 0;
+ walk.mfns = NULL;
+ walk.pt = NULL;
+
+ walk.mfns = kmalloc_array(num_pages,
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!walk.mfns) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ walk.pt = (unsigned long *)__get_free_pages(GFP_KERNEL, 0);
+ if (!walk.pt) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sg_alloc_table(&st, num_pages, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = walk_pml4_range(vgpu, pdps[0], start, start + length, &walk);
+ if (ret)
+ goto fail_free_sg;
+
+ WARN_ON(num_pages != walk.mfn_index);
+
+ for_each_sg(st.sgl, sg, num_pages, i) {
+ sg->offset = 0;
+ sg->length = PAGE_SIZE;
+ sg_dma_address(sg) = walk.mfns[i];
+ sg_dma_len(sg) = PAGE_SIZE;
+ }
+
+ /*
+ * Fake vma for the insert call: insert_entries() only looks at
+ * vma.node.start and vma.pages.
+ */
+ memset(&vma, 0, sizeof(vma));
+ vma.node.start = start;
+ vma.pages = &st;
+ mm->ppgtt_mm.ppgtt->vm.insert_entries(
+ &mm->ppgtt_mm.ppgtt->vm, &vma,
+ pv_ppgtt->cache_level, 0);
+
+fail_free_sg:
+ sg_free_table(&st);
+fail:
+ kfree(walk.mfns);
+ free_page((unsigned long)walk.pt);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index bfb6f65..87ca63b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -130,7 +130,7 @@ enum intel_gvt_mm_type {
INTEL_GVT_MM_PPGTT,
};
-#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
+#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
struct intel_vgpu_mm {
enum intel_gvt_mm_type type;
@@ -153,6 +153,7 @@ struct intel_vgpu_mm {
struct list_head list;
struct list_head lru_list;
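+ /* host ppgtt backing this mm when PV PPGTT updates are enabled */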
+ struct i915_hw_ppgtt *ppgtt;
} ppgtt_mm;
struct {
void *virtual_ggtt;
@@ -271,4 +272,12 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
+int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu,
+ int page_table_level);
#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index afa15c0..d1870e0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1271,6 +1271,12 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
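+ /* PV PPGTT updates: parameters are read from the vgpu's shared page */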
+ case VGT_G2V_PPGTT_L4_ALLOC:
+ return intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(vgpu, 4);
+ case VGT_G2V_PPGTT_L4_INSERT:
+ return intel_vgpu_g2v_pv_ppgtt_insert_4lvl(vgpu, 4);
+ case VGT_G2V_PPGTT_L4_CLEAR:
+ return intel_vgpu_g2v_pv_ppgtt_clear_4lvl(vgpu, 4);
case VGT_G2V_EXECLIST_CONTEXT_CREATE:
case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
case 1: /* Remove this in guest driver. */
--
2.21.0