clear-pkgs-linux-iot-lts2018/0618-drm-i915-gvt-handle-gl...


From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Zhipeng Gong <zhipeng.gong@intel.com>
Date: Fri, 14 Sep 2018 16:10:21 +0800
Subject: [PATCH] drm/i915/gvt: handle global gtt update from g2v

This patch handles ggtt updates from g2v notification.
It maps the physical pages behind the virtual page table to the guest,
so the guest can update its pte entries directly and avoid mmio traps.
Guest ggtt pte entries are then converted to host pte entries
and inserted into the host gtt table.
The tricky part is that pvmmio parameter detection happens later than
virtual page table creation and the pci bar address update,
so the ggtt mmio mapping is done during pvmmio param detection.
This patch is only for sos (the ACRN service OS).
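
For reference, a rough sketch of the guest-side counterpart this
host-side handler expects (not part of this patch; the helper name and
how the guest reaches the shared page are assumptions, only the pv_ggtt
fields and VGT_G2V_GGTT_INSERT come from the host-side code below):

/* hypothetical guest-side helper: the ptes in the range were already
 * written directly into the mapped page table, so only one trap
 * (the g2v notification) is taken for the whole batch.
 */
static void guest_pv_ggtt_insert(struct gvt_shared_page *sp,
				 u64 start, u64 num_entries,
				 u32 cache_level)
{
	sp->pv_ggtt.start = start;		/* gma of first entry */
	sp->pv_ggtt.length = num_entries;	/* number of ptes */
	sp->pv_ggtt.cache_level = cache_level;
	/* host converts the guest ptes and calls ggtt insert_entries */
	I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_GGTT_INSERT);
}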
v2:
- cut the invalid range.
- release the gfn-to-mfn mapping when freeing gtt.
v3:
- call the ggtt insert_entries function.
- add a size check in validate_ggtt_range.
- rename trap to map.
v4:
- fall back to disabling ggtt PV when it fails to alloc 2M pages
for GGTT PV.
Tracked-On: projectacrn/acrn-hypervisor#994
Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
Reviewed-by: He, Min <min.he@intel.com>
---
 drivers/gpu/drm/i915/gvt/cfg_space.c |  41 +++++
 drivers/gpu/drm/i915/gvt/gtt.c       | 261 ++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/gvt/gtt.h       |   5 +
 drivers/gpu/drm/i915/gvt/gvt.h       |   3 +
 drivers/gpu/drm/i915/gvt/handlers.c  |  15 ++
 5 files changed, 320 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index f6bcfcb571b7..fd461db48a8d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -138,6 +138,47 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
return 0;
}
+int map_gttmmio(struct intel_vgpu *vgpu, bool map)
+{
+ struct intel_vgpu_gm *gm = &vgpu->gm;
+ unsigned long mfn;
+ struct scatterlist *sg;
+ struct sg_table *st = gm->st;
+ u64 start, end;
+ int ret = 0;
+
+ if (!st) {
+ DRM_INFO("no scatter list, fallback to disable ggtt pv\n");
+ return -EINVAL;
+ }
+
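+ /* the gtt sits in the upper half of the gttmmio bar: take the bar 0
+ * base from config space, mask off the low flag bits, and skip the
+ * mmio half to get the guest-visible gtt start.
+ */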
+ start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+ start &= ~GENMASK(3, 0);
+ start += vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size >> 1;
+
+ end = start +
+ (vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size >> 1);
+
+ WARN_ON((end - start) != gvt_ggtt_sz(vgpu->gvt));
+
+ gvt_dbg_mmio("%s start=%llx end=%llx map=%d\n",
+ __func__, start, end, map);
+
+ start >>= PAGE_SHIFT;
+ for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+ mfn = page_to_pfn(sg_page(sg));
+ gvt_dbg_mmio("page=%p mfn=%lx size=%x start=%llx\n",
+ sg_page(sg), mfn, sg->length, start);
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, start,
+ mfn, sg->length >> PAGE_SHIFT, map);
+ if (ret)
+ return ret;
+ start += sg->length >> PAGE_SHIFT;
+ }
+
+ return ret;
+}
+
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
u64 start, end;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 901d7fa82d98..ddc725728bf8 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1569,6 +1569,106 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
return 0;
}
+static void free_ggtt_virtual_page_table(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu_gm *gm = &mm->vgpu->gm;
+ struct sg_table *st = gm->st;
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+ if (sg_page(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+ }
+
+ sg_free_table(st);
+ kfree(st);
+ vunmap(mm->ggtt_mm.virtual_ggtt);
+ gm->st = NULL;
+}
+
+/*
+ * Allocate the virtual page table for guest ggtt. If ggtt pv is
+ * enabled, the physical pages behind the virtual page table are also
+ * mapped to the guest, so the guest can update its pte entries
+ * directly to avoid traps.
+ */
+static void *alloc_ggtt_virtual_page_table(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ unsigned int page_count = gvt_ggtt_sz(vgpu->gvt) >> PAGE_SHIFT;
+ struct intel_vgpu_gm *gm = &vgpu->gm;
+ struct page **pages = NULL;
+ struct page *p;
+ unsigned int i;
+ void *vaddr = NULL;
+ int order;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct sgt_iter sgt_iter;
+ unsigned int npages = page_count;
+
+ /*
+ * The page table is bigger than what alloc_pages can allocate in
+ * one call, so we have to split it into PMD-sized (2M) chunks.
+ * The head page of each chunk is kept in the scatter list so that
+ * the pages can be freed later.
+ */
+ order = get_order(1 << PMD_SHIFT);
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+ gm->st = st;
+ do {
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p)
+ goto fail;
+ gvt_dbg_mm("page=%p size=%ld\n", p, PAGE_SIZE << order);
+ sg_set_page(sg, p, PAGE_SIZE << order, 0);
+ st->nents++;
+ npages -= 1 << order;
+ if (!npages) {
+ sg_mark_end(sg);
+ break;
+ }
+ sg = __sg_next(sg);
+ } while (1);
+
+ /* keep all the pages for vmap */
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto fail;
+
+ i = 0;
+ for_each_sgt_page(p, sgt_iter, st)
+ pages[i++] = p;
+
+ WARN_ON(i != page_count);
+
+ vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
+ if (!vaddr) {
+ gvt_vgpu_err("fail to vmap pages\n");
+ goto fail;
+ }
+ kfree(pages);
+ return vaddr;
+
+fail:
+ sg_set_page(sg, NULL, 0, 0);
+ sg_mark_end(sg);
+ free_ggtt_virtual_page_table(mm);
+ kfree(pages);
+ gm->st = NULL;
+ return NULL;
+}
+
/*
* The heart of PPGTT shadow page table.
*/
@@ -1963,7 +2063,6 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
struct intel_vgpu_mm *mm;
- unsigned long nr_entries;
mm = vgpu_alloc_mm(vgpu);
if (!mm)
@@ -1971,10 +2070,17 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
mm->type = INTEL_GVT_MM_GGTT;
- nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
- mm->ggtt_mm.virtual_ggtt =
- vzalloc(array_size(nr_entries,
+ mm->ggtt_mm.virtual_ggtt = alloc_ggtt_virtual_page_table(mm);
+ if (!mm->ggtt_mm.virtual_ggtt) {
+ unsigned long nr_entries;
+
+ DRM_INFO("failed to alloc contiguous pages, falling back\n");
+ nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
+ mm->ggtt_mm.virtual_ggtt =
+ vzalloc(array_size(nr_entries,
vgpu->gvt->device_info.gtt_entry_size));
+ }
+
if (!mm->ggtt_mm.virtual_ggtt) {
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
@@ -2003,7 +2109,17 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
list_del(&mm->ppgtt_mm.lru_list);
invalidate_ppgtt_mm(mm);
} else {
- vfree(mm->ggtt_mm.virtual_ggtt);
+ if (mm->ggtt_mm.virtual_ggtt) {
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_vgpu_gm *gm = &vgpu->gm;
+
+ if (gm->st) {
+ map_gttmmio(mm->vgpu, false);
+ free_ggtt_virtual_page_table(mm);
+ } else
+ vfree(mm->ggtt_mm.virtual_ggtt);
+ mm->ggtt_mm.virtual_ggtt = NULL;
+ }
mm->ggtt_mm.last_partial_off = -1UL;
}
@@ -3144,3 +3260,138 @@ int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu,
return ret;
}
+
+static void validate_ggtt_range(struct intel_vgpu *vgpu,
+ u64 *start, u64 *length)
+{
+ u64 end;
+
+ if (WARN_ON(*start > vgpu->gvt->dev_priv->ggtt.vm.total ||
+ *length > vgpu->gvt->dev_priv->ggtt.vm.total)) {
+ *length = 0;
+ return;
+ }
+
+ end = *start + *length - 1;
+
+ if (*start >= vgpu_aperture_gmadr_base(vgpu) &&
+ end <= vgpu_aperture_gmadr_end(vgpu))
+ return;
+
+ if (*start >= vgpu_hidden_gmadr_base(vgpu) &&
+ end <= vgpu_hidden_gmadr_end(vgpu))
+ return;
+
+ /* handle the cases with invalid ranges */
+ WARN_ON(1);
+
+ /* start is in aperture range, end is after aperture range */
+ if (*start >= vgpu_aperture_gmadr_base(vgpu) &&
+ *start <= vgpu_aperture_gmadr_end(vgpu)) {
+ *length = vgpu_aperture_gmadr_end(vgpu) - *start + 1;
+ return;
+ }
+
+ /* start is before aperture range, end is in aperture range */
+ if (end >= vgpu_aperture_gmadr_base(vgpu) &&
+ end <= vgpu_aperture_gmadr_end(vgpu)) {
+ *start = vgpu_aperture_gmadr_base(vgpu);
+ return;
+ }
+
+ /* start is in hidden range, end is after hidden range */
+ if (*start >= vgpu_hidden_gmadr_base(vgpu) &&
+ *start <= vgpu_hidden_gmadr_end(vgpu)) {
+ *length = vgpu_hidden_gmadr_end(vgpu) - *start + 1;
+ return;
+ }
+
+ /* start is before hidden range, end is in hidden range */
+ if (end >= vgpu_hidden_gmadr_base(vgpu) &&
+ end <= vgpu_hidden_gmadr_end(vgpu)) {
+ *start = vgpu_hidden_gmadr_base(vgpu);
+ return;
+ }
+
+ /* both start and end are outside the valid ranges */
+ *length = 0;
+}
+
+int intel_vgpu_g2v_pv_ggtt_insert(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct gvt_shared_page *shared_page = vgpu->mmio.shared_page;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ u64 start = shared_page->pv_ggtt.start;
+ u64 num_entries = shared_page->pv_ggtt.length;
+ u32 cache_level = shared_page->pv_ggtt.cache_level;
+ u64 length = num_entries << PAGE_SHIFT;
+ u64 *vaddr = gtt->ggtt_mm->ggtt_mm.virtual_ggtt;
+ u64 gtt_entry_index;
+ u64 gtt_entry;
+ unsigned long mfn;
+ struct i915_vma vma;
+ struct sg_table st;
+ struct scatterlist *sg = NULL;
+ int ret = 0;
+ int i;
+
+ gvt_dbg_mm("ggtt_insert: start=%llx length=%llx cache=%x\n",
+ start, length, cache_level);
+ validate_ggtt_range(vgpu, &start, &length);
+ if (length == 0)
+ return 0;
+
+ num_entries = length >> PAGE_SHIFT;
+
+ if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+ return -ENOMEM;
+
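+ /* walk the guest ptes in the validated range: translate each gfn to
+ * an mfn and build a dma-addressed sg table that insert_entries can
+ * consume.
+ */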
+ for_each_sg(st.sgl, sg, num_entries, i) {
+ gtt_entry_index = (start >> PAGE_SHIFT) + i;
+ gtt_entry = vaddr[gtt_entry_index];
+ mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu,
+ gtt_entry >> PAGE_SHIFT);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("fail to translate gfn: 0x%llx\n",
+ gtt_entry >> PAGE_SHIFT);
+ ret = -ENXIO;
+ goto fail;
+ }
+ sg->offset = 0;
+ sg->length = PAGE_SIZE;
+ sg_dma_address(sg) = mfn << PAGE_SHIFT;
+ sg_dma_len(sg) = PAGE_SIZE;
+ }
+
+ /* fake vma for the insert call */
+ memset(&vma, 0, sizeof(vma));
+ vma.node.start = start;
+ vma.pages = &st;
+ ggtt->vm.insert_entries(&ggtt->vm, &vma, cache_level, 0);
+
+fail:
+ sg_free_table(&st);
+ return ret;
+}
+
+int intel_vgpu_g2v_pv_ggtt_clear(struct intel_vgpu *vgpu)
+{
+ struct gvt_shared_page *shared_page = vgpu->mmio.shared_page;
+ u64 start = shared_page->pv_ggtt.start;
+ u64 length = shared_page->pv_ggtt.length;
+ struct i915_ggtt *ggtt = &vgpu->gvt->dev_priv->ggtt;
+
+ gvt_dbg_mm("ggtt_clear: start=%llx length=%llx\n",
+ start, length);
+ validate_ggtt_range(vgpu, &start, &length);
+ if (length == 0)
+ return 0;
+
+ ggtt->vm.clear_range(&ggtt->vm, start, length);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 87ca63b763d0..5af949d8b316 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -280,4 +280,9 @@ int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu,
int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu,
int page_table_level);
+
+int intel_vgpu_g2v_pv_ggtt_insert(struct intel_vgpu *vgpu);
+
+int intel_vgpu_g2v_pv_ggtt_clear(struct intel_vgpu *vgpu);
+
#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 1a287ba76923..9344293ed692 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -85,6 +85,7 @@ struct intel_gvt_device_info {
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
+ struct sg_table *st;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
@@ -568,6 +569,8 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
PCI_BASE_ADDRESS_MEM_MASK;
}
+int map_gttmmio(struct intel_vgpu *vgpu, bool map);
+
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 16e2d41174bd..8fa3bed75d61 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1277,6 +1277,12 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
return intel_vgpu_g2v_pv_ppgtt_insert_4lvl(vgpu, 4);
case VGT_G2V_PPGTT_L4_CLEAR:
return intel_vgpu_g2v_pv_ppgtt_clear_4lvl(vgpu, 4);
+ case VGT_G2V_GGTT_INSERT:
+ return intel_vgpu_g2v_pv_ggtt_insert(vgpu);
+ case VGT_G2V_GGTT_CLEAR:
+ return intel_vgpu_g2v_pv_ggtt_clear(vgpu);
case VGT_G2V_EXECLIST_CONTEXT_CREATE:
case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
case 1: /* Remove this in guest driver. */
@@ -1348,6 +1354,15 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = 0;
break;
}
+ if (vgpu_vreg(vgpu, offset) & PVMMIO_GGTT_UPDATE) {
+ ret = map_gttmmio(vgpu, true);
+ if (ret) {
+ DRM_INFO("ggtt pv mode is off\n");
+ vgpu_vreg(vgpu, offset) &=
+ ~PVMMIO_GGTT_UPDATE;
+ }
+ }
+
} else {
vgpu_vreg(vgpu, offset) = 0;
}
--
https://clearlinux.org