hv: vtd: export iommu_flush_cache

VT-d shares the EPT tables as its second-level translation tables.
For IOMMUs that don't support page-walk coherency, the CPU cache must
be flushed for any IOMMU EPT entries that are modified.

In the current implementation, the EPT tables translating from GPA to HPA
for EPT/IOMMU are not modified after the VM is created, so CPU cache invalidation
is done once per VM before the VM starts executing.
However, this may change; runtime EPT modification is possible.

When the CPU cache of an EPT entry is invalidated at the time the entry is modified,
there is no need to invalidate the CPU cache globally per VM.
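As a hypothetical illustration of the intended caller pattern (the ptep and new_pte
names below are illustrative, not part of this patch), a runtime update would flush
only the touched entry:

    *ptep = new_pte;
    iommu_flush_cache(ptep, sizeof(uint64_t));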

This patch exports iommu_flush_cache for EPT-entry cache invalidation operations.
- The IOMMUs share the same copy of the EPT tables, so the CPU cache should be flushed if
  any active IOMMU doesn't support page-walk coherency.
- In the context of ACRN, the GPA to HPA mapping is not changed after the VM is created,
  so flushing the IOTLB is skipped to avoid a potential performance penalty.

Tracked-On: #3607
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>


@@ -287,16 +287,18 @@ static inline void dmar_wait_completion(const struct dmar_drhd_rt *dmar_unit, ui
}
}
-/* flush cache when root table, context table updated */
-static void iommu_flush_cache(const struct dmar_drhd_rt *dmar_unit,
-void *p, uint32_t size)
+/* Flush CPU cache when root table, context table or second-level translation table updated
+ * In the context of ACRN, GPA to HPA mapping relationship is not changed after VM created,
+ * skip flushing iotlb to avoid performance penalty.
+ */
+void iommu_flush_cache(const void *p, uint32_t size)
{
uint32_t i;
/* if vtd support page-walk coherency, no need to flush cacheline */
-if (iommu_ecap_c(dmar_unit->ecap) == 0U) {
+if (!iommu_page_walk_coherent) {
for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
-clflush((char *)p + i);
+clflush((const char *)p + i);
}
}
}
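The rewritten check consults a module-level iommu_page_walk_coherent flag instead of a
per-unit ECAP lookup. A minimal sketch of how such a flag could be maintained while DRHD
units are initialized (the helper name below is illustrative, not the patch's actual code):

    /* Illustrative only: the flag starts true and is cleared if any active
     * IOMMU lacks page-walk coherency (ECAP.C == 0).
     */
    static bool iommu_page_walk_coherent = true;

    static void dmar_record_page_walk_coherency(const struct dmar_drhd_rt *dmar_unit)
    {
        if (iommu_ecap_c(dmar_unit->ecap) == 0U) {
            iommu_page_walk_coherent = false;
        }
    }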
@@ -1088,7 +1090,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
root_entry->hi_64 = 0UL;
root_entry->lo_64 = lo_64;
-iommu_flush_cache(dmar_unit, root_entry, sizeof(struct dmar_entry));
+iommu_flush_cache(root_entry, sizeof(struct dmar_entry));
} else {
context_table_addr = dmar_get_bitslice(root_entry->lo_64,
ROOT_ENTRY_LOWER_CTP_MASK, ROOT_ENTRY_LOWER_CTP_POS);
@@ -1143,7 +1145,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
context_entry->hi_64 = hi_64;
context_entry->lo_64 = lo_64;
-iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
+iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
}
}
}
@@ -1192,7 +1194,7 @@ static int32_t remove_iommu_device(const struct iommu_domain *domain, uint16_t s
/* clear the present bit first */
context_entry->lo_64 = 0UL;
context_entry->hi_64 = 0UL;
-iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
+iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
sid.bits.b = bus;
sid.bits.d = pci_slot(devfun);
@@ -1376,7 +1378,7 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
ir_entry->entry.hi_64 = irte.entry.hi_64;
ir_entry->entry.lo_64 = irte.entry.lo_64;
-iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
+iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
dmar_invalid_iec(dmar_unit, index, 0U, false);
}
return ret;
@@ -1407,7 +1409,7 @@ void dmar_free_irte(struct intr_source intr_src, uint16_t index)
ir_entry = ir_table + index;
ir_entry->bits.present = 0x0UL;
-iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
+iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
dmar_invalid_iec(dmar_unit, index, 0U, false);
}
}


@@ -102,8 +102,8 @@ enum _page_table_level {
#define PAGE_SIZE_2M MEM_2M
#define PAGE_SIZE_1G MEM_1G
-void sanitize_pte_entry(uint64_t *ptep);
-void sanitize_pte(uint64_t *pt_page);
+void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops);
+void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops);
/**
* @brief MMU paging enable
*
@@ -176,12 +176,12 @@ static inline void cache_flush_invalidate_all(void)
asm volatile (" wbinvd\n" : : : "memory");
}
-static inline void clflush(volatile void *p)
+static inline void clflush(const volatile void *p)
{
asm volatile ("clflush (%0)" :: "r"(p));
}
-static inline void clflushopt(volatile void *p)
+static inline void clflushopt(const volatile void *p)
{
asm volatile ("clflushopt (%0)" :: "r"(p));
}


@@ -665,6 +665,18 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
*
*/
void dmar_free_irte(struct intr_source intr_src, uint16_t index);
+/**
+ * @brief Flush cacheline(s) for a specific address with a specific size.
+ *
+ * Flush cacheline(s) for a specific address with a specific size;
+ * if all active IOMMUs support page-walk coherency, the cacheline(s) are not flushed.
+ *
+ * @param[in] p the address of the buffer whose cache needs to be invalidated
+ * @param[in] size the size of the buffer
+ *
+ */
+void iommu_flush_cache(const void *p, uint32_t size);
/**
* @}
*/
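As a closing illustration (a hedged sketch, not code from this patch), the exported function
can also cover a whole shared page-table page after it is re-initialized. Here pt_page and
mem_ops are assumed to be in scope, and PAGE_SIZE_4K is assumed to be the 4 KB page-size
constant alongside PAGE_SIZE_2M/PAGE_SIZE_1G in the headers above:

    /* Illustrative only: sanitize a page-table page that the IOMMU shares,
     * then flush the whole 4 KB page from the CPU cache in one call.
     */
    sanitize_pte(pt_page, mem_ops);
    iommu_flush_cache(pt_page, (uint32_t)PAGE_SIZE_4K);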