From 2fe4280cfa8be0bf0e218517bf52136fde0e6118 Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Sat, 9 May 2020 08:23:37 +0000
Subject: [PATCH] hv: vtd: add two parameters for dmar_assign_irte

idx_in:
- If the caller of dmar_assign_irte passes a valid IRTE index, it will be
  reused;
- If the caller of dmar_assign_irte passes INVALID_IRTE_ID as the IRTE
  index, the function will allocate a new IRTE.

idx_out:
This parameter returns the actual IRTE index used. The caller needs to
check whether the returned value is valid or not.

Also, this patch adds an internal function alloc_irtes. The function takes
a count as input parameter to allocate continuous IRTEs. The count can only
be 1, 2, 4, 8, 16 or 32. This is in preparation for multiple MSI vector
support.

Tracked-On: #4831
Signed-off-by: Binbin Wu
Acked-by: Eddie Dong
---
 hypervisor/arch/x86/guest/assign.c   | 43 +++++++------
 hypervisor/arch/x86/vtd.c            | 96 ++++++++++++++++++----------
 hypervisor/include/arch/x86/vtd.h    | 11 ++--
 hypervisor/pre_build/static_checks.c |  4 ++
 4 files changed, 98 insertions(+), 56 deletions(-)

diff --git a/hypervisor/arch/x86/guest/assign.c b/hypervisor/arch/x86/guest/assign.c
index 3c2166e83..be35c77f0 100644
--- a/hypervisor/arch/x86/guest/assign.c
+++ b/hypervisor/arch/x86/guest/assign.c
@@ -126,23 +126,23 @@ static void ptirq_build_physical_msi(struct acrn_vm *vm,
 	intr_src.is_msi = true;
 	intr_src.pid_paddr = pid_paddr;
 	intr_src.src.msi.value = entry->phys_sid.msi_id.bdf;
-	ret = dmar_assign_irte(&intr_src, &irte, (uint16_t)entry->allocated_pirq);
+	ret = dmar_assign_irte(&intr_src, &irte, (uint16_t)entry->allocated_pirq, &ir_index.index);
 
 	if (ret == 0) {
-		/*
-		 * Update the MSI interrupt source to point to the IRTE
-		 * SHV is set to 0 as ACRN disables MMC (Multi-Message Capable
-		 * for MSI devices.
-		 */
 		entry->pmsi.data.full = 0U;
-		ir_index.index = (uint16_t)entry->allocated_pirq;
-
 		entry->pmsi.addr.full = 0UL;
-		entry->pmsi.addr.ir_bits.intr_index_high = ir_index.bits.index_high;
-		entry->pmsi.addr.ir_bits.shv = 0U;
-		entry->pmsi.addr.ir_bits.intr_format = 0x1U;
-		entry->pmsi.addr.ir_bits.intr_index_low = ir_index.bits.index_low;
-		entry->pmsi.addr.ir_bits.constant = 0xFEEU;
+		if (ir_index.index != INVALID_IRTE_ID) {
+			/*
+			 * Update the MSI interrupt source to point to the IRTE
+			 * SHV is set to 0 as ACRN disables MMC (Multi-Message Capable)
+			 * for MSI devices.
+			 */
+			entry->pmsi.addr.ir_bits.intr_index_high = ir_index.bits.index_high;
+			entry->pmsi.addr.ir_bits.shv = 0U;
+			entry->pmsi.addr.ir_bits.intr_format = 0x1U;
+			entry->pmsi.addr.ir_bits.intr_index_low = ir_index.bits.index_low;
+			entry->pmsi.addr.ir_bits.constant = 0xFEEU;
+		}
 	} else {
 		/* In case there is no corresponding IOMMU, for example, if the
 		 * IOMMU is ignored, pass the MSI info in Compatibility Format
@@ -223,15 +223,18 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
 	intr_src.is_msi = false;
 	intr_src.pid_paddr = 0UL;
 	intr_src.src.ioapic_id = ioapic_irq_to_ioapic_id(phys_irq);
-	ret = dmar_assign_irte(&intr_src, &irte, (uint16_t)phys_irq);
+	ret = dmar_assign_irte(&intr_src, &irte, (uint16_t)phys_irq, &ir_index.index);
 
 	if (ret == 0) {
-		ir_index.index = (uint16_t)phys_irq;
-		rte.ir_bits.vector = vector;
-		rte.ir_bits.constant = 0U;
-		rte.ir_bits.intr_index_high = ir_index.bits.index_high;
-		rte.ir_bits.intr_format = 1U;
-		rte.ir_bits.intr_index_low = ir_index.bits.index_low;
+		if (ir_index.index != INVALID_IRTE_ID) {
+			rte.ir_bits.vector = vector;
+			rte.ir_bits.constant = 0U;
+			rte.ir_bits.intr_index_high = ir_index.bits.index_high;
+			rte.ir_bits.intr_format = 1U;
+			rte.ir_bits.intr_index_low = ir_index.bits.index_low;
+		} else {
+			rte.bits.intr_mask = 1;
+		}
 	} else {
 		rte.bits.dest_mode = IOAPIC_RTE_DESTMODE_LOGICAL;
 		rte.bits.delivery_mode = delmode;
diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index 35ec95c5a..943e439c7 100644
--- a/hypervisor/arch/x86/vtd.c
+++ b/hypervisor/arch/x86/vtd.c
@@ -128,6 +128,7 @@ struct dmar_drhd_rt {
 
 	uint64_t root_table_addr;
 	uint64_t ir_table_addr;
+	uint64_t irte_alloc_bitmap[CONFIG_MAX_IR_ENTRIES/64U];
 
 	uint64_t qi_queue;
 	uint16_t qi_tail;
@@ -1261,7 +1262,30 @@ int32_t init_iommu(void)
 	return ret;
 }
 
-int32_t dmar_assign_irte(const struct intr_source *intr_src, union dmar_ir_entry *irte, uint16_t index)
+/* Allocate continuous IRTEs specified by num, num can be 1, 2, 4, 8, 16, 32 */
+static uint16_t alloc_irtes(struct dmar_drhd_rt *dmar_unit, const uint16_t num)
+{
+	uint16_t irte_idx;
+	uint64_t mask = (1UL << num) - 1U;
+	uint64_t test_mask;
+
+	ASSERT((bitmap_weight(num) == 1U) && (num <= 32U));
+
+	spinlock_obtain(&dmar_unit->lock);
+	for (irte_idx = 0U; irte_idx < CONFIG_MAX_IR_ENTRIES; irte_idx += num) {
+		test_mask = mask << (irte_idx & 0x3FU);
+		if ((dmar_unit->irte_alloc_bitmap[irte_idx >> 6U] & test_mask) == 0UL) {
+			dmar_unit->irte_alloc_bitmap[irte_idx >> 6U] |= test_mask;
+			break;
+		}
+	}
+	spinlock_release(&dmar_unit->lock);
+
+	return (irte_idx < CONFIG_MAX_IR_ENTRIES) ? irte_idx: INVALID_IRTE_ID;
+}
+
+int32_t dmar_assign_irte(const struct intr_source *intr_src, union dmar_ir_entry *irte,
+	uint16_t idx_in, uint16_t *idx_out)
 {
 	struct dmar_drhd_rt *dmar_unit;
 	union dmar_ir_entry *ir_table, *ir_entry;
@@ -1279,42 +1303,49 @@ int32_t dmar_assign_irte(const struct intr_source *intr_src, union dmar_ir_entry
 	}
 
 	if (is_dmar_unit_valid(dmar_unit, sid)) {
-		ret = 0;
 		dmar_enable_intr_remapping(dmar_unit);
 		ir_table = (union dmar_ir_entry *)hpa2hva(dmar_unit->ir_table_addr);
-		ir_entry = ir_table + index;
-
-		if (intr_src->pid_paddr != 0UL) {
-			union dmar_ir_entry irte_pi;
-
-			/* irte is in remapped mode format, convert to posted mode format */
-			irte_pi.value.lo_64 = 0UL;
-			irte_pi.value.hi_64 = 0UL;
-
-			irte_pi.bits.post.vector = irte->bits.remap.vector;
-
-			irte_pi.bits.post.svt = 0x1UL;
-			irte_pi.bits.post.sid = sid.value;
-			irte_pi.bits.post.present = 0x1UL;
-			irte_pi.bits.post.mode = 0x1UL;
-
-			irte_pi.bits.post.pda_l = (intr_src->pid_paddr) >> 6U;
-			irte_pi.bits.post.pda_h = (intr_src->pid_paddr) >> 32U;
-
-			*ir_entry = irte_pi;
-		} else {
-			/* Fields that have not been initialized explicitly default to 0 */
-			irte->bits.remap.svt = 0x1UL;
-			irte->bits.remap.sid = sid.value;
-			irte->bits.remap.present = 0x1UL;
-			irte->bits.remap.trigger_mode = trigger_mode;
-
-			*ir_entry = *irte;
+		*idx_out = idx_in;
+		if (idx_in == INVALID_IRTE_ID) {
+			*idx_out = alloc_irtes(dmar_unit, 1U);
 		}
 
-		iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
-		dmar_invalid_iec(dmar_unit, index, 0U, false);
+		if (*idx_out < CONFIG_MAX_IR_ENTRIES) {
+			ir_entry = ir_table + *idx_out;
+
+			if (intr_src->pid_paddr != 0UL) {
+				union dmar_ir_entry irte_pi;
+
+				/* irte is in remapped mode format, convert to posted mode format */
+				irte_pi.value.lo_64 = 0UL;
+				irte_pi.value.hi_64 = 0UL;
+
+				irte_pi.bits.post.vector = irte->bits.remap.vector;
+
+				irte_pi.bits.post.svt = 0x1UL;
+				irte_pi.bits.post.sid = sid.value;
+				irte_pi.bits.post.present = 0x1UL;
+				irte_pi.bits.post.mode = 0x1UL;
+
+				irte_pi.bits.post.pda_l = (intr_src->pid_paddr) >> 6U;
+				irte_pi.bits.post.pda_h = (intr_src->pid_paddr) >> 32U;
+
+				*ir_entry = irte_pi;
+			} else {
+				/* Fields that have not been initialized explicitly default to 0 */
+				irte->bits.remap.svt = 0x1UL;
+				irte->bits.remap.sid = sid.value;
+				irte->bits.remap.present = 0x1UL;
+				irte->bits.remap.trigger_mode = trigger_mode;
+
+				*ir_entry = *irte;
+			}
+			iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
+			dmar_invalid_iec(dmar_unit, *idx_out, 0U, false);
+		}
+		ret = 0;
 	}
+
 	return ret;
 }
 
@@ -1338,4 +1369,5 @@ void dmar_free_irte(const struct intr_source *intr_src, uint16_t index)
 		iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
 		dmar_invalid_iec(dmar_unit, index, 0U, false);
 	}
+
 }
diff --git a/hypervisor/include/arch/x86/vtd.h b/hypervisor/include/arch/x86/vtd.h
index 7c0b50364..bfd550dd2 100644
--- a/hypervisor/include/arch/x86/vtd.h
+++ b/hypervisor/include/arch/x86/vtd.h
@@ -11,6 +11,7 @@
 #include 
 
 #define INVALID_DRHD_INDEX 0xFFFFFFFFU
+#define INVALID_IRTE_ID 0xFFFFU
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -675,13 +676,15 @@ int32_t init_iommu(void);
  *
  * @param[in] intr_src filled with type of interrupt source and the source
  * @param[in] irte filled with info about interrupt deliverymode, destination and destination mode
- * @param[in] index into Interrupt Remapping Table
+ * @param[in] idx_in if this value is INVALID_IRTE_ID, a new IRTE will be allocated; otherwise, the IRTE at this index is used directly.
+ * @param[out] idx_out returns the actual IRTE index used; the caller needs to check whether the returned value is valid.
  *
- * @retval -EINVAL if corresponding DMAR is not present
- * @retval 0 otherwise
+ * @retval -EINVAL if corresponding DMAR is not present
+ * @retval 0 on success; the caller should check whether the returned start index is valid
  *
  */
-int32_t dmar_assign_irte(const struct intr_source *intr_src, union dmar_ir_entry *irte, uint16_t index);
+int32_t dmar_assign_irte(const struct intr_source *intr_src, union dmar_ir_entry *irte,
+	uint16_t idx_in, uint16_t *idx_out);
 
 /**
  * @brief Free RTE for Interrupt Remapping Table.
diff --git a/hypervisor/pre_build/static_checks.c b/hypervisor/pre_build/static_checks.c
index 950df9957..971d216be 100644
--- a/hypervisor/pre_build/static_checks.c
+++ b/hypervisor/pre_build/static_checks.c
@@ -29,6 +29,10 @@ typedef int32_t CAT_(CTA_DummyType,__LINE__)[(expr) ? 1 : -1]
 #error "CONFIG_HV_RAM_SIZE must be integral multiple of 2MB"
 #endif
 
+#if ((CONFIG_MAX_IR_ENTRIES < 256U) || (CONFIG_MAX_IR_ENTRIES & (CONFIG_MAX_IR_ENTRIES -1)) != 0U)
+#error "CONFIG_MAX_IR_ENTRIES must be >= 256 and a power of 2"
+#endif
+
 /* Build time sanity checks to make sure hard-coded offset
  * is matching the actual offset!
  */
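
Illustration (not part of the patch): the aligned block scan that alloc_irtes() performs over
irte_alloc_bitmap can be tried out in isolation with the sketch below. It is a minimal standalone
model under stated assumptions; MAX_IR_ENTRIES, the local bitmap array and the function name are
illustrative stand-ins rather than the hypervisor's real symbols, and only the bitmap logic is
mirrored (step by num so a block of 1/2/4/8/16/32 entries never straddles a 64-bit word, and claim
the block only when every bit in it is free). ACRN's locking and DMAR plumbing are left out.

/* Standalone sketch, not ACRN code: models the allocation strategy of alloc_irtes(). */
#include <stdint.h>
#include <stdio.h>

#define MAX_IR_ENTRIES   256U     /* illustrative value; the patch uses CONFIG_MAX_IR_ENTRIES */
#define INVALID_IRTE_ID  0xFFFFU

static uint64_t irte_alloc_bitmap[MAX_IR_ENTRIES / 64U];

/* num must be 1, 2, 4, 8, 16 or 32 so that a block never crosses a 64-bit word */
static uint16_t alloc_irtes_sketch(uint16_t num)
{
	uint64_t mask = ((uint64_t)1U << num) - 1U;
	uint16_t idx;

	for (idx = 0U; idx < MAX_IR_ENTRIES; idx += num) {
		uint64_t test_mask = mask << (idx & 0x3FU);

		/* claim the block only if every bit in it is still free */
		if ((irte_alloc_bitmap[idx >> 6U] & test_mask) == 0UL) {
			irte_alloc_bitmap[idx >> 6U] |= test_mask;
			return idx;
		}
	}
	return INVALID_IRTE_ID;
}

int main(void)
{
	/* a block of 4 lands at index 0, a following block of 2 at index 4 */
	printf("%u\n", (unsigned int)alloc_irtes_sketch(4U));
	printf("%u\n", (unsigned int)alloc_irtes_sketch(2U));
	return 0;
}

Running it prints 0 and then 4: the second request skips the slots already claimed and settles on
the next aligned free pair, which is the behaviour dmar_assign_irte relies on when it calls
alloc_irtes(dmar_unit, 1U) for a caller that passed INVALID_IRTE_ID.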