HV: deny pre-launched VM ptdev bar access from SOS

This patch denies the Service VM access permission to the device
resources owned by pre-launched VMs.

Rationale:
 * Pre-launched VMs in ACRN are independent of the Service VM and
   should be immune to attacks from it. However, the current
   implementation exposes the BAR resources of their passthru
   devices to the Service VM, which makes it possible for the
   Service VM to crash or attack pre-launched VMs.
 * The same applies to hypervisor-owned devices.

NOTE:
 * The MMIO spaces pre-allocated to VFs are still presented to the
   Service VM. The SR-IOV capable devices assigned to pre-launched
   VMs do not expose the SR-IOV capability, so the MMIO address
   spaces pre-allocated by the BIOS for their VFs are not decoded by
   hardware and cannot be enabled by the guest. The SOS may live with
   seeing those address spaces or not; we will revisit this later.

Tracked-On: #5615
Signed-off-by: Tao Yuhong <yuhong.tao@intel.com>
Reviewed-by: Fei Li <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Tao Yuhong 2021-01-29 19:40:16 -05:00, committed by wenlingz
commit 6e7ce4a73f, parent bc40e455aa
3 changed files with 77 additions and 7 deletions
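
A note for readers of the diff below: the size_mask field it introduces
comes from the standard PCI BAR sizing handshake (read the BAR, write all
1's, read back the hardwired mask, restore the original value), and
deny_pci_bar_access() decodes that recorded pair into a base and a
power-of-two size. The following standalone sketch is illustrative only;
the register values are simulated and the flag masks are defined locally
(mirroring the PCI_BASE_ADDRESS_* macros the patch uses), so it is not
ACRN code:

#include <stdint.h>
#include <stdio.h>

#define BAR_MEM_MASK  (~0x0fU)  /* memory BAR: low 4 bits are type/prefetch flags */
#define BAR_IO_MASK   (~0x03U)  /* I/O BAR: low 2 bits are flags */

int main(void)
{
    /* Simulated values for a 32-bit memory BAR: a 1 MiB region at 0xdf200000.
     * phy_bar is the normal config-space read; size_mask is the read-back after
     * writing 0xffffffff (get_pci_bar_resource() records exactly this pair). */
    uint32_t phy_bar = 0xdf200008U;     /* bit 3 set: prefetchable */
    uint32_t size_mask = 0xfff00008U;
    uint32_t mask = ((phy_bar & 0x1U) != 0U) ? BAR_IO_MASK : BAR_MEM_MASK;

    uint64_t base = phy_bar & mask;
    uint64_t size = size_mask & mask;
    size = size & ~(size - 1UL);        /* lowest set bit == region size */

    printf("base=0x%llx size=0x%llx\n", /* prints base=0xdf200000 size=0x100000 */
           (unsigned long long)base, (unsigned long long)size);
    return 0;
}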

@@ -280,6 +280,58 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
     }
 }
 
+static void deny_pci_bar_access(struct acrn_vm *sos, const struct pci_pdev *pdev)
+{
+    uint32_t idx, mask;
+    struct pci_vbar vbar = {};
+    uint64_t base = 0UL, size = 0UL;
+    uint64_t *pml4_page;
+
+    pml4_page = (uint64_t *)sos->arch_vm.nworld_eptp;
+
+    for (idx = 0U; idx < pdev->nr_bars; idx++) {
+        vbar.bar_type.bits = pdev->bars[idx].phy_bar;
+        if (!is_pci_reserved_bar(&vbar)) {
+            base = pdev->bars[idx].phy_bar;
+            size = pdev->bars[idx].size_mask;
+            if (is_pci_mem64lo_bar(&vbar)) {
+                idx++;
+                base |= (((uint64_t)pdev->bars[idx].phy_bar) << 32UL);
+                size |= (((uint64_t)pdev->bars[idx].size_mask) << 32UL);
+            }
+
+            mask = (is_pci_io_bar(&vbar)) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK;
+            base &= mask;
+            size &= mask;
+            size = size & ~(size - 1UL);
+
+            if (base != 0UL) {
+                if (is_pci_io_bar(&vbar)) {
+                    base &= 0xffffU;
+                    deny_guest_pio_access(sos, base, size);
+                } else {
+                    /* For a pass-through device, the MMIO BAR base must be 4K aligned. */
+                    ASSERT((base & PAGE_MASK) != 0U, "%02x:%02x.%d bar[%d] 0x%lx, is not 4K aligned!",
+                        pdev->bdf.bits.b, pdev->bdf.bits.d, pdev->bdf.bits.f, idx, base);
+                    size = round_page_up(size);
+                    ept_del_mr(sos, pml4_page, base, size);
+                }
+            }
+        }
+    }
+}
+
+static void deny_pdevs(struct acrn_vm *sos, struct acrn_vm_pci_dev_config *pci_devs, uint16_t pci_dev_num)
+{
+    uint16_t i;
+
+    for (i = 0U; i < pci_dev_num; i++) {
+        if (pci_devs[i].pdev != NULL) {
+            deny_pci_bar_access(sos, pci_devs[i].pdev);
+        }
+    }
+}
+
 /**
  * @param[inout] vm pointer to a vm descriptor
  *
@@ -349,6 +401,8 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
         vm_config = get_vm_config(vm_id);
         if (vm_config->load_order == PRE_LAUNCHED_VM) {
             ept_del_mr(vm, pml4_page, vm_config->memory.start_hpa, vm_config->memory.size);
+            /* Remove MMIO/IO bars of pre-launched VM's ptdev */
+            deny_pdevs(vm, vm_config->pci_devs, vm_config->pci_dev_num);
         }
 
     for (i = 0U; i < MAX_MMIO_DEV_NUM; i++) {

@@ -252,7 +252,7 @@ bool pdev_need_bar_restore(const struct pci_pdev *pdev)
     for (idx = 0U; idx < pdev->nr_bars; idx++) {
         bar = pci_pdev_read_cfg(pdev->bdf, pci_bar_offset(idx), 4U);
-        if (bar != pdev->bars[idx]) {
+        if (bar != pdev->bars[idx].phy_bar) {
             need_restore = true;
             break;
         }
@@ -261,12 +261,21 @@ bool pdev_need_bar_restore(const struct pci_pdev *pdev)
     return need_restore;
 }
 
+static void get_pci_bar_resource(union pci_bdf bdf, uint32_t offset, struct pci_bar_resource *res)
+{
+    res->phy_bar = pci_pdev_read_cfg(bdf, offset, 4U);
+
+    pci_pdev_write_cfg(bdf, offset, 4U, ~0U);
+    res->size_mask = pci_pdev_read_cfg(bdf, offset, 4U);
+    pci_pdev_write_cfg(bdf, offset, 4U, res->phy_bar);
+}
+
 static inline void pdev_save_bar(struct pci_pdev *pdev)
 {
     uint32_t idx;
 
     for (idx = 0U; idx < pdev->nr_bars; idx++) {
-        pdev->bars[idx] = pci_pdev_read_cfg(pdev->bdf, pci_bar_offset(idx), 4U);
+        get_pci_bar_resource(pdev->bdf, pci_bar_offset(idx), &pdev->bars[idx]);
     }
 }
@@ -275,7 +284,7 @@ void pdev_restore_bar(const struct pci_pdev *pdev)
     uint32_t idx;
 
     for (idx = 0U; idx < pdev->nr_bars; idx++) {
-        pci_pdev_write_cfg(pdev->bdf, pci_bar_offset(idx), 4U, pdev->bars[idx]);
+        pci_pdev_write_cfg(pdev->bdf, pci_bar_offset(idx), 4U, pdev->bars[idx].phy_bar);
     }
 }
@@ -788,9 +797,7 @@ struct pci_pdev *init_pdev(uint16_t pbdf, uint32_t drhd_index)
         pdev->base_class = (uint8_t)pci_pdev_read_cfg(bdf, PCIR_CLASS, 1U);
         pdev->sub_class = (uint8_t)pci_pdev_read_cfg(bdf, PCIR_SUBCLASS, 1U);
         pdev->nr_bars = pci_pdev_get_nr_bars(hdr_type);
-        if (hdr_layout == PCIM_HDRTYPE_NORMAL) {
-            pdev_save_bar(pdev);
-        }
+        pdev_save_bar(pdev);
 
         if ((pci_pdev_read_cfg(bdf, PCIR_STATUS, 2U) & PCIM_STATUS_CAPPRESENT) != 0U) {
             pci_enumerate_cap(pdev);

@@ -246,6 +246,14 @@ struct pci_sriov_cap {
     bool hide_sriov;
 };
 
+/* PCI BAR sizes are detected at run time. We don't want to re-detect them later and
+ * risk malfunction of the device. Since we have recorded the physical BAR values,
+ * we also need to record the size masks. */
+struct pci_bar_resource {
+    uint32_t phy_bar;   /* the original raw value read from the physical BAR */
+    uint32_t size_mask; /* the value read back from the physical BAR after writing 0xffffffff */
+};
+
 struct pci_pdev {
     uint8_t hdr_type;
     uint8_t base_class;
@@ -259,7 +267,7 @@ struct pci_pdev {
     /* The bar info of the physical PCI device. */
     uint32_t nr_bars; /* 6 for normal device, 2 for bridge, 1 for cardbus */
-    uint32_t bars[PCI_STD_NUM_BARS];
+    struct pci_bar_resource bars[PCI_STD_NUM_BARS]; /* For common bar resource recording */
 
     /* The bus/device/function triple of the physical PCI device. */
     union pci_bdf bdf;
@@ -384,4 +392,5 @@ bool pdev_need_bar_restore(const struct pci_pdev *pdev);
 void pdev_restore_bar(const struct pci_pdev *pdev);
 void pci_switch_to_mmio_cfg_ops(void);
 void reserve_vmsix_on_msi_irtes(struct pci_pdev *pdev);
 #endif /* PCI_H_ */
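
For completeness, here is a hypothetical decode of a 64-bit BAR pair,
mirroring the is_pci_mem64lo_bar() path in deny_pci_bar_access() above,
where the low and high halves are recorded as consecutive bars[] entries.
This is a standalone sketch with made-up register values, not ACRN code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Simulated 64-bit memory BAR pair (bits 2:1 of the low half = 10b):
     * a 16 KiB prefetchable region placed at 0xc0000000 by the BIOS. */
    uint32_t phy_bar_lo = 0xc000000cU, phy_bar_hi = 0x00000000U;
    uint32_t size_mask_lo = 0xffffc00cU, size_mask_hi = 0xffffffffU;

    uint64_t base = phy_bar_lo | ((uint64_t)phy_bar_hi << 32U);
    uint64_t size = size_mask_lo | ((uint64_t)size_mask_hi << 32U);

    base &= ~0x0fULL;           /* strip the memory-BAR flag bits */
    size &= ~0x0fULL;
    size &= ~(size - 1ULL);     /* lowest set bit == region size */

    printf("base=0x%llx size=0x%llx\n", /* prints base=0xc0000000 size=0x4000 */
           (unsigned long long)base, (unsigned long long)size);
    return 0;
}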