HV: vpci: refine vbar sizing
For a PCI BAR, the size-aligned low bits are hard-wired to 0 (except the memory-type bits, which have their own fixed value) and are read-only. When ~0U is written to a BAR for sizing, (type_bits | size_mask) is what gets stored in the BAR. Therefore there is no need to distinguish sizing a vBAR from programming a vBAR: whenever a value is written to a vBAR, always store ((value & size_mask) | type_bits) into vcfg. pci_vdev_read_vbar() then becomes unnecessary, because reading vcfg alone is sufficient. Tracked-On: #6011 Signed-off-by: Tao Yuhong <yuhong.tao@intel.com> Reviewed-by: Eddie Dong <eddie.dong@intel.com> Reviewed-by: Li Fei <fei1.li@intel.com>
This commit is contained in:
parent
5ecca6b256
commit
cb75de2163
|
@ -223,11 +223,7 @@ static int32_t ivshmem_mmio_handler(struct io_request *io_req, void *data)
|
|||
|
||||
/*
 * Read from the ivshmem virtual device's config space.
 *
 * @param vdev   Virtual PCI device being accessed.
 * @param offset Byte offset into the virtual config space.
 * @param bytes  Access width of the read.
 * @param val    Output: value read from the virtual config space.
 *
 * @return 0 always (the read itself cannot fail).
 */
static int32_t read_ivshmem_vdev_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
{
	/*
	 * vBAR writes always store ((value & size_mask) | type_bits) into
	 * vcfg, so a plain vcfg read covers BAR and non-BAR offsets alike.
	 * The former vbar_access()/pci_vdev_read_vbar() branch was dead
	 * code: its result was unconditionally overwritten by this read.
	 */
	*val = pci_vdev_read_vcfg(vdev, offset, bytes);

	return 0;
}
|
||||
|
|
|
@ -93,19 +93,6 @@ struct pci_vdev *pci_find_vdev(struct acrn_vpci *vpci, union pci_bdf vbdf)
|
|||
return vdev;
|
||||
}
|
||||
|
||||
/*
 * Fetch the effective value of a virtual BAR register.
 *
 * Reads the 32-bit BAR at index @idx from the vdev's virtual config
 * space.  A stored value of all ones means the guest last wrote ~0U to
 * size this BAR; in that case the architectural sizing response — the
 * size mask combined with the read-only type bits — is returned instead
 * of the raw stored value.
 *
 * @param vdev Virtual PCI device to read from.
 * @param idx  Index of the BAR to read.
 *
 * @return The effective BAR register value.
 */
uint32_t pci_vdev_read_vbar(const struct pci_vdev *vdev, uint32_t idx)
{
	uint32_t val;

	val = pci_vdev_read_vcfg(vdev, pci_bar_offset(idx), 4U);
	if (val == ~0U) {
		/* Guest is sizing this BAR: report size mask plus fixed type bits. */
		val = vdev->vbars[idx].mask | vdev->vbars[idx].bar_type.bits;
	}

	return val;
}
|
||||
|
||||
static bool is_pci_mem_bar_base_valid(struct acrn_vm *vm, uint64_t base)
|
||||
{
|
||||
struct acrn_vpci *vpci = &vm->vpci;
|
||||
|
@ -124,14 +111,13 @@ static void pci_vdev_update_vbar_base(struct pci_vdev *vdev, uint32_t idx)
|
|||
vbar = &vdev->vbars[idx];
|
||||
offset = pci_bar_offset(idx);
|
||||
lo = pci_vdev_read_vcfg(vdev, offset, 4U);
|
||||
if ((!is_pci_reserved_bar(vbar)) && (lo != ~0U)) {
|
||||
if ((!is_pci_reserved_bar(vbar)) && (lo != (vbar->mask | vbar->bar_type.bits))) {
|
||||
base = lo & vbar->mask;
|
||||
|
||||
if (is_pci_mem64lo_bar(vbar)) {
|
||||
vbar = &vdev->vbars[idx + 1U];
|
||||
hi = pci_vdev_read_vcfg(vdev, (offset + 4U), 4U);
|
||||
if (hi != ~0U) {
|
||||
hi &= vbar->mask;
|
||||
if (hi != vbar->mask) {
|
||||
base |= ((uint64_t)hi << 32U);
|
||||
} else {
|
||||
base = 0UL;
|
||||
|
|
|
@ -36,11 +36,7 @@ static int32_t read_vmcs9900_cfg(const struct pci_vdev *vdev,
|
|||
uint32_t offset, uint32_t bytes,
|
||||
uint32_t * val)
|
||||
{
|
||||
if (vbar_access(vdev, offset)) {
|
||||
*val = pci_vdev_read_vbar(vdev, pci_bar_index(offset));
|
||||
} else {
|
||||
*val = pci_vdev_read_vcfg(vdev, offset, bytes);
|
||||
}
|
||||
*val = pci_vdev_read_vcfg(vdev, offset, bytes);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -431,7 +431,7 @@ static void read_cfg_header(const struct pci_vdev *vdev,
|
|||
if (vbar_access(vdev, offset)) {
|
||||
/* bar access must be 4 bytes and offset must also be 4 bytes aligned */
|
||||
if ((bytes == 4U) && ((offset & 0x3U) == 0U)) {
|
||||
*val = pci_vdev_read_vbar(vdev, pci_bar_index(offset));
|
||||
*val = pci_vdev_read_vcfg(vdev, offset, bytes);
|
||||
} else {
|
||||
*val = ~0U;
|
||||
}
|
||||
|
@ -780,21 +780,15 @@ void vpci_update_one_vbar(struct pci_vdev *vdev, uint32_t bar_idx, uint32_t val,
|
|||
map_pcibar map_cb, unmap_pcibar unmap_cb)
|
||||
{
|
||||
struct pci_vbar *vbar = &vdev->vbars[bar_idx];
|
||||
uint32_t offset = pci_bar_offset(bar_idx);
|
||||
uint32_t update_idx = bar_idx;
|
||||
|
||||
if (vbar->is_mem64hi) {
|
||||
update_idx -= 1U;
|
||||
}
|
||||
unmap_cb(vdev, update_idx);
|
||||
if (val != ~0U) {
|
||||
pci_vdev_write_vbar(vdev, bar_idx, val);
|
||||
if (map_cb != NULL) {
|
||||
map_cb(vdev, update_idx);
|
||||
}
|
||||
} else {
|
||||
pci_vdev_write_vcfg(vdev, offset, 4U, val);
|
||||
vdev->vbars[update_idx].base_gpa = 0UL;
|
||||
pci_vdev_write_vbar(vdev, bar_idx, val);
|
||||
if ((map_cb != NULL) && (vdev->vbars[update_idx].base_gpa != 0UL)) {
|
||||
map_cb(vdev, update_idx);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -174,7 +174,6 @@ uint32_t pci_vdev_read_vcfg(const struct pci_vdev *vdev, uint32_t offset, uint32
|
|||
void pci_vdev_write_vcfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
|
||||
uint32_t vpci_add_capability(struct pci_vdev *vdev, uint8_t *capdata, uint8_t caplen);
|
||||
|
||||
uint32_t pci_vdev_read_vbar(const struct pci_vdev *vdev, uint32_t idx);
|
||||
void pci_vdev_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);
|
||||
|
||||
void vdev_pt_hide_sriov_cap(struct pci_vdev *vdev);
|
||||
|
|
Loading…
Reference in New Issue