HV: remove the populate_msi_struct() function

Remove the populate_msi_struct() function, moving the MSI-specific initialization into msi.c and the MSI-X-specific initialization into msix.c.

Rename mmio_hva to mmio_hpa and adjust the related code to fix a MISRA C violation ("Cast from pointer to integral type", void * to unsigned long): (uint64_t)hpa2hva(bar->base).

Tracked-On: #2534
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 562628b99e
parent 3158c851ae
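For readers skimming the diff below: the MISRA C issue came from caching (uint64_t)hpa2hva(bar->base) in mmio_hva, i.e. casting a pointer to an integer. The rework instead stores only the host physical address (mmio_hpa) and calls hpa2hva() at each access site, NULL-checking the result. The standalone C sketch below illustrates that pattern only; the hpa2hva() stub, the msix_state struct, and the write_table_dword() helper are simplified stand-ins invented for this example, not ACRN's actual code.

/* Minimal, self-contained sketch of the pattern this commit adopts:
 * keep the BAR base as a host *physical* address (plain uint64_t) and
 * translate it to a pointer with hpa2hva() only at the point of use.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the hypervisor's HPA-to-HVA translation (identity map here). */
static void *hpa2hva(uint64_t hpa)
{
	return (hpa != 0UL) ? (void *)(uintptr_t)hpa : NULL;
}

/* Simplified stand-in for struct pci_msix: only integer addresses are cached. */
struct msix_state {
	uint64_t mmio_hpa;     /* BAR base kept as an integer HPA, no pointer cast */
	uint64_t table_offset; /* offset of the MSI-X table inside the BAR */
};

/* Translate at the access site and NULL-check, as the reworked msix.c does. */
static int write_table_dword(const struct msix_state *msix, uint32_t index, uint32_t val)
{
	void *hva = hpa2hva(msix->mmio_hpa + msix->table_offset);

	if (hva == NULL) {
		return -1; /* mapping unavailable; nothing is dereferenced */
	}
	((volatile uint32_t *)hva)[index] = val;
	return 0;
}

int main(void)
{
	/* Back the "BAR" with ordinary heap memory so the sketch is runnable. */
	uint32_t *fake_bar = calloc(16U, sizeof(uint32_t));
	struct msix_state msix = {
		.mmio_hpa = (uint64_t)(uintptr_t)fake_bar,
		.table_offset = 0UL,
	};

	(void)write_table_dword(&msix, 3U, 0xdeadbeefU);
	printf("entry 3 = 0x%x\n", fake_bar[3]);
	free(fake_bar);
	return 0;
}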
@@ -34,6 +34,8 @@
 #include <vpci.h>
 #include "pci_priv.h"
 
+static int32_t vmsi_init(struct pci_vdev *vdev);
+
 static inline bool msicap_access(const struct pci_vdev *vdev, uint32_t offset)
 {
 	bool ret;
@@ -166,7 +168,8 @@ static int32_t vmsi_deinit(struct pci_vdev *vdev)
 	return 0;
 }
 
-static const struct pci_vdev_ops pci_ops_vdev_msi = {
+const struct pci_vdev_ops pci_ops_vdev_msi = {
+	.init = vmsi_init,
 	.deinit = vmsi_deinit,
 	.cfgwrite = vmsi_cfgwrite,
 	.cfgread = vmsi_cfgread,
@@ -187,7 +190,7 @@ static void buf_write32(uint8_t buf[], uint32_t val)
 	buf[3] = (uint8_t)((val >> 24U) & 0xFFU);
 }
 
-void populate_msi_struct(struct pci_vdev *vdev)
+static int32_t vmsi_init(struct pci_vdev *vdev)
 {
 	struct pci_pdev *pdev = vdev->pdev;
 	uint32_t val;
@@ -197,9 +200,6 @@ void populate_msi_struct(struct pci_vdev *vdev)
 	vdev->msi.capoff = pdev->msi.capoff;
 	vdev->msi.caplen = pdev->msi.caplen;
 
-	/* Assign MSI handler for configuration read and write */
-	add_vdev_handler(vdev, &pci_ops_vdev_msi);
-
 	(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msi.capoff], pdev->msi.caplen,
 		(void *)&pdev->msi.cap[0U], pdev->msi.caplen);
 
@@ -210,15 +210,6 @@ void populate_msi_struct(struct pci_vdev *vdev)
 		buf_write32(&vdev->cfgdata.data_8[pdev->msi.capoff], val);
 	}
 
-	if (pdev->msix.capoff != 0U) {
-		vdev->msix.capoff = pdev->msix.capoff;
-		vdev->msix.caplen = pdev->msix.caplen;
-
-		/* Assign MSI-X handler for configuration read and write */
-		add_vdev_handler(vdev, &pci_ops_vdev_msix);
-
-		(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msix.capoff], pdev->msix.caplen,
-			(void *)&pdev->msix.cap[0U], pdev->msix.caplen);
-	}
+	return 0;
 }
 
@@ -60,7 +60,7 @@ static int32_t vmsix_remap_entry(const struct pci_vdev *vdev, uint32_t index, bo
 {
 	struct msix_table_entry *pentry;
 	struct ptirq_msi_info info;
-	uint64_t hva;
+	void *hva;
 	int32_t ret;
 
 	info.is_msix = 1;
@@ -70,7 +70,7 @@ static int32_t vmsix_remap_entry(const struct pci_vdev *vdev, uint32_t index, bo
 	ret = ptirq_msix_remap(vdev->vpci->vm, vdev->vbdf.value, (uint16_t)index, &info);
 	if (ret == 0) {
 		/* Write the table entry to the physical structure */
-		hva = vdev->msix.mmio_hva + vdev->msix.table_offset;
+		hva = hpa2hva(vdev->msix.mmio_hpa + vdev->msix.table_offset);
 		pentry = (struct msix_table_entry *)hva + index;
 
 		/*
@@ -280,7 +280,7 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
 	struct pci_vdev *vdev;
 	int32_t ret = 0;
 	uint64_t offset;
-	uint64_t hva;
+	void *hva;
 
 	vdev = (struct pci_vdev *)handler_private_data;
 	offset = mmio->address - vdev->msix.mmio_gpa;
@@ -288,13 +288,13 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
 	if (msixtable_access(vdev, (uint32_t)offset)) {
 		vmsix_table_rw(vdev, mmio, (uint32_t)offset);
 	} else {
-		hva = vdev->msix.mmio_hva + offset;
+		hva = hpa2hva(vdev->msix.mmio_hpa + offset);
 
 		/* Only DWORD and QWORD are permitted */
 		if ((mmio->size != 4U) && (mmio->size != 8U)) {
 			pr_err("%s, Only DWORD and QWORD are permitted", __func__);
 			ret = -EINVAL;
-		} else {
+		} else if (hva != NULL) {
 			stac();
 			/* MSI-X PBA and Capability Table could be in the same range */
 			if (mmio->direction == REQUEST_READ) {
@@ -319,7 +319,7 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
 	return ret;
 }
 
-static int32_t vmsix_init(struct pci_vdev *vdev)
+static int32_t vmsix_init_helper(struct pci_vdev *vdev)
 {
 	uint32_t i;
 	uint64_t addr_hi, addr_lo;
@@ -342,7 +342,7 @@ static int32_t vmsix_init(struct pci_vdev *vdev)
 
 	bar = &pdev->bar[msix->table_bar];
 	if (bar != NULL) {
-		vdev->msix.mmio_hva = (uint64_t)hpa2hva(bar->base);
+		vdev->msix.mmio_hpa = bar->base;
 		vdev->msix.mmio_gpa = sos_vm_hpa2gpa(bar->base);
 		vdev->msix.mmio_size = bar->size;
 	}
@@ -361,7 +361,7 @@ static int32_t vmsix_init(struct pci_vdev *vdev)
 	 */
 
 	/* The higher boundary of the 4KB aligned address range for MSI-X table */
-	addr_hi = msix->mmio_gpa + msix->table_offset + msix->table_count * MSIX_TABLE_ENTRY_SIZE;
+	addr_hi = msix->mmio_gpa + msix->table_offset + (msix->table_count * MSIX_TABLE_ENTRY_SIZE);
 	addr_hi = round_page_up(addr_hi);
 
 	/* The lower boundary of the 4KB aligned address range for MSI-X table */
@@ -383,6 +383,24 @@ static int32_t vmsix_init(struct pci_vdev *vdev)
 	return ret;
 }
 
+static int32_t vmsix_init(struct pci_vdev *vdev)
+{
+	struct pci_pdev *pdev = vdev->pdev;
+	int32_t ret = 0;
+
+	if (pdev->msix.capoff != 0U) {
+		vdev->msix.capoff = pdev->msix.capoff;
+		vdev->msix.caplen = pdev->msix.caplen;
+
+		(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msix.capoff], pdev->msix.caplen,
+			(void *)&pdev->msix.cap[0U], pdev->msix.caplen);
+
+		ret = vmsix_init_helper(vdev);
+	}
+
+	return ret;
+}
+
 static int32_t vmsix_deinit(struct pci_vdev *vdev)
 {
 	vdev->msix.intercepted_size = 0U;
@@ -71,14 +71,13 @@ static inline void pci_vdev_write_cfg_u32(struct pci_vdev *vdev, uint32_t offset
 extern const struct vpci_ops partition_mode_vpci_ops;
 #else
 extern const struct vpci_ops sharing_mode_vpci_ops;
+extern const struct pci_vdev_ops pci_ops_vdev_msi;
+extern const struct pci_vdev_ops pci_ops_vdev_msix;
 #endif
 
 uint32_t pci_vdev_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes);
 void pci_vdev_write_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
 
-void populate_msi_struct(struct pci_vdev *vdev);
-
 void add_vdev_handler(struct pci_vdev *vdev, const struct pci_vdev_ops *ops);
 
 struct pci_vdev *pci_find_vdev_by_vbdf(const struct acrn_vpci *vpci, union pci_bdf vbdf);
@@ -137,7 +137,11 @@ static void init_vdev_for_pdev(struct pci_pdev *pdev, const void *cb_data)
 
 	vdev = alloc_pci_vdev(vm, pdev);
 	if (vdev != NULL) {
-		populate_msi_struct(vdev);
+		/* Assign MSI handler for configuration read and write */
+		add_vdev_handler(vdev, &pci_ops_vdev_msi);
+
+		/* Assign MSI-X handler for configuration read and write */
+		add_vdev_handler(vdev, &pci_ops_vdev_msix);
 	}
 }
 
@@ -61,7 +61,7 @@ struct pci_msi {
 struct pci_msix {
 	struct msix_table_entry tables[CONFIG_MAX_MSIX_TABLE_NUM];
 	uint64_t mmio_gpa;
-	uint64_t mmio_hva;
+	uint64_t mmio_hpa;
 	uint64_t mmio_size;
 	uint64_t intercepted_gpa;
 	uint64_t intercepted_size;
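Taken together, the hunks above replace the single populate_msi_struct() call with per-capability ops tables: sharing-mode code registers pci_ops_vdev_msi and pci_ops_vdev_msix for every vdev, and each .init hook copies its own capability bytes, skipping the work when the capability is absent (the new vmsix_init, for example, checks capoff != 0U). The following standalone C sketch only illustrates that shape; every type and helper in it, including the add_vdev_handler() stand-in that invokes .init immediately, is a simplified invention for this example and says nothing about where ACRN actually calls the hook.

/* Illustrative sketch (not ACRN source) of an ops table with an .init hook
 * registered per virtual device, where each hook decides for itself whether
 * the corresponding capability needs any setup.
 */
#include <stdint.h>
#include <stdio.h>

struct vdev; /* simplified stand-in for struct pci_vdev */

struct vdev_ops {
	const char *name;
	int32_t (*init)(struct vdev *vdev);
};

struct vdev {
	uint32_t msi_capoff;  /* 0 means "capability not present" */
	uint32_t msix_capoff;
};

static int32_t vmsi_init(struct vdev *vdev)
{
	if (vdev->msi_capoff != 0U) {
		printf("copying MSI capability at offset 0x%x\n", vdev->msi_capoff);
	}
	return 0;
}

static int32_t vmsix_init(struct vdev *vdev)
{
	if (vdev->msix_capoff != 0U) {
		printf("copying MSI-X capability at offset 0x%x\n", vdev->msix_capoff);
	}
	return 0; /* nothing to do when the device has no MSI-X capability */
}

static const struct vdev_ops ops_msi  = { .name = "msi",  .init = vmsi_init  };
static const struct vdev_ops ops_msix = { .name = "msix", .init = vmsix_init };

/* Stand-in registration helper: here it simply runs the .init hook. */
static void add_vdev_handler(struct vdev *vdev, const struct vdev_ops *ops)
{
	(void)ops->init(vdev);
}

int main(void)
{
	struct vdev dev = { .msi_capoff = 0x50U, .msix_capoff = 0U };

	/* Mirrors the shape of init_vdev_for_pdev() in the diff: register both
	 * handlers unconditionally; each .init is a no-op if its capability is absent. */
	add_vdev_handler(&dev, &ops_msi);
	add_vdev_handler(&dev, &ops_msix);
	return 0;
}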