hv: cpuid: make leaf 0x4 per-cpu in hybrid architecture

Leaf 0x4 returns deterministic cache parameters for each cache level. On
hybrid architectures, P-cores and E-cores report different cache
information.

Add leaf 0x4 to the per-cpu leaf list on hybrid architectures and handle
the leaf-specific cpuid accesses.

Tracked-On: #8608
Signed-off-by: Haiwei Li <haiwei.li@intel.com>
This commit is contained in:
Haiwei Li 2024-04-18 18:23:10 +08:00 committed by acrnsi-robot
parent f7506424e4
commit 59a8cc4c28
3 changed files with 67 additions and 20 deletions

View File

@ -443,6 +443,50 @@ static int32_t set_vcpuid_vcat_10h(struct acrn_vm *vm)
} }
#endif #endif
/*
 * Emulate CPUID leaf 0x4 (deterministic cache parameters).
 *
 * Reads the physical cache parameters for the subleaf selected by the
 * incoming *ecx value and returns them to the guest. When vCAT is
 * configured for the VM, the cache-mask-related fields are adjusted
 * for the VM first.
 *
 * Fix: the original assigned the return of set_vcpuid_vcat_04h() to an
 * undeclared variable `result`, a compile error when CONFIG_VCAT_ENABLED
 * is set; the value is unused here, so it is explicitly discarded.
 *
 * @param vm   the VM whose vCAT configuration (if any) is applied;
 *             unused unless CONFIG_VCAT_ENABLED is defined
 * @param eax  out: CPUID.04H EAX for the requested subleaf
 * @param ebx  out: CPUID.04H EBX
 * @param ecx  in: subleaf index to query; out: CPUID.04H ECX
 * @param edx  out: CPUID.04H EDX
 */
static void guest_cpuid_04h(__unused struct acrn_vm *vm, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	struct vcpuid_entry entry;

	cpuid_subleaf(CPUID_CACHE, *ecx, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	/* EAX == 0 means "no such cache level": nothing to adjust. */
	if (entry.eax != 0U) {
#ifdef CONFIG_VCAT_ENABLED
		if (is_vcat_configured(vm)) {
			/* set_vcpuid_vcat_04h will not change entry.eax */
			(void)set_vcpuid_vcat_04h(vm, &entry);
		}
#endif
	}

	*eax = entry.eax;
	*ebx = entry.ebx;
	*ecx = entry.ecx;
	*edx = entry.edx;
}
/*
 * Populate the VM's vCPUID table with every subleaf of CPUID leaf 0x4
 * (deterministic cache parameters), stopping at the first subleaf whose
 * EAX reports that no further cache level exists.
 *
 * @param vm the VM whose vCPUID entries are being set up
 * @return 0 on success, or the error code from set_vcpuid_entry()
 */
static int32_t set_vcpuid_cache(struct acrn_vm *vm)
{
	int32_t ret = 0;
	uint32_t subleaf = 0U;
	struct vcpuid_entry entry;

	entry.leaf = CPUID_CACHE;
	entry.flags = CPUID_CHECK_SUBLEAF;

	while (ret == 0) {
		entry.subleaf = subleaf;
		/* guest_cpuid_04h() takes the subleaf index through ECX */
		entry.ecx = subleaf;
		guest_cpuid_04h(vm, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
		if (entry.eax == 0U) {
			/* no more cache levels to enumerate */
			break;
		}
		ret = set_vcpuid_entry(vm, &entry);
		subleaf++;
	}

	return ret;
}
static int32_t set_vcpuid_extended_function(struct acrn_vm *vm) static int32_t set_vcpuid_extended_function(struct acrn_vm *vm)
{ {
uint32_t i, limit; uint32_t i, limit;
@ -526,6 +570,15 @@ static inline void percpu_cpuid_init(void)
pcpu_cpuids.leaf_nr = sizeof(percpu_leaves)/sizeof(uint32_t); pcpu_cpuids.leaf_nr = sizeof(percpu_leaves)/sizeof(uint32_t);
memcpy_s(pcpu_cpuids.leaves, sizeof(percpu_leaves), memcpy_s(pcpu_cpuids.leaves, sizeof(percpu_leaves),
percpu_leaves, sizeof(percpu_leaves)); percpu_leaves, sizeof(percpu_leaves));
/* hybrid related percpu leaves*/
if (pcpu_has_cap(X86_FEATURE_HYBRID)) {
/* 0x4U */
uint32_t hybrid_leaves[] = {CPUID_CACHE};
memcpy_s((pcpu_cpuids.leaves + pcpu_cpuids.leaf_nr * sizeof(uint32_t)),
sizeof(hybrid_leaves), hybrid_leaves, sizeof(hybrid_leaves));
pcpu_cpuids.leaf_nr += sizeof(hybrid_leaves)/sizeof(uint32_t);
}
} }
int32_t set_vcpuid_entries(struct acrn_vm *vm) int32_t set_vcpuid_entries(struct acrn_vm *vm)
@ -533,7 +586,7 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
int32_t result; int32_t result;
struct vcpuid_entry entry; struct vcpuid_entry entry;
uint32_t limit; uint32_t limit;
uint32_t i, j; uint32_t i;
struct cpuinfo_x86 *cpu_info = get_pcpu_info(); struct cpuinfo_x86 *cpu_info = get_pcpu_info();
init_vcpuid_entry(0U, 0U, 0U, &entry); init_vcpuid_entry(0U, 0U, 0U, &entry);
@ -554,24 +607,9 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
} }
switch (i) { switch (i) {
case 0x04U: /* 0x4U */
for (j = 0U; ; j++) { case CPUID_CACHE:
init_vcpuid_entry(i, j, CPUID_CHECK_SUBLEAF, &entry); result = set_vcpuid_cache(vm);
if (entry.eax == 0U) {
break;
}
#ifdef CONFIG_VCAT_ENABLED
if (is_vcat_configured(vm)) {
result = set_vcpuid_vcat_04h(vm, &entry);
}
#endif
result = set_vcpuid_entry(vm, &entry);
if (result != 0) {
/* wants to break out of switch */
break;
}
}
break; break;
/* MONITOR/MWAIT */ /* MONITOR/MWAIT */
case 0x05U: case 0x05U:
@ -883,6 +921,11 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
guest_cpuid_01h(vcpu, eax, ebx, ecx, edx); guest_cpuid_01h(vcpu, eax, ebx, ecx, edx);
break; break;
/* 0x04U for hybrid arch */
case CPUID_CACHE:
guest_cpuid_04h(vcpu->vm, eax, ebx, ecx, edx);
break;
case 0x0bU: case 0x0bU:
guest_cpuid_0bh(vcpu, eax, ebx, ecx, edx); guest_cpuid_0bh(vcpu, eax, ebx, ecx, edx);
break; break;
@ -905,7 +948,7 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
default: default:
/* /*
* In this switch statement, leaf 0x01/0x0b/0x0d/0x19/0x1f/0x80000001 * In this switch statement, leaf 0x01/0x04/0x0b/0x0d/0x19/0x1f/0x80000001
* shall be handled specifically. All the other cases * shall be handled specifically. All the other cases
* just return physical value. * just return physical value.
*/ */

View File

@ -85,6 +85,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007 (EDX)*/ /* Intel-defined CPU features, CPUID level 0x00000007 (EDX)*/
#define X86_FEATURE_MDS_CLEAR ((FEAT_7_0_EDX << 5U) + 10U) #define X86_FEATURE_MDS_CLEAR ((FEAT_7_0_EDX << 5U) + 10U)
#define X86_FEATURE_HYBRID ((FEAT_7_0_EDX << 5U) + 15U)
#define X86_FEATURE_IBRS_IBPB ((FEAT_7_0_EDX << 5U) + 26U) #define X86_FEATURE_IBRS_IBPB ((FEAT_7_0_EDX << 5U) + 26U)
#define X86_FEATURE_STIBP ((FEAT_7_0_EDX << 5U) + 27U) #define X86_FEATURE_STIBP ((FEAT_7_0_EDX << 5U) + 27U)
#define X86_FEATURE_L1D_FLUSH ((FEAT_7_0_EDX << 5U) + 28U) #define X86_FEATURE_L1D_FLUSH ((FEAT_7_0_EDX << 5U) + 28U)

View File

@ -120,6 +120,8 @@
#define CPUID_ECX_SGX_LC (1U<<30U) #define CPUID_ECX_SGX_LC (1U<<30U)
/* CPUID.07H:ECX.PKS*/ /* CPUID.07H:ECX.PKS*/
#define CPUID_ECX_PKS (1U<<31U) #define CPUID_ECX_PKS (1U<<31U)
/* CPUID.07H:EDX.Hybrid */
#define CPUID_EDX_HYBRID (1U<<15U)
/* CPUID.07H:EDX.CET_IBT */ /* CPUID.07H:EDX.CET_IBT */
#define CPUID_EDX_CET_IBT (1U<<20U) #define CPUID_EDX_CET_IBT (1U<<20U)
/* CPUID.07H:EDX.IBRS_IBPB*/ /* CPUID.07H:EDX.IBRS_IBPB*/
@ -166,6 +168,7 @@
#define CPUID_FEATURES 1U #define CPUID_FEATURES 1U
#define CPUID_TLB 2U #define CPUID_TLB 2U
#define CPUID_SERIALNUM 3U #define CPUID_SERIALNUM 3U
#define CPUID_CACHE 4U
#define CPUID_EXTEND_FEATURE 7U #define CPUID_EXTEND_FEATURE 7U
#define CPUID_EXTEND_TOPOLOGY 0xBU #define CPUID_EXTEND_TOPOLOGY 0xBU
#define CPUID_XSAVE_FEATURES 0xDU #define CPUID_XSAVE_FEATURES 0xDU