Merge branch 'kvm-5.19-early-fixes' into HEAD
commit a280e35846
@@ -439,7 +439,7 @@ do { \
		     : [ptr] "+m" (*_ptr), \
		       [old] "+a" (__old) \
		     : [new] ltype (__new) \
-		     : "memory", "cc"); \
+		     : "memory"); \
	if (unlikely(__err)) \
		goto label; \
	if (unlikely(!success)) \
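
This hunk appears to be the __try_cmpxchg_user_asm() fallback in arch/x86/include/asm/uaccess.h: the asm already returns ZF through a flag-output operand (CC_OUT(z)), and newer GCC rejects combining a flag output with an explicit "cc" clobber, so the clobber has to go. A minimal user-space sketch of the same shape; try_cmpxchg_u32() and its operands are illustrative, not the kernel macro:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Compare-and-swap *ptr from *old to new; ZF is captured via a GCC/Clang
 * flag output ("=@ccz"), so "cc" must NOT also appear in the clobbers. */
static bool try_cmpxchg_u32(uint32_t *ptr, uint32_t *old, uint32_t new)
{
	bool success;

	asm volatile("lock cmpxchgl %[new], %[ptr]"
		     : "=@ccz" (success),
		       [ptr] "+m" (*ptr),
		       [old] "+a" (*old)
		     : [new] "r" (new)
		     : "memory");	/* no "cc" here */
	return success;
}

int main(void)
{
	uint32_t v = 1, expected = 1;

	if (try_cmpxchg_u32(&v, &expected, 2))
		printf("swapped, v = %u\n", v);	/* prints 2 */
	return 0;
}
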
@@ -5179,7 +5179,7 @@ static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 		roots_to_free |= KVM_MMU_ROOT_CURRENT;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
-		if (is_obsolete_root(kvm, mmu->root.hpa))
+		if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 	}
 
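
The __kvm_mmu_free_obsolete_roots() fix (arch/x86/kvm/mmu/mmu.c) is a classic copy-paste bug: the loop walked all KVM_MMU_NUM_PREV_ROOTS entries but tested the current root's hpa on every iteration, so obsolete previous roots were never flagged for freeing. A reduced sketch of the pattern with toy types (not KVM's structures):

#include <stdbool.h>
#include <stdio.h>

#define NUM_PREV_ROOTS 3

struct root { unsigned long hpa; };

static bool is_obsolete(unsigned long hpa) { return hpa == 0; }

int main(void)
{
	struct root root = { .hpa = 1 };	/* current root: valid */
	struct root prev_roots[NUM_PREV_ROOTS] = { { 0 }, { 1 }, { 0 } };
	unsigned long buggy = 0, fixed = 0;
	int i;

	for (i = 0; i < NUM_PREV_ROOTS; i++) {
		if (is_obsolete(root.hpa))		/* bug: ignores i */
			buggy |= 1ul << i;
		if (is_obsolete(prev_roots[i].hpa))	/* fix */
			fixed |= 1ul << i;
	}
	/* prints "buggy mask 0, fixed mask 0x5" */
	printf("buggy mask %#lx, fixed mask %#lx\n", buggy, fixed);
	return 0;
}
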
@@ -145,6 +145,15 @@ static bool try_step_up(struct tdp_iter *iter)
 	return true;
 }
 
+/*
+ * Step the iterator back up a level in the paging structure. Should only be
+ * used when the iterator is below the root level.
+ */
+void tdp_iter_step_up(struct tdp_iter *iter)
+{
+	WARN_ON(!try_step_up(iter));
+}
+
 /*
  * Step to the next SPTE in a pre-order traversal of the paging structure.
  * To get to the next SPTE, the iterator either steps down towards the goal
@@ -114,5 +114,6 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
 		    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
 void tdp_iter_restart(struct tdp_iter *iter);
+void tdp_iter_step_up(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
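
tdp_iter_step_up() is deliberately a void wrapper: try_step_up() can only fail at the root, and the caller added below only steps up while strictly below the target level, so a failure indicates a broken invariant and warrants a WARN rather than error plumbing. The same wrapper shape on a toy iterator (hypothetical names, sketch only):

#include <stdbool.h>
#include <stdio.h>

struct iter { int level, root_level; };

/* Returns false only when already at the root, mirroring try_step_up(). */
static bool try_step_up(struct iter *it)
{
	if (it->level == it->root_level)
		return false;
	it->level++;
	return true;
}

/* Void wrapper for callers that know they are below the root;
 * a failure here means a broken invariant (kernel: WARN_ON()). */
static void step_up(struct iter *it)
{
	if (!try_step_up(it))
		fprintf(stderr, "step_up() called at root level\n");
}

int main(void)
{
	struct iter it = { .level = 1, .root_level = 4 };

	while (it.level < 3)
		step_up(&it);
	printf("level = %d\n", it.level);	/* prints 3 */
	return 0;
}
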
@@ -1742,12 +1742,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 	gfn_t start = slot->base_gfn;
 	gfn_t end = start + slot->npages;
 	struct tdp_iter iter;
+	int max_mapping_level;
 	kvm_pfn_t pfn;
 
 	rcu_read_lock();
 
 	tdp_root_for_each_pte(iter, root, start, end) {
-retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
 
@@ -1755,15 +1755,41 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
+		/*
+		 * This is a leaf SPTE. Check if the PFN it maps can
+		 * be mapped at a higher level.
+		 */
 		pfn = spte_to_pfn(iter.old_spte);
-		if (kvm_is_reserved_pfn(pfn) ||
-		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
-							    pfn, PG_LEVEL_NUM))
+
+		if (kvm_is_reserved_pfn(pfn))
 			continue;
 
+		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
+				iter.gfn, pfn, PG_LEVEL_NUM);
+
+		WARN_ON(max_mapping_level < iter.level);
+
+		/*
+		 * If this page is already mapped at the highest
+		 * viable level, there's nothing more to do.
+		 */
+		if (max_mapping_level == iter.level)
+			continue;
+
+		/*
+		 * The page can be remapped at a higher level, so step
+		 * up to zap the parent SPTE.
+		 */
+		while (max_mapping_level > iter.level)
+			tdp_iter_step_up(&iter);
+
 		/* Note, a successful atomic zap also does a remote TLB flush. */
-		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
-			goto retry;
+		tdp_mmu_zap_spte_atomic(kvm, &iter);
+
+		/*
+		 * If the atomic zap fails, the iter will recurse back into
+		 * the same subtree to retry.
+		 */
 	}
 
 	rcu_read_unlock();
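
The rewritten loop asks, for each leaf SPTE, how large a mapping its PFN could support; if the answer is larger than the current level, it walks the iterator up and zaps the covering non-leaf SPTE so the next fault can install a single huge page, instead of zapping 4KiB leaves one by one. A standalone sketch of just that control flow (toy iterator, stubbed kvm_mmu_max_mapping_level(), not KVM code):

#include <stdio.h>

struct iter { int level; unsigned long gfn; };

/* Stub for kvm_mmu_max_mapping_level(): pretend the PFN backing this
 * gfn sits in a 1GiB-aligned, 1GiB-sized region (level 3). */
static int max_mapping_level(unsigned long gfn)
{
	return 3;
}

int main(void)
{
	struct iter it = { .level = 1, .gfn = 0x1000 };	/* 4KiB leaf */
	int max = max_mapping_level(it.gfn);

	/*
	 * Walk the iterator up until it points at the non-leaf SPTE
	 * covering the whole would-be huge mapping; zapping that one
	 * entry lets the next fault install a single huge page.
	 */
	while (max > it.level)
		it.level++;	/* stand-in for tdp_iter_step_up(&it) */

	printf("zap at level %d\n", it.level);	/* prints 3 */
	return 0;
}
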
@@ -982,7 +982,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
 		WARN_ON(!svm->tsc_scaling_enabled);
 		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
-		svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 	}
 
 	svm->nested.ctl.nested_cr3 = 0;
@@ -1387,7 +1387,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
 	vcpu->arch.tsc_scaling_ratio =
 		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
 					       svm->tsc_ratio_msr);
-	svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+	__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 }
 
 /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
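
nested_svm_update_tsc_ratio_msr() composes L1's scaling ratio with the ratio L1 set for L2. AMD's TSC ratio MSR is an 8.32 fixed-point multiplier, so composing two ratios amounts to a 64x64-bit multiply shifted right by the 32 fractional bits, which is essentially what kvm_calc_nested_tsc_multiplier() computes on SVM. A worked sketch (mul_ratio() is a stand-in for the kernel's mul_u64_u64_shr(); the values are made up):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 32	/* AMD TSC ratio format: 8.32 fixed point */

/* Compose two fixed-point ratios: (a * b) >> FRAC_BITS.
 * Uses the GCC/Clang 128-bit type to keep the full product. */
static uint64_t mul_ratio(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> FRAC_BITS);
}

int main(void)
{
	uint64_t l1 = 3ull << (FRAC_BITS - 1);	/* 1.5 */
	uint64_t l2 = 1ull << (FRAC_BITS - 1);	/* 0.5 */

	/* L2 runs at 1.5 * 0.5 = 0.75 of the host TSC: prints 0xc0000000 */
	printf("%#llx\n", (unsigned long long)mul_ratio(l1, l2));
	return 0;
}
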
@@ -465,11 +465,24 @@ static int has_svm(void)
 	return 1;
 }
 
+void __svm_write_tsc_multiplier(u64 multiplier)
+{
+	preempt_disable();
+
+	if (multiplier == __this_cpu_read(current_tsc_ratio))
+		goto out;
+
+	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+	__this_cpu_write(current_tsc_ratio, multiplier);
+out:
+	preempt_enable();
+}
+
 static void svm_hardware_disable(void)
 {
 	/* Make sure we clean up behind us */
 	if (tsc_scaling)
-		wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
+		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 
 	cpu_svm_disable();
 
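
The new helper caches the last value written on each CPU and skips the costly WRMSR when it already matches; preempt_disable() keeps the task from migrating between reading the per-CPU cache and writing the MSR, so the cache can never describe a different CPU's register. A portable sketch of the same write-through cache using a thread-local (write_hw() is a stand-in for wrmsrl(); not kernel code):

#include <stdint.h>
#include <stdio.h>

static _Thread_local uint64_t cached_ratio;	/* per-CPU cache stand-in */

static void write_hw(uint64_t v)
{
	printf("expensive write: %#llx\n", (unsigned long long)v);
}

/* Skip the costly hardware write when the cached value already matches. */
static void write_tsc_multiplier(uint64_t multiplier)
{
	/* kernel version does this under preempt_disable() */
	if (multiplier == cached_ratio)
		return;

	write_hw(multiplier);
	cached_ratio = multiplier;
}

int main(void)
{
	write_tsc_multiplier(0x100000000ull);	/* writes */
	write_tsc_multiplier(0x100000000ull);	/* cached, no write */
	return 0;
}
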
@@ -515,8 +528,7 @@ static int svm_hardware_enable(void)
 		 * Set the default value, even if we don't use TSC scaling
 		 * to avoid having stale value in the msr
 		 */
-		wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
-		__this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
+		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 	}
 
 
@@ -999,11 +1011,12 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
 {
-	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+	__svm_write_tsc_multiplier(multiplier);
 }
 
+
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
 					      struct vcpu_svm *svm)
@@ -1363,13 +1376,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		sev_es_prepare_switch_to_guest(hostsa);
 	}
 
-	if (tsc_scaling) {
-		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
-			__this_cpu_write(current_tsc_ratio, tsc_ratio);
-			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
-		}
-	}
+	if (tsc_scaling)
+		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 
 	if (likely(tsc_aux_uret_slot >= 0))
 		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
@@ -590,7 +590,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 			       bool has_error_code, u32 error_code);
 int nested_svm_exit_special(struct vcpu_svm *svm);
 void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
+void __svm_write_tsc_multiplier(u64 multiplier);
 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
 				       struct vmcb_control_area *control);
 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
@@ -9,12 +9,6 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
 	int ret;
 
 	if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
-		clear_notify_signal();
-		if (task_work_pending(current))
-			task_work_run();
-	}
-
-	if (ti_work & _TIF_SIGPENDING) {
 		kvm_handle_signal_exit(vcpu);
 		return -EINTR;
 	}
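
After this change the vCPU-run path no longer consumes TIF_NOTIFY_SIGNAL itself (clear_notify_signal() plus task_work_run()); either pending flag now makes KVM return -EINTR so the work is handled on the normal exit-to-usermode path. The check itself is plain flag masking; a toy version (illustrative bit values, not the kernel's):

#include <stdio.h>

#define TIF_SIGPENDING		(1ul << 2)	/* illustrative positions */
#define TIF_NOTIFY_SIGNAL	(1ul << 17)

/* Mirror of the fixed check: either flag forces an exit to user mode. */
static int check_guest_mode_work(unsigned long ti_work)
{
	if (ti_work & (TIF_SIGPENDING | TIF_NOTIFY_SIGNAL))
		return -1;	/* kernel: kvm_handle_signal_exit(); -EINTR */
	return 0;
}

int main(void)
{
	printf("%d\n", check_guest_mode_work(TIF_NOTIFY_SIGNAL));	/* -1 */
	printf("%d\n", check_guest_mode_work(0));			/* 0 */
	return 0;
}
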
@@ -44,7 +44,7 @@ static inline void nop_loop(void)
 {
 	int i;
 
-	for (i = 0; i < 1000000; i++)
+	for (i = 0; i < 100000000; i++)
 		asm volatile("nop");
 }
 
@@ -56,12 +56,14 @@ static inline void check_tsc_msr_rdtsc(void)
 	tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
 	GUEST_ASSERT(tsc_freq > 0);
 
-	/* First, check MSR-based clocksource */
+	/* For increased accuracy, take mean rdtsc() before and after rdmsr() */
 	r1 = rdtsc();
 	t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+	r1 = (r1 + rdtsc()) / 2;
 	nop_loop();
 	r2 = rdtsc();
 	t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+	r2 = (r2 + rdtsc()) / 2;
 
 	GUEST_ASSERT(r2 > r1 && t2 > t1);
 
@@ -181,12 +183,14 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
 	tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
 	TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
 
-	/* First, check MSR-based clocksource */
+	/* For increased accuracy, take mean rdtsc() before and after ioctl */
 	r1 = rdtsc();
 	t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
 	r1 = (r1 + rdtsc()) / 2;
+	nop_loop();
 	r2 = rdtsc();
 	t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+	r2 = (r2 + rdtsc()) / 2;
 
 	TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
 
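
Both selftest hunks apply the same trick: reading the reference-time MSR (or issuing the host-side ioctl) is slow compared to rdtsc(), so a single rdtsc() taken before it poorly approximates the TSC value that corresponds to the reference read. Averaging rdtsc() taken immediately before and after brackets the slow read and roughly halves the error; the longer nop_loop() then widens the measurement window. The same technique with portable clocks (sketch; slow_read() stands in for the MSR read):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* A deliberately slow "reference clock" read. */
static uint64_t slow_read(void)
{
	usleep(100);
	return now_ns();
}

int main(void)
{
	uint64_t r1, t1;

	r1 = now_ns();
	t1 = slow_read();
	r1 = (r1 + now_ns()) / 2;	/* mean of before/after brackets t1 */
	printf("ref %llu vs bracketed local %llu\n",
	       (unsigned long long)t1, (unsigned long long)r1);
	return 0;
}
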
@@ -4300,8 +4300,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 		kvm_put_kvm_no_destroy(kvm);
 		mutex_lock(&kvm->lock);
 		list_del(&dev->vm_node);
+		if (ops->release)
+			ops->release(dev);
 		mutex_unlock(&kvm->lock);
-		ops->destroy(dev);
+		if (ops->destroy)
+			ops->destroy(dev);
 		return ret;
 	}
 
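
The create-device error path now honors optional callbacks: release() runs while kvm->lock is held and the device is being unlinked, so the backend can tear down VM-visible state before destroy() frees it, and both hooks are NULL-checked in the usual kernel ops-table idiom. A reduced user-space sketch (hypothetical struct device_ops, not KVM's kvm_device_ops):

#include <stdio.h>
#include <stdlib.h>

struct device;

struct device_ops {
	void (*release)(struct device *dev);	/* optional */
	void (*destroy)(struct device *dev);	/* optional */
};

struct device { const struct device_ops *ops; };

/* Error path: call each hook only if the backend implements it. */
static void undo_create(struct device *dev)
{
	if (dev->ops->release)
		dev->ops->release(dev);
	if (dev->ops->destroy)
		dev->ops->destroy(dev);
	free(dev);
}

static void my_destroy(struct device *dev) { printf("destroy\n"); }

static const struct device_ops my_ops = { .destroy = my_destroy };

int main(void)
{
	struct device *dev = malloc(sizeof(*dev));

	dev->ops = &my_ops;	/* no release(): the NULL check skips it */
	undo_create(dev);
	return 0;
}
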