diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 6adb0fe2522e1cc87536adc94e5267bcbcf48706..63f1bae99c1ab0278ff240e759c04fce01a563f4 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -2803,7 +2803,8 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
     for (i = 0; i < max_queues; i++) {
         virtio_net_del_queue(n, i);
     }
-
+    /* also delete the control vq */
+    virtio_del_queue(vdev, max_queues * 2);
     qemu_announce_timer_del(&n->announce_timer, false);
     g_free(n->vqs);
     qemu_del_nic(n->nic);
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 79e582b6022ad0ba82c4ba9f016d8f17ccaa5b8c..0ee10f488ddc76730e72eba930d0d058a268a1ae 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -6139,9 +6139,14 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
         x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
 
         /* Intel Processor Trace requires CPUID[0x14] */
-        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
-            kvm_enabled() && cpu->intel_pt_auto_level) {
-            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
+        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
+            if (cpu->intel_pt_auto_level) {
+                x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
+            } else if (cpu->env.cpuid_min_level < 0x14) {
+                mark_unavailable_features(cpu, FEAT_7_0_EBX,
+                    CPUID_7_0_EBX_INTEL_PT,
+                    "Intel PT needs CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\"");
+            }
         }
 
         /* CPU topology with multi-dies support requires CPUID[0x1F] */
@@ -6250,7 +6255,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
             g_free(name);
             goto out;
         }
+    }
 
+    if (cpu->max_features && accel_uses_host_cpuid()) {
         if (enable_cpu_pm) {
             host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                        &cpu->mwait.ecx, &cpu->mwait.edx);
@@ -6258,6 +6265,17 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
         }
     }
 
+    if (cpu->ucode_rev == 0) {
+        /* The default is the same as KVM's. */
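+        /* (KVM reports patch level 0x01000065 for AMD guests, and revision 1,
+         * i.e. bits 63:32 of IA32_UCODE_REV set to 1, for Intel guests.) */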
+        if (IS_AMD_CPU(env)) {
+            cpu->ucode_rev = 0x01000065;
+        } else {
+            cpu->ucode_rev = 0x100000000ULL;
+        }
+    }
+
     /* mwait extended info: needed for Core compatibility */
     /* We always wake on interrupt even if host does not have the capability */
     cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
@@ -6937,6 +6955,7 @@ static Property x86_cpu_properties[] = {
     DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
     DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
     DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+    DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
     DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
     DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
     DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 2e606d34a04b65668d046f833c5018850bc006c2..8a02464f9050cb311365d497a2e7df1f8d53e96a 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -345,6 +345,7 @@ typedef enum X86Seg {
 #define MSR_IA32_SPEC_CTRL              0x48
 #define MSR_VIRT_SSBD                   0xc001011f
 #define MSR_IA32_PRED_CMD               0x49
+#define MSR_IA32_UCODE_REV              0x8b
 #define MSR_IA32_CORE_CAPABILITY        0xcf
 #define MSR_IA32_ARCH_CAPABILITIES      0x10a
 #define MSR_IA32_TSCDEADLINE            0x6e0
@@ -1557,6 +1558,8 @@ struct X86CPU {
     CPUNegativeOffsetState neg;
     CPUX86State env;
 
+    uint64_t ucode_rev;
+
     uint32_t hyperv_spinlock_attempts;
     char *hyperv_vendor_id;
     bool hyperv_synic_kvm_only;
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
index 1b04bd7e94f14efc64a244bd090013fb5d445a08..cd40520c16e675c10e602552ce2fd137eb91ca78 100644
--- a/target/i386/hvf/x86_emu.c
+++ b/target/i386/hvf/x86_emu.c
@@ -664,8 +664,6 @@ static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
     RIP(env) += decode->len;
 }
 
-#define MSR_IA32_UCODE_REV 0x00000017
-
 void simulate_rdmsr(struct CPUState *cpu)
 {
     X86CPU *x86_cpu = X86_CPU(cpu);
@@ -681,7 +679,7 @@ void simulate_rdmsr(struct CPUState *cpu)
         val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
         break;
     case MSR_IA32_UCODE_REV:
-        val = (0x100000000ULL << 32) | 0x100000000ULL;
+        val = x86_cpu->ucode_rev;
         break;
     case MSR_EFER:
         val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 7328746d9248125292a99adefaa446c7cccbcb7c..60060087fddec75abe865b1a159cc83c6a6cc32a 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -63,6 +63,8 @@
  * 255 kvm_msr_entry structs */
 #define MSR_BUF_SIZE 4096
 
+static void kvm_init_msrs(X86CPU *cpu);
+
 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
     KVM_CAP_INFO(SET_TSS_ADDR),
     KVM_CAP_INFO(EXT_CPUID),
@@ -1777,6 +1779,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
         has_msr_tsc_aux = false;
     }
 
+    kvm_init_msrs(cpu);
+
     r = hyperv_init_vcpu(cpu);
     if (r) {
         goto fail;
@@ -2592,11 +2596,58 @@ static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
                       VMCS12_MAX_FIELD_INDEX << 1);
 }
 
+static int kvm_buf_set_msrs(X86CPU *cpu)
+{
+    int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+    if (ret < 0) {
+        return ret;
+    }
+
+    if (ret < cpu->kvm_msr_buf->nmsrs) {
+        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
+        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
+                     (uint32_t)e->index, (uint64_t)e->data);
+    }
+
+    assert(ret == cpu->kvm_msr_buf->nmsrs);
+    return 0;
+}
+
+static void kvm_init_msrs(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+
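+    /*
+     * Feature MSRs are immutable once the guest starts, so they are set
+     * here, a single time at vcpu creation, rather than re-sent from
+     * kvm_put_msrs() on every reset and migration.
+     */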
+    kvm_msr_buf_reset(cpu);
+    if (has_msr_arch_capabs) {
+        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
+                          env->features[FEAT_ARCH_CAPABILITIES]);
+    }
+
+    if (has_msr_core_capabs) {
+        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
+                          env->features[FEAT_CORE_CAPABILITY]);
+    }
+
+    /*
+     * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
+     * all kernels with MSR features should have them.
+     */
+    if (kvm_feature_msrs && cpu_has_vmx(env)) {
+        kvm_msr_entry_add_vmx(cpu, env->features);
+    }
+
+    assert(kvm_buf_set_msrs(cpu) == 0);
+}
+
 static int kvm_put_msrs(X86CPU *cpu, int level)
 {
     CPUX86State *env = &cpu->env;
     int i;
-    int ret;
 
     kvm_msr_buf_reset(cpu);
 
@@ -2648,17 +2699,6 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     }
 #endif
 
-    /* If host supports feature MSR, write down. */
-    if (has_msr_arch_capabs) {
-        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
-                          env->features[FEAT_ARCH_CAPABILITIES]);
-    }
-
-    if (has_msr_core_capabs) {
-        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
-                          env->features[FEAT_CORE_CAPABILITY]);
-    }
-
     /*
      * The following MSRs have side effects on the guest or are too heavy
      * for normal writeback. Limit them to reset or full state updates.
@@ -2831,14 +2871,6 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
 
         /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
          *       kvm_put_msr_feature_control. */
-
-        /*
-         * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
-         * all kernels with MSR features should have them.
-         */
-        if (kvm_feature_msrs && cpu_has_vmx(env)) {
-            kvm_msr_entry_add_vmx(cpu, env->features);
-        }
     }
 
     if (env->mcg_cap) {
@@ -2854,19 +2886,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         }
     }
 
-    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
-    if (ret < 0) {
-        return ret;
-    }
-
-    if (ret < cpu->kvm_msr_buf->nmsrs) {
-        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
-        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
-                     (uint32_t)e->index, (uint64_t)e->data);
-    }
-
-    assert(ret == cpu->kvm_msr_buf->nmsrs);
-    return 0;
+    return kvm_buf_set_msrs(cpu);
 }
 
 static int kvm_get_fpu(X86CPU *cpu)
diff --git a/target/i386/kvm_i386.h b/target/i386/kvm_i386.h
index 06fe06bdb3d6d647d8cfb0eee87b99c017398d3b..d98c6f69d08af549fe5f31fc4264a8d202f7a219 100644
--- a/target/i386/kvm_i386.h
+++ b/target/i386/kvm_i386.h
@@ -66,4 +66,5 @@ bool kvm_enable_x2apic(void);
 bool kvm_has_x2apic_api(void);
 
 bool kvm_hv_vpindex_settable(void);
+
 #endif
diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c
index 3eff6885f8a63ff525008bc0247712b8fcde1fa6..aed16fe3f0255323f8dfc146078b3e99fa8bc7d4 100644
--- a/target/i386/misc_helper.c
+++ b/target/i386/misc_helper.c
@@ -386,6 +386,7 @@ void helper_wrmsr(CPUX86State *env)
 
 void helper_rdmsr(CPUX86State *env)
 {
+    X86CPU *x86_cpu = env_archcpu(env);
     uint64_t val;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
@@ -515,6 +516,9 @@ void helper_rdmsr(CPUX86State *env)
     case MSR_IA32_BNDCFGS:
         val = env->msr_bndcfgs;
         break;
+    case MSR_IA32_UCODE_REV:
+        val = x86_cpu->ucode_rev;
+        break;
     default:
         if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
             && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
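
Testing note: as a quick functional check of the new "ucode-rev" property, the sketch below reads IA32_UCODE_REV back from inside a Linux guest through the msr driver. It is an illustrative test program, not part of the patch, and assumes a guest with the msr module loaded (modprobe msr), root privileges, and QEMU started under TCG, e.g. "-accel tcg -cpu qemu64,ucode-rev=0x123400000000" (this diff wires the property into the TCG and HVF read paths; KVM keeps its own microcode-revision handling).

/* rdmsr_ucode.c - read IA32_UCODE_REV (0x8b) from guest userspace.
 * Build with "gcc -o rdmsr_ucode rdmsr_ucode.c" and run as root. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_UCODE_REV 0x8b   /* same index the patch adds to cpu.h */

int main(void)
{
    uint64_t val;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd < 0) {
        perror("open /dev/cpu/0/msr (is the msr module loaded?)");
        return 1;
    }
    /* The Linux msr driver maps the file offset to the MSR index. */
    if (pread(fd, &val, sizeof(val), MSR_IA32_UCODE_REV) != sizeof(val)) {
        perror("pread");
        close(fd);
        return 1;
    }
    close(fd);
    /* On Intel CPUs the microcode revision lives in bits 63:32. */
    printf("IA32_UCODE_REV = 0x%016" PRIx64 " (Intel revision 0x%" PRIx64 ")\n",
           val, val >> 32);
    return 0;
}

Going through /dev/cpu/0/msr avoids writing a ring-0 RDMSR wrapper: the driver performs the privileged read on the chosen CPU and the file offset selects the MSR index.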