Commit 18ab37ba authored by Liran Alon, committed by Paolo Bonzini
Browse files

target/i386: kvm: Block migration for vCPUs exposed with nested virtualization



Commit d98f2607 ("target/i386: kvm: add VMX migration blocker")
added a migration blocker for vCPUs exposed with Intel VMX.
However, migration should also be blocked for vCPUs exposed with
AMD SVM.

Both cases should be blocked because QEMU needs to extract additional
vCPU state from KVM that must be migrated as part of the vCPU VMState.
E.g. whether the vCPU is running in guest-mode or host-mode.

Fixes: d98f2607 ("target/i386: kvm: add VMX migration blocker")
Reviewed-by: Maran Wilson <maran.wilson@oracle.com>
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Message-Id: <20190619162140.133674-6-liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bceeeef9
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -5215,12 +5215,6 @@ static int x86_cpu_filter_features(X86CPU *cpu)
    return rv;
}

/* Vendor checks: true when all three cached CPUID vendor words
 * (cpuid_vendor1..3) match the Intel / AMD vendor-ID constants. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
+12 −0
Original line number Diff line number Diff line
@@ -722,6 +722,13 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];

#define CPUID_VENDOR_HYGON    "HygonGenuine"

/* Vendor checks: compare the three cached CPUID vendor words
 * (cpuid_vendor1..3) against the Intel / AMD vendor-ID constants. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

#define CPUID_MWAIT_IBE     (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX     (1U << 0) /* enumeration supported */

@@ -1848,6 +1855,11 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env)
    }
}

/* Return true when the VMX bit is set in the vCPU's leaf-1 ECX feature
 * word, i.e. the guest is exposed with Intel VMX (nested virtualization). */
static inline bool cpu_has_vmx(CPUX86State *env)
{
    return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}

/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
+7 −7
Original line number Diff line number Diff line
@@ -1299,7 +1299,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
}

static Error *invtsc_mig_blocker;
static Error *vmx_mig_blocker;
static Error *nested_virt_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100

@@ -1597,13 +1597,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if ((env->features[FEAT_1_ECX] & CPUID_EXT_VMX) && !vmx_mig_blocker) {
        error_setg(&vmx_mig_blocker,
                   "Nested VMX virtualization does not support live migration yet");
        r = migrate_add_blocker(vmx_mig_blocker, &local_err);
    if (cpu_has_nested_virt(env) && !nested_virt_mig_blocker) {
        error_setg(&nested_virt_mig_blocker,
                   "Nested virtualization does not support live migration yet");
        r = migrate_add_blocker(nested_virt_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(vmx_mig_blocker);
            error_free(nested_virt_mig_blocker);
            return r;
        }
    }
@@ -1674,7 +1674,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
 fail:
    migrate_del_blocker(invtsc_mig_blocker);
 fail2:
    migrate_del_blocker(vmx_mig_blocker);
    migrate_del_blocker(nested_virt_mig_blocker);

    return r;
}