Commit 2ca8a8be authored by Eduardo Habkost
Browse files

target-i386: xsave: Calculate set of xsave components on realize



Instead of doing complex calculations and calling
kvm_arch_get_supported_cpuid() inside cpu_x86_cpuid(), calculate
the set of required XSAVE components earlier, at realize time.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
parent 1fda6198
Loading
Loading
Loading
Loading
+32 −23
Original line number Diff line number Diff line
@@ -2504,9 +2504,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
@@ -2516,32 +2513,16 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
            break;
        }

        ena_mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
        for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
            const ExtSaveArea *esa = &x86_ext_save_areas[i];
            if (env->features[esa->feature] & esa->bits) {
                ena_mask |= (1ULL << i);
            }
        }

        if (kvm_enabled()) {
            KVMState *s = cs->kvm_state;
            uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            kvm_mask <<= 32;
            kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
            ena_mask &= kvm_mask;
        }

        if (count == 0) {
            *ecx = xsave_area_size(ena_mask);;
            *eax = ena_mask;
            *edx = ena_mask >> 32;
            *ecx = xsave_area_size(env->xsave_components);
            *eax = env->xsave_components;
            *edx = env->xsave_components >> 32;
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((ena_mask >> count) & 1) {
            if ((env->xsave_components >> count) & 1) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
@@ -2971,6 +2952,33 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    uint64_t mask;
    int idx;

    /* Nothing to do unless the guest CPU advertises XSAVE at all. */
    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    /* FP and SSE state (components 0 and 1) are always present. */
    mask = XSTATE_FP_MASK | XSTATE_SSE_MASK;

    /* Enable each optional save area whose guarding feature flag is set. */
    for (idx = 2; idx < ARRAY_SIZE(x86_ext_save_areas); idx++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[idx];
        if (env->features[esa->feature] & esa->bits) {
            mask |= 1ULL << idx;
        }
    }

    /* Under KVM, restrict the set to what the host kernel can support
     * (CPUID leaf 0xD subleaf 0: EDX holds bits 63:32, EAX bits 31:0).
     */
    if (kvm_enabled()) {
        uint64_t supported;

        supported = kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_EDX);
        supported <<= 32;
        supported |= kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_EAX);
        mask &= supported;
    }

    env->xsave_components = mask;
}

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
@@ -3016,6 +3024,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
        cpu->env.features[w] &= ~minus_features[w];
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
+1 −0
Original line number Diff line number Diff line
@@ -1122,6 +1122,7 @@ typedef struct CPUX86State {
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    FeatureWordArray features;
    uint64_t xsave_components;
    uint32_t cpuid_model[12];

    /* MTRRs */