Commit d71b62a1 authored by Eduardo Habkost
Browse files

target-i386: kvm: Allocate kvm_msrs struct once per VCPU



Instead of using 2400 bytes in the stack for 150 MSR entries in
kvm_get_msrs() and kvm_put_msrs(), allocate a buffer once for
each VCPU.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
parent 42ecabaa
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -1124,6 +1124,8 @@ typedef struct CPUX86State {
    TPRAccess tpr_access_type;
} CPUX86State;

struct kvm_msrs;

/**
 * X86CPU:
 * @env: #CPUX86State
@@ -1176,6 +1178,8 @@ struct X86CPU {
    struct DeviceState *apic_state;
    struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
    Notifier machine_done;

    struct kvm_msrs *kvm_msr_buf;
};

static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
+19 −18
Original line number Diff line number Diff line
@@ -57,6 +57,9 @@
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/*
 * Size of the per-VCPU MSR buffer: one struct kvm_msrs header followed by
 * room for 150 struct kvm_msr_entry slots — the same capacity the former
 * on-stack arrays in kvm_put_msrs()/kvm_get_msrs() provided (per the
 * commit message, that was 2400 bytes of stack per call).
 */
#define MSR_BUF_SIZE \
    (sizeof(struct kvm_msrs) + 150 * sizeof(struct kvm_msr_entry))

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
@@ -914,6 +917,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }
    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        has_msr_mtrr = true;
@@ -1462,6 +1466,11 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
    entry->data = value;
}

/*
 * Zero the VCPU's preallocated MSR buffer (header and all entry slots).
 * Called at the top of kvm_put_msrs() and kvm_get_msrs() so each
 * KVM_SET_MSRS/KVM_GET_MSRS ioctl starts from a clean buffer rather than
 * inheriting entries left over from a previous call.
 */
static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
@@ -1528,14 +1537,12 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[150];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int n = 0, i;
    int ret;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
@@ -1724,11 +1731,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
        }
    }

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = n,
    };
    cpu->kvm_msr_buf->nmsrs = n;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }
@@ -1944,13 +1949,11 @@ static int kvm_get_sregs(X86CPU *cpu)
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[150];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i, n;

    kvm_msr_buf_reset(cpu);

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
@@ -2092,11 +2095,9 @@ static int kvm_get_msrs(X86CPU *cpu)
        }
    }

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = n,
    };
    cpu->kvm_msr_buf->nmsrs = n;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }