Commit 14e6fe12 authored by Paolo Bonzini
Browse files

*_run_on_cpu: introduce run_on_cpu_data type



This changes the *_run_on_cpu APIs (and helpers) to pass data in a
run_on_cpu_data type instead of a plain void *. This is because we
sometimes want to pass a target address (target_ulong) and this fails on
32 bit hosts emulating 64 bit guests.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20161027151030.20863-24-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 12e9700d
Loading
Loading
Loading
Loading
+5 −4
Original line number Diff line number Diff line
@@ -109,7 +109,7 @@ void cpu_list_remove(CPUState *cpu)
struct qemu_work_item {
    struct qemu_work_item *next;
    run_on_cpu_func func;
    void *data;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

@@ -129,7 +129,7 @@ static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
    qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;
@@ -154,7 +154,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
    }
}

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

@@ -296,7 +296,8 @@ void cpu_exec_end(CPUState *cpu)
    }
}

void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

+4 −3
Original line number Diff line number Diff line
@@ -556,7 +556,7 @@ static const VMStateDescription vmstate_timers = {
    }
};

static void cpu_throttle_thread(CPUState *cpu, void *opaque)
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
@@ -587,7 +587,8 @@ static void cpu_throttle_timer_tick(void *opaque)
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

@@ -914,7 +915,7 @@ void qemu_init_cpu_loop(void)
    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}
+7 −7
Original line number Diff line number Diff line
@@ -133,9 +133,9 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
    }
}

static void kvm_apic_put(CPUState *cs, void *data)
static void kvm_apic_put(CPUState *cs, run_on_cpu_data data)
{
    APICCommonState *s = data;
    APICCommonState *s = data.host_ptr;
    struct kvm_lapic_state kapic;
    int ret;

@@ -151,12 +151,12 @@ static void kvm_apic_put(CPUState *cs, void *data)

static void kvm_apic_post_load(APICCommonState *s)
{
    run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
    run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
}

static void do_inject_external_nmi(CPUState *cpu, void *data)
static void do_inject_external_nmi(CPUState *cpu, run_on_cpu_data data)
{
    APICCommonState *s = data;
    APICCommonState *s = data.host_ptr;
    uint32_t lvt;
    int ret;

@@ -174,7 +174,7 @@ static void do_inject_external_nmi(CPUState *cpu, void *data)

static void kvm_apic_external_nmi(APICCommonState *s)
{
    run_on_cpu(CPU(s->cpu), do_inject_external_nmi, s);
    run_on_cpu(CPU(s->cpu), do_inject_external_nmi, RUN_ON_CPU_HOST_PTR(s));
}

static void kvm_send_msi(MSIMessage *msg)
@@ -213,7 +213,7 @@ static void kvm_apic_reset(APICCommonState *s)
    /* Not used by KVM, which uses the CPU mp_state instead.  */
    s->wait_for_sipi = 0;

    run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
    run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
}

static void kvm_apic_realize(DeviceState *dev, Error **errp)
+6 −7
Original line number Diff line number Diff line
@@ -487,10 +487,9 @@ typedef struct VAPICEnableTPRReporting {
    bool enable;
} VAPICEnableTPRReporting;

static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data)
static void vapic_do_enable_tpr_reporting(CPUState *cpu, run_on_cpu_data data)
{
    VAPICEnableTPRReporting *info = data;

    VAPICEnableTPRReporting *info = data.host_ptr;
    apic_enable_tpr_access_reporting(info->apic, info->enable);
}

@@ -505,7 +504,7 @@ static void vapic_enable_tpr_reporting(bool enable)
    CPU_FOREACH(cs) {
        cpu = X86_CPU(cs);
        info.apic = cpu->apic_state;
        run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
        run_on_cpu(cs, vapic_do_enable_tpr_reporting, RUN_ON_CPU_HOST_PTR(&info));
    }
}

@@ -738,9 +737,9 @@ static void vapic_realize(DeviceState *dev, Error **errp)
    nb_option_roms++;
}

static void do_vapic_enable(CPUState *cs, void *data)
static void do_vapic_enable(CPUState *cs, run_on_cpu_data data)
{
    VAPICROMState *s = data;
    VAPICROMState *s = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);

    static const uint8_t enabled = 1;
@@ -762,7 +761,7 @@ static void kvmvapic_vm_state_change(void *opaque, int running,

    if (s->state == VAPIC_ACTIVE) {
        if (smp_cpus == 1) {
            run_on_cpu(first_cpu, do_vapic_enable, s);
            run_on_cpu(first_cpu, do_vapic_enable, RUN_ON_CPU_HOST_PTR(s));
        } else {
            zero = g_malloc0(s->rom_state.vapic_size);
            cpu_physical_memory_write(s->vapic_paddr, zero,
+3 −3
Original line number Diff line number Diff line
@@ -84,11 +84,11 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
    env->tlb_dirty = true;
}

static void spin_kick(CPUState *cs, void *data)
static void spin_kick(CPUState *cs, run_on_cpu_data data)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    SpinInfo *curspin = data;
    SpinInfo *curspin = data.host_ptr;
    hwaddr map_size = 64 * 1024 * 1024;
    hwaddr map_start;

@@ -147,7 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,

    if (!(ldq_p(&curspin->addr) & 1)) {
        /* run CPU */
        run_on_cpu(cpu, spin_kick, curspin);
        run_on_cpu(cpu, spin_kick, RUN_ON_CPU_HOST_PTR(curspin));
    }
}

Loading