Commit 959f593c authored by Sergey Fedorov, committed by Paolo Bonzini
Browse files

linux-user: Use QemuMutex and QemuCond



Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond.
This will allow making some locks and condition variables common
between user-mode and system-mode emulation.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1470158864-17651-7-git-send-email-alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a5403c69
Loading
Loading
Loading
Loading
+32 −23
Original line number Diff line number Diff line
@@ -111,17 +111,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static QemuMutex cpu_list_lock;
static QemuMutex exclusive_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static int pending_cpus;

void qemu_init_cpu_loop(void)
{
    qemu_mutex_init(&cpu_list_lock);
    qemu_mutex_init(&exclusive_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
}

/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    qemu_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

@@ -138,14 +146,14 @@ void fork_end(int child)
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&exclusive_lock);
        qemu_mutex_init(&cpu_list_lock);
        qemu_cond_init(&exclusive_cond);
        qemu_cond_init(&exclusive_resume);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
@@ -155,7 +163,7 @@ void fork_end(int child)
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

@@ -165,7 +173,7 @@ static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    qemu_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
@@ -176,8 +184,8 @@ static inline void start_exclusive(void)
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

@@ -185,42 +193,42 @@ static inline void start_exclusive(void)
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&exclusive_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    qemu_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
    qemu_mutex_unlock(&exclusive_lock);
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    qemu_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
            qemu_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
    qemu_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
    qemu_mutex_lock(&cpu_list_lock);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
    qemu_mutex_unlock(&cpu_list_lock);
}


@@ -4211,6 +4219,7 @@ int main(int argc, char **argv, char **envp)
    int ret;
    int execfd;

    qemu_init_cpu_loop();
    module_call_init(MODULE_INIT_QOM);

    if ((envlist = envlist_create()) == NULL) {