Commit 3c825bb7 authored by Peter Maydell

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging



* x86 TCG fixes for 64-bit call gates (Andrew)
* qemu-guest-agent freeze-hook tweak (Christian)
* pm_smbus improvements (Corey)
* Move validation to pre_plug for pc-dimm (David)
* Fix memory leaks (Eduardo, Marc-André)
* synchronization profiler (Emilio)
* Convert the CPU list to RCU (Emilio)
* LSI support for PPR Extended Message (George)
* vhost-scsi support for protection information (Greg)
* Mark mptsas as a storage device in the help (Guenter)
* checkpatch tweak cherry-picked from Linux (me)
* Typos, cleanups and dead-code removal (Julia, Marc-André)
* qemu-pr-helper support for old libmultipath (Murilo)
* Annotate fallthroughs (me)
* MemoryRegionOps cleanup (me, Peter)
* Make s390 qtests independent of libqos, which doesn't actually support s390 (me)
* Make cpu_get_ticks independent of the BQL (me)
* Introspection fixes (Thomas)
* Support QEMU_MODULE_DIR environment variable (ryang)

# gpg: Signature made Thu 23 Aug 2018 17:46:30 BST
# gpg:                using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (69 commits)
  KVM: cleanup unnecessary #ifdef KVM_CAP_...
  target/i386: update MPX flags when CPL changes
  i2c: pm_smbus: Add the ability to force block transfer enable
  i2c: pm_smbus: Don't delay host status register busy bit when interrupts are enabled
  i2c: pm_smbus: Add interrupt handling
  i2c: pm_smbus: Add block transfer capability
  i2c: pm_smbus: Make the I2C block read command read-only
  i2c: pm_smbus: Fix the semantics of block I2C transfers
  i2c: pm_smbus: Clean up some style issues
  pc-dimm: assign and verify the "addr" property during pre_plug
  pc: drop memory region alignment check for 0
  util/oslib-win32: indicate alignment for qemu_anon_ram_alloc()
  pc-dimm: assign and verify the "slot" property during pre_plug
  ipmi: Use proper struct reference for BT vmstate
  vhost-scsi: expose 't10_pi' property for VIRTIO_SCSI_F_T10_PI
  vhost-scsi: unify vhost-scsi get_features implementations
  vhost-user-scsi: move host_features into VHostSCSICommon
  cpus: allow cpu_get_ticks out of BQL
  cpus: protect TimerState writes with a spinlock
  seqlock: add QemuLockable support
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 5ccac548 b2e78fac
MAINTAINERS  +10 −1
@@ -1696,7 +1696,6 @@ F: qom/
 X: qom/cpu.c
 F: tests/check-qom-interface.c
 F: tests/check-qom-proplist.c
-F: tests/qom-test.c
 
 QMP
 M: Markus Armbruster <armbru@redhat.com>
@@ -1708,6 +1707,16 @@ F: scripts/qmp/
 F: tests/qmp-test.c
 T: git git://repo.or.cz/qemu/armbru.git qapi-next
 
+qtest
+M: Paolo Bonzini <pbonzini@redhat.com>
+M: Thomas Huth <thuth@redhat.com>
+M: Laurent Vivier <lvivier@redhat.com>
+S: Maintained
+F: qtest.c
+F: tests/libqtest.*
+F: tests/libqos/
+F: tests/*-test.c
+
 Register API
 M: Alistair Francis <alistair@alistair23.me>
 S: Maintained
accel/kvm/kvm-all.c  +0 −2
@@ -1639,10 +1639,8 @@ static int kvm_init(MachineState *ms)
         s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
     }
 
-#ifdef KVM_CAP_READONLY_MEM
     kvm_readonly_mem_allowed =
         (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
-#endif
 
     kvm_eventfds_allowed =
         (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
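
For context on this hunk: KVM_CAP_READONLY_MEM comes from QEMU's own imported copy of the Linux headers, so it is always defined at compile time and the #ifdef guarded nothing. Whether the running kernel supports the capability is a run-time question, answered through the KVM_CHECK_EXTENSION ioctl. A simplified sketch of that probe pattern (an illustration, not the code in this diff; the real helper is kvm_check_extension() in accel/kvm/kvm-all.c):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Capabilities unknown to the running kernel make KVM_CHECK_EXTENSION
     * return 0 (or an error), so callers need no compile-time guard. */
    static int check_extension_sketch(int kvm_fd, long cap)
    {
        int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
        return ret > 0 ? ret : 0;
    }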
configure  +23 −4
@@ -3612,6 +3612,7 @@ fi
 # libmpathpersist probe
 
 if test "$mpath" != "no" ; then
+  # probe for the new API
   cat > $TMPC <<EOF
 #include <libudev.h>
 #include <mpath_persist.h>
@@ -3633,9 +3634,27 @@ int main(void) {
 EOF
   if compile_prog "" "-ludev -lmultipath -lmpathpersist" ; then
     mpathpersist=yes
+    mpathpersist_new_api=yes
   else
-    mpathpersist=no
+    # probe for the old API
+    cat > $TMPC <<EOF
+#include <libudev.h>
+#include <mpath_persist.h>
+unsigned mpath_mx_alloc_len = 1024;
+int logsink;
+int main(void) {
+    struct udev *udev = udev_new();
+    mpath_lib_init(udev);
+    return 0;
+}
+EOF
+    if compile_prog "" "-ludev -lmultipath -lmpathpersist" ; then
+      mpathpersist=yes
+      mpathpersist_new_api=no
+    else
+      mpathpersist=no
+    fi
   fi
 else
   mpathpersist=no
 fi
@@ -6409,9 +6428,6 @@ if test "$bluez" = "yes" ; then
  echo "CONFIG_BLUEZ=y" >> $config_host_mak
  echo "BLUEZ_CFLAGS=$bluez_cflags" >> $config_host_mak
fi
if test "$glib_subprocess" = "yes" ; then
  echo "CONFIG_HAS_GLIB_SUBPROCESS_TESTS=y" >> $config_host_mak
fi
if test "$gtk" = "yes" ; then
  echo "CONFIG_GTK=m" >> $config_host_mak
  echo "CONFIG_GTKABI=$gtkabi" >> $config_host_mak
@@ -6495,6 +6511,9 @@ if test "$virtfs" = "yes" ; then
 fi
 if test "$mpath" = "yes" ; then
   echo "CONFIG_MPATH=y" >> $config_host_mak
+  if test "$mpathpersist_new_api" = "yes"; then
+    echo "CONFIG_MPATH_NEW_API=y" >> $config_host_mak
+  fi
 fi
 if test "$vhost_scsi" = "yes" ; then
   echo "CONFIG_VHOST_SCSI=y" >> $config_host_mak
cpus-common.c  +2 −2
@@ -84,7 +84,7 @@ void cpu_list_add(CPUState *cpu)
     } else {
         assert(!cpu_index_auto_assigned);
     }
-    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
+    QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
     qemu_mutex_unlock(&qemu_cpu_list_lock);
 
     finish_safe_work(cpu);
@@ -101,7 +101,7 @@ void cpu_list_remove(CPUState *cpu)

     assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));
 
-    QTAILQ_REMOVE(&cpus, cpu, node);
+    QTAILQ_REMOVE_RCU(&cpus, cpu, node);
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
     qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
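
These two hunks are the writer side of the "Convert the CPU list to RCU" change: insertions and removals still serialize on qemu_cpu_list_lock, but the RCU variants of the queue macros publish the updated links safely, so readers no longer need the mutex at all. A minimal sketch of the reader side this enables, using rcu_read_lock()/rcu_read_unlock() and QTAILQ_FOREACH_RCU() from include/qemu/rcu.h and include/qemu/rcu_queue.h (the counting loop itself is illustrative):

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"
    #include "qemu/rcu_queue.h"
    #include "qom/cpu.h"

    /* Walk the global CPU list without taking qemu_cpu_list_lock.
     * The RCU read-side critical section keeps every node we can
     * reach alive until we drop it. */
    static int count_cpus_sketch(void)
    {
        CPUState *cpu;
        int n = 0;

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(cpu, &cpus, node) {
            n++;
        }
        rcu_read_unlock();
        return n;
    }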
cpus.c  +117 −69
@@ -121,8 +121,6 @@ static bool all_cpu_threads_idle(void)
 /* Protected by TimersState seqlock */
 
 static bool icount_sleep = true;
-/* Conversion factor from emulated instructions to virtual clock ticks.  */
-static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
 #define MAX_ICOUNT_SHIFT 10
 
@@ -131,20 +129,27 @@ typedef struct TimersState {
     int64_t cpu_ticks_prev;
     int64_t cpu_ticks_offset;
 
-    /* cpu_clock_offset can be read out of BQL, so protect it with
-     * this lock.
+    /* Protect fields that can be respectively read outside the
+     * BQL, and written from multiple threads.
      */
     QemuSeqLock vm_clock_seqlock;
-    int64_t cpu_clock_offset;
-    int32_t cpu_ticks_enabled;
-    int64_t dummy;
+    QemuSpin vm_clock_lock;
+
+    int16_t cpu_ticks_enabled;
+
+    /* Conversion factor from emulated instructions to virtual clock ticks.  */
+    int16_t icount_time_shift;
 
     /* Compensate for varying guest execution speed.  */
     int64_t qemu_icount_bias;
+
+    int64_t vm_clock_warp_start;
+    int64_t cpu_clock_offset;
+
     /* Only written by TCG thread */
     int64_t qemu_icount;
+
     /* for adjusting icount */
-    int64_t vm_clock_warp_start;
     QEMUTimer *icount_rt_timer;
     QEMUTimer *icount_vm_timer;
     QEMUTimer *icount_warp_timer;
@@ -245,16 +250,19 @@ void cpu_update_icount(CPUState *cpu)
     int64_t executed = cpu_get_icount_executed(cpu);
     cpu->icount_budget -= executed;
 
-#ifdef CONFIG_ATOMIC64
+#ifndef CONFIG_ATOMIC64
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
+#endif
     atomic_set__nocheck(&timers_state.qemu_icount,
-                        atomic_read__nocheck(&timers_state.qemu_icount) +
-                        executed);
-#else /* FIXME: we need 64bit atomics to do this safely */
-    timers_state.qemu_icount += executed;
+                        timers_state.qemu_icount + executed);
+#ifndef CONFIG_ATOMIC64
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                         &timers_state.vm_clock_lock);
 #endif
 }
 
-int64_t cpu_get_icount_raw(void)
+static int64_t cpu_get_icount_raw_locked(void)
 {
     CPUState *cpu = current_cpu;
 
@@ -266,20 +274,30 @@ int64_t cpu_get_icount_raw(void)
         /* Take into account what has run */
         cpu_update_icount(cpu);
     }
-#ifdef CONFIG_ATOMIC64
+    /* The read is protected by the seqlock, so __nocheck is okay.  */
     return atomic_read__nocheck(&timers_state.qemu_icount);
-#else /* FIXME: we need 64bit atomics to do this safely */
-    return timers_state.qemu_icount;
-#endif
 }
 
-/* Return the virtual CPU time, based on the instruction counter.  */
 static int64_t cpu_get_icount_locked(void)
 {
-    int64_t icount = cpu_get_icount_raw();
-    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
+    int64_t icount = cpu_get_icount_raw_locked();
+    return atomic_read__nocheck(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount);
 }
 
+int64_t cpu_get_icount_raw(void)
+{
+    int64_t icount;
+    unsigned start;
+
+    do {
+        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+        icount = cpu_get_icount_raw_locked();
+    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
+
+    return icount;
+}
+
+/* Return the virtual CPU time, based on the instruction counter.  */
 int64_t cpu_get_icount(void)
 {
     int64_t icount;
@@ -295,14 +313,29 @@ int64_t cpu_get_icount(void)

 int64_t cpu_icount_to_ns(int64_t icount)
 {
-    return icount << icount_time_shift;
+    return icount << atomic_read(&timers_state.icount_time_shift);
 }
 
+static int64_t cpu_get_ticks_locked(void)
+{
+    int64_t ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_host_ticks();
+    }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Non increasing ticks may happen if the host uses software suspend.  */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
+    }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
+}
+
 /* return the time elapsed in VM between vm_start and vm_stop.  Unless
  * icount is active, cpu_get_ticks() uses units of the host CPU cycle
  * counter.
- *
- * Caller must hold the BQL
  */
 int64_t cpu_get_ticks(void)
 {
@@ -312,19 +345,9 @@ int64_t cpu_get_ticks(void)
         return cpu_get_icount();
     }
 
-    ticks = timers_state.cpu_ticks_offset;
-    if (timers_state.cpu_ticks_enabled) {
-        ticks += cpu_get_host_ticks();
-    }
-
-    if (timers_state.cpu_ticks_prev > ticks) {
-        /* Note: non increasing ticks may happen if the host uses
-           software suspend */
-        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        ticks = timers_state.cpu_ticks_prev;
-    }
-
-    timers_state.cpu_ticks_prev = ticks;
+    qemu_spin_lock(&timers_state.vm_clock_lock);
+    ticks = cpu_get_ticks_locked();
+    qemu_spin_unlock(&timers_state.vm_clock_lock);
     return ticks;
 }
 
@@ -361,14 +384,15 @@ int64_t cpu_get_clock(void)
  */
 void cpu_enable_ticks(void)
 {
-    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
-    seqlock_write_begin(&timers_state.vm_clock_seqlock);
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
     if (!timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
         timers_state.cpu_clock_offset -= get_clock();
         timers_state.cpu_ticks_enabled = 1;
     }
-    seqlock_write_end(&timers_state.vm_clock_seqlock);
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
 }
 
 /* disable cpu_get_ticks() : the clock is stopped. You must not call
@@ -377,14 +401,15 @@ void cpu_enable_ticks(void)
  */
 void cpu_disable_ticks(void)
 {
-    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
-    seqlock_write_begin(&timers_state.vm_clock_seqlock);
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
     if (timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset += cpu_get_host_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }
-    seqlock_write_end(&timers_state.vm_clock_seqlock);
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                         &timers_state.vm_clock_lock);
 }
 
 /* Correlation between real and virtual time is always going to be
@@ -407,7 +432,8 @@ static void icount_adjust(void)
         return;
     }
 
-    seqlock_write_begin(&timers_state.vm_clock_seqlock);
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
     cur_time = cpu_get_clock_locked();
     cur_icount = cpu_get_icount_locked();
 
@@ -415,20 +441,24 @@ static void icount_adjust(void)
     /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
     if (delta > 0
         && last_delta + ICOUNT_WOBBLE < delta * 2
-        && icount_time_shift > 0) {
+        && timers_state.icount_time_shift > 0) {
         /* The guest is getting too far ahead.  Slow time down.  */
-        icount_time_shift--;
+        atomic_set(&timers_state.icount_time_shift,
+                   timers_state.icount_time_shift - 1);
     }
     if (delta < 0
         && last_delta - ICOUNT_WOBBLE > delta * 2
-        && icount_time_shift < MAX_ICOUNT_SHIFT) {
+        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
         /* The guest is getting too far behind.  Speed time up.  */
-        icount_time_shift++;
+        atomic_set(&timers_state.icount_time_shift,
+                   timers_state.icount_time_shift + 1);
     }
     last_delta = delta;
-    timers_state.qemu_icount_bias = cur_icount
-                              - (timers_state.qemu_icount << icount_time_shift);
-    seqlock_write_end(&timers_state.vm_clock_seqlock);
+    atomic_set__nocheck(&timers_state.qemu_icount_bias,
+                        cur_icount - (timers_state.qemu_icount
+                                      << timers_state.icount_time_shift));
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                         &timers_state.vm_clock_lock);
 }
 
 static void icount_adjust_rt(void *opaque)
@@ -448,7 +478,8 @@ static void icount_adjust_vm(void *opaque)

 static int64_t qemu_icount_round(int64_t count)
 {
-    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
+    int shift = atomic_read(&timers_state.icount_time_shift);
+    return (count + (1 << shift) - 1) >> shift;
 }
 
 static void icount_warp_rt(void)
@@ -468,7 +499,8 @@ static void icount_warp_rt(void)
         return;
     }
 
-    seqlock_write_begin(&timers_state.vm_clock_seqlock);
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
     if (runstate_is_running()) {
         int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                      cpu_get_clock_locked());
@@ -484,10 +516,12 @@ static void icount_warp_rt(void)
             int64_t delta = clock - cur_icount;
             warp_delta = MIN(warp_delta, delta);
         }
-        timers_state.qemu_icount_bias += warp_delta;
+        atomic_set__nocheck(&timers_state.qemu_icount_bias,
+                            timers_state.qemu_icount_bias + warp_delta);
     }
     timers_state.vm_clock_warp_start = -1;
-    seqlock_write_end(&timers_state.vm_clock_seqlock);
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
 
     if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -512,9 +546,12 @@ void qtest_clock_warp(int64_t dest)
         int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
         int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
 
-        seqlock_write_begin(&timers_state.vm_clock_seqlock);
-        timers_state.qemu_icount_bias += warp;
-        seqlock_write_end(&timers_state.vm_clock_seqlock);
+        seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                           &timers_state.vm_clock_lock);
+        atomic_set__nocheck(&timers_state.qemu_icount_bias,
+                            timers_state.qemu_icount_bias + warp);
+        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                             &timers_state.vm_clock_lock);
 
         qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
         timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
@@ -581,9 +618,12 @@ void qemu_start_warp_timer(void)
              * It is useful when we want a deterministic execution time,
              * isolated from host latencies.
              */
-            seqlock_write_begin(&timers_state.vm_clock_seqlock);
-            timers_state.qemu_icount_bias += deadline;
-            seqlock_write_end(&timers_state.vm_clock_seqlock);
+            seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                               &timers_state.vm_clock_lock);
+            atomic_set__nocheck(&timers_state.qemu_icount_bias,
+                                timers_state.qemu_icount_bias + deadline);
+            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                                 &timers_state.vm_clock_lock);
             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
         } else {
             /*
@@ -594,12 +634,14 @@ void qemu_start_warp_timer(void)
              * you will not be sending network packets continuously instead of
              * every 100ms.
              */
-            seqlock_write_begin(&timers_state.vm_clock_seqlock);
+            seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                               &timers_state.vm_clock_lock);
             if (timers_state.vm_clock_warp_start == -1
                 || timers_state.vm_clock_warp_start > clock) {
                 timers_state.vm_clock_warp_start = clock;
             }
-            seqlock_write_end(&timers_state.vm_clock_seqlock);
+            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+                                 &timers_state.vm_clock_lock);
             timer_mod_anticipate(timers_state.icount_warp_timer,
                                  clock + deadline);
         }
@@ -700,7 +742,7 @@ static const VMStateDescription vmstate_timers = {
     .minimum_version_id = 1,
     .fields = (VMStateField[]) {
         VMSTATE_INT64(cpu_ticks_offset, TimersState),
-        VMSTATE_INT64(dummy, TimersState),
+        VMSTATE_UNUSED(8),
         VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
         VMSTATE_END_OF_LIST()
     },
@@ -812,7 +854,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
     }
     if (strcmp(option, "auto") != 0) {
         errno = 0;
-        icount_time_shift = strtol(option, &rem_str, 0);
+        timers_state.icount_time_shift = strtol(option, &rem_str, 0);
         if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
             error_setg(errp, "icount: Invalid shift value");
         }
@@ -828,7 +870,7 @@ void configure_icount(QemuOpts *opts, Error **errp)

     /* 125MIPS seems a reasonable initial guess at the guest speed.
        It will be corrected fairly quickly anyway.  */
-    icount_time_shift = 3;
+    timers_state.icount_time_shift = 3;
 
     /* Have both realtime and virtual time triggers for speed adjustment.
        The realtime trigger catches emulated time passing too slowly,
@@ -1491,7 +1533,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
             atomic_mb_set(&cpu->exit_request, 0);
         }
 
-        qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
+        qemu_tcg_rr_wait_io_event(cpu ? cpu : first_cpu);
         deal_with_unplugged_cpus();
     }
 
@@ -1762,10 +1804,16 @@ bool qemu_mutex_iothread_locked(void)
     return iothread_locked;
 }
 
-void qemu_mutex_lock_iothread(void)
+/*
+ * The BQL is taken from so many places that it is worth profiling the
+ * callers directly, instead of funneling them all through a single function.
+ */
+void qemu_mutex_lock_iothread_impl(const char *file, int line)
 {
+    QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
+
     g_assert(!qemu_mutex_iothread_locked());
-    qemu_mutex_lock(&qemu_global_mutex);
+    bql_lock(&qemu_global_mutex, file, line);
     iothread_locked = true;
 }
 
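Two patterns from the cpus.c diff above are worth spelling out. First, the TimersState locking discipline: writers take the new vm_clock_lock spinlock and bump the seqlock around every update, while readers stay lock-free and simply retry if a writer raced with them. A condensed sketch of both sides, using only the seqlock_*, qemu_spin_* and atomic_*__nocheck primitives that appear in the diff (the two wrapper functions are illustrative):

    /* Writer: the spinlock serializes writers; the seqlock makes the
     * update appear atomic to readers.  seqlock_write_lock() takes the
     * spinlock and bumps the sequence; the unlock does the reverse. */
    static void timers_write_sketch(int64_t bias)
    {
        seqlock_write_lock(&timers_state.vm_clock_seqlock,
                           &timers_state.vm_clock_lock);
        atomic_set__nocheck(&timers_state.qemu_icount_bias, bias);
        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                             &timers_state.vm_clock_lock);
    }

    /* Reader: no lock; retry whenever the sequence moved under us. */
    static int64_t timers_read_sketch(void)
    {
        int64_t bias;
        unsigned start;

        do {
            start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
            bias = atomic_read__nocheck(&timers_state.qemu_icount_bias);
        } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

        return bias;
    }

Second, the final hunk is what lets the synchronization profiler attribute BQL contention to individual call sites: the lock routine becomes a replaceable function pointer (qemu_bql_mutex_lock_func) and the _impl function receives the caller's location. The matching header change is not part of this excerpt; presumably call sites keep the old spelling through a macro along these lines (a hedged sketch, not the verbatim header):

    /* Capture where the BQL was requested so a profiling QemuMutexLockFunc
     * can report per-call-site wait times. */
    #define qemu_mutex_lock_iothread() \
        qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)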