Commit 099bea11 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/famz/tags/staging-pull-request' into staging



Block and testing patches

- Paolo's AIO fixes.
- VMDK streamOptimized corner case fix
- VM testing improvement on -cpu

# gpg: Signature made Wed 26 Sep 2018 03:54:08 BST
# gpg:                using RSA key CA35624C6A9171C6
# gpg: Good signature from "Fam Zheng <famz@redhat.com>"
# Primary key fingerprint: 5003 7CB7 9706 0F76 F021  AD56 CA35 624C 6A91 71C6

* remotes/famz/tags/staging-pull-request:
  vmdk: align end of file to a sector boundary
  tests/vm: Use -cpu max rather than -cpu host
  aio-posix: do skip system call if ctx->notifier polling succeeds
  aio-posix: compute timeout before polling
  aio-posix: fix concurrent access to poll_disable_cnt

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents aa8e26de 51b3c6b7
Loading
Loading
Loading
Loading
+21 −0
Original line number Diff line number Diff line
@@ -1698,6 +1698,27 @@ static int coroutine_fn
vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                           uint64_t bytes, QEMUIOVector *qiov)
{
    if (bytes == 0) {
        /* A zero-byte write is the caller's way of signalling EOF.  On
         * receiving it, round the end of every extent file up to a sector
         * boundary, since streamOptimized VMDK images must end on one. */
        BDRVVmdkState *s = bs->opaque;
        int i, ret;
        int64_t length;

        for (i = 0; i < s->num_extents; i++) {
            /* bdrv_getlength() returns a negative errno on failure;
             * propagate it unchanged to the caller. */
            length = bdrv_getlength(s->extents[i].file->bs);
            if (length < 0) {
                return length;
            }
            /* Round up (never down) so no data is cut off; growing the
             * file with PREALLOC_MODE_OFF just extends it with zeros. */
            length = QEMU_ALIGN_UP(length, BDRV_SECTOR_SIZE);
            ret = bdrv_truncate(s->extents[i].file, length,
                                PREALLOC_MODE_OFF, NULL);
            if (ret < 0) {
                return ret;
            }
        }
        return 0;
    }
    /* Normal (non-EOF) case: delegate to the regular write path. */
    return vmdk_co_pwritev(bs, offset, bytes, qiov, 0);
}

+1 −2
Original line number Diff line number Diff line
@@ -65,6 +65,7 @@ class BaseVM(object):
            self._stdout = self._devnull
        self._args = [ \
            "-nodefaults", "-m", "4G",
            "-cpu", "max",
            "-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22",
            "-device", "virtio-net-pci,netdev=vnet",
            "-vnc", "127.0.0.1:0,to=20",
@@ -72,11 +73,9 @@ class BaseVM(object):
        if vcpus:
            self._args += ["-smp", str(vcpus)]
        if os.access("/dev/kvm", os.R_OK | os.W_OK):
            self._args += ["-cpu", "host"]
            self._args += ["-enable-kvm"]
        else:
            logging.info("KVM not available, not using -enable-kvm")
            self._args += ["-cpu", "max"]
        self._data_args = []

    def _download_with_cache(self, url, sha256sum=None):
+51 −37
Original line number Diff line number Diff line
@@ -211,6 +211,7 @@ void aio_set_fd_handler(AioContext *ctx,
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    qemu_lockcnt_lock(&ctx->list_lock);

@@ -244,11 +245,9 @@ void aio_set_fd_handler(AioContext *ctx,
            QLIST_REMOVE(node, node);
            deleted = true;
        }

        if (!node->io_poll) {
            ctx->poll_disable_cnt--;
        }
        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
@@ -257,10 +256,6 @@ void aio_set_fd_handler(AioContext *ctx,

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;

            ctx->poll_disable_cnt += !io_poll;
        } else {
            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
        }

        /* Update handler with latest information */
@@ -274,6 +269,15 @@ void aio_set_fd_handler(AioContext *ctx,
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    atomic_set(&ctx->poll_disable_cnt,
               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    aio_epoll_update(ctx, node, is_new);
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
@@ -486,7 +490,7 @@ static void add_pollfd(AioHandler *node)
    npfd++;
}

static bool run_poll_handlers_once(AioContext *ctx)
static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;
@@ -494,10 +498,12 @@ static bool run_poll_handlers_once(AioContext *ctx)
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
            aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque) &&
            node->opaque != &ctx->notifier) {
            node->io_poll(node->opaque)) {
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }
@@ -518,31 +524,38 @@ static bool run_poll_handlers_once(AioContext *ctx)
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t end_time;
    int64_t start_time, elapsed_time;

    assert(ctx->notify_me);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
    assert(ctx->poll_disable_cnt == 0);

    trace_run_poll_handlers_begin(ctx, max_ns);

    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx);
    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
        progress = run_poll_handlers_once(ctx, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
    } while (!progress && elapsed_time < max_ns
             && !atomic_read(&ctx->poll_disable_cnt));

    trace_run_poll_handlers_end(ctx, progress);
    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);
    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @blocking: busy polling is only attempted when blocking is true
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *    polling succeeds.
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
@@ -550,28 +563,25 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, bool blocking)
static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
    /* See qemu_soonest_timeout() uint64_t hack */
        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
                             (uint64_t)ctx->poll_ns);
    int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);

        if (max_ns) {
    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
        poll_set_started(ctx, true);

            if (run_poll_handlers(ctx, max_ns)) {
        if (run_poll_handlers(ctx, max_ns, timeout)) {
            return true;
        }
    }
    }

    poll_set_started(ctx, false);

    /* Even if we don't run busy polling, try polling once in case it can make
     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
     */
    return run_poll_handlers_once(ctx);
    return run_poll_handlers_once(ctx, timeout);
}

bool aio_poll(AioContext *ctx, bool blocking)
@@ -601,8 +611,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    progress = try_poll_mode(ctx, blocking);
    if (!progress) {
    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &timeout);
    assert(!(timeout && progress));

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
        assert(npfd == 0);

        /* fill pollfds */
@@ -616,8 +632,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
            }
        }

        timeout = blocking ? aio_compute_timeout(ctx) : 0;

        /* wait until next event */
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;
+2 −2
Original line number Diff line number Diff line
# See docs/devel/tracing.txt for syntax documentation.

# util/aio-posix.c
run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ns %"PRId64 " timeout %"PRId64
run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64