Commit 799044b6 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging



Block layer patches

# gpg: Signature made Fri 22 Dec 2017 14:09:01 GMT
# gpg:                using RSA key 0x7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74  56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream: (35 commits)
  block: Keep nodes drained between reopen_queue/multiple
  commit: Simplify reopen of base
  test-bdrv-drain: Test graph changes in drained section
  block: Allow graph changes in subtree drained section
  test-bdrv-drain: Recursive draining with multiple parents
  test-bdrv-drain: Test behaviour in coroutine context
  test-bdrv-drain: Tests for bdrv_subtree_drain
  block: Add bdrv_subtree_drained_begin/end()
  block: Don't notify parents in drain call chain
  test-bdrv-drain: Test nested drain sections
  block: Nested drain_end must still call callbacks
  block: Don't block_job_pause_all() in bdrv_drain_all()
  test-bdrv-drain: Test drain vs. block jobs
  blockjob: Pause job on draining any job BDS
  test-bdrv-drain: Test bs->quiesce_counter
  test-bdrv-drain: Test callback for bdrv_drain
  block: Make bdrv_drain() driver callbacks non-recursive
  block: Assert drain_all is only called from main AioContext
  block: Remove unused bdrv_requests_pending
  block: Mention -drive cyls/heads/secs/trans/serial/addr in deprecation chapter
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 1e10eb53 1a63a907
Loading
Loading
Loading
Loading
+60 −15
Original line number Diff line number Diff line
@@ -822,6 +822,18 @@ static void bdrv_child_cb_drained_end(BdrvChild *child)
    bdrv_drained_end(bs);
}

/* BdrvChildRole .attach callback: when @child is attached to its parent
 * node (child->opaque), propagate any subtree drain sections currently
 * active on the parent down to the newly attached child, so the child
 * subtree's quiesce state matches the parent's.
 * NOTE(review): quoted from a rendered diff; code kept byte-identical. */
static void bdrv_child_cb_attach(BdrvChild *child)
{
    BlockDriverState *bs = child->opaque;
    bdrv_apply_subtree_drain(child, bs);
}

/* BdrvChildRole .detach callback: inverse of bdrv_child_cb_attach().
 * Ends the subtree drain sections that were applied to @child on behalf
 * of the parent node (child->opaque) before the child is removed.
 * NOTE(review): quoted from a rendered diff; code kept byte-identical. */
static void bdrv_child_cb_detach(BdrvChild *child)
{
    BlockDriverState *bs = child->opaque;
    bdrv_unapply_subtree_drain(child, bs);
}

static int bdrv_child_cb_inactivate(BdrvChild *child)
{
    BlockDriverState *bs = child->opaque;
@@ -889,6 +901,8 @@ const BdrvChildRole child_file = {
    .inherit_options = bdrv_inherited_options,
    .drained_begin   = bdrv_child_cb_drained_begin,
    .drained_end     = bdrv_child_cb_drained_end,
    .attach          = bdrv_child_cb_attach,
    .detach          = bdrv_child_cb_detach,
    .inactivate      = bdrv_child_cb_inactivate,
};

@@ -911,6 +925,8 @@ const BdrvChildRole child_format = {
    .inherit_options = bdrv_inherited_fmt_options,
    .drained_begin   = bdrv_child_cb_drained_begin,
    .drained_end     = bdrv_child_cb_drained_end,
    .attach          = bdrv_child_cb_attach,
    .detach          = bdrv_child_cb_detach,
    .inactivate      = bdrv_child_cb_inactivate,
};

@@ -953,6 +969,8 @@ static void bdrv_backing_attach(BdrvChild *c)
                    parent->backing_blocker);
    bdrv_op_unblock(backing_hd, BLOCK_OP_TYPE_BACKUP_TARGET,
                    parent->backing_blocker);

    bdrv_child_cb_attach(c);
}

static void bdrv_backing_detach(BdrvChild *c)
@@ -963,6 +981,8 @@ static void bdrv_backing_detach(BdrvChild *c)
    bdrv_op_unblock_all(c->bs, parent->backing_blocker);
    error_free(parent->backing_blocker);
    parent->backing_blocker = NULL;

    bdrv_child_cb_detach(c);
}

/*
@@ -1924,6 +1944,8 @@ void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
    assert(role == &child_backing || role == &child_file);

    if (!backing) {
        int flags = bdrv_reopen_get_flags(reopen_queue, bs);

        /* Apart from the modifications below, the same permissions are
         * forwarded and left alone as for filters */
        bdrv_filter_default_perms(bs, c, role, reopen_queue, perm, shared,
@@ -1936,7 +1958,9 @@ void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,

        /* bs->file always needs to be consistent because of the metadata. We
         * can never allow other users to resize or write to it. */
        if (!(flags & BDRV_O_NO_IO)) {
            perm |= BLK_PERM_CONSISTENT_READ;
        }
        shared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
    } else {
        /* We want consistent read from backing files if the parent needs it.
@@ -1968,16 +1992,22 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
                                      BlockDriverState *new_bs)
{
    BlockDriverState *old_bs = child->bs;
    int i;

    if (old_bs && new_bs) {
        assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
    }
    if (old_bs) {
        /* Detach first so that the recursive drain sections coming from @child
         * are already gone and we only end the drain sections that came from
         * elsewhere. */
        if (child->role->detach) {
            child->role->detach(child);
        }
        if (old_bs->quiesce_counter && child->role->drained_end) {
            for (i = 0; i < old_bs->quiesce_counter; i++) {
                child->role->drained_end(child);
            }
        if (child->role->detach) {
            child->role->detach(child);
        }
        QLIST_REMOVE(child, next_parent);
    }
@@ -1987,9 +2017,14 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
    if (new_bs) {
        QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent);
        if (new_bs->quiesce_counter && child->role->drained_begin) {
            for (i = 0; i < new_bs->quiesce_counter; i++) {
                child->role->drained_begin(child);
            }
        }

        /* Attach only after starting new drained sections, so that recursive
         * drain sections coming from @child don't get an extra .drained_begin
         * callback. */
        if (child->role->attach) {
            child->role->attach(child);
        }
@@ -2731,6 +2766,7 @@ BlockDriverState *bdrv_open(const char *filename, const char *reference,
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 * bs must be drained between bdrv_reopen_queue() and bdrv_reopen_multiple().
 */
static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
                                                 BlockDriverState *bs,
@@ -2746,6 +2782,11 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
    BdrvChild *child;
    QDict *old_options, *explicit_options;

    /* Make sure that the caller remembered to use a drained section. This is
     * important to avoid graph changes between the recursive queuing here and
     * bdrv_reopen_multiple(). */
    assert(bs->quiesce_counter > 0);

    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
@@ -2870,6 +2911,8 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 * All affected nodes must be drained between bdrv_reopen_queue() and
 * bdrv_reopen_multiple().
 */
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp)
{
@@ -2879,11 +2922,8 @@ int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **er

    assert(bs_queue != NULL);

    aio_context_release(ctx);
    bdrv_drain_all_begin();
    aio_context_acquire(ctx);

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        assert(bs_entry->state.bs->quiesce_counter > 0);
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
@@ -2912,8 +2952,6 @@ cleanup:
    }
    g_free(bs_queue);

    bdrv_drain_all_end();

    return ret;
}

@@ -2923,12 +2961,18 @@ int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
    BlockReopenQueue *queue;

    bdrv_subtree_drained_begin(bs);

    queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
    ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs), queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }

    bdrv_subtree_drained_end(bs);

    return ret;
}

@@ -4601,10 +4645,11 @@ void bdrv_img_create(const char *filename, const char *fmt,
        back_flags = flags;
        back_flags &= ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

        if (backing_fmt) {
        backing_options = qdict_new();
        if (backing_fmt) {
            qdict_put_str(backing_options, "driver", backing_fmt);
        }
        qdict_put_bool(backing_options, BDRV_OPT_FORCE_SHARE, true);

        bs = bdrv_open(full_backing, NULL, backing_options, back_flags,
                       &local_err);
@@ -4754,7 +4799,7 @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
    AioContext *ctx = bdrv_get_aio_context(bs);

    aio_disable_external(ctx);
    bdrv_parent_drained_begin(bs);
    bdrv_parent_drained_begin(bs, NULL);
    bdrv_drain(bs); /* ensure there are no in-flight requests */

    while (aio_poll(ctx, false)) {
@@ -4768,7 +4813,7 @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
     */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context);
    bdrv_parent_drained_end(bs);
    bdrv_parent_drained_end(bs, NULL);
    aio_enable_external(ctx);
    aio_context_release(new_context);
}
+1 −7
Original line number Diff line number Diff line
@@ -277,7 +277,6 @@ void commit_start(const char *job_id, BlockDriverState *bs,
                  const char *filter_node_name, Error **errp)
{
    CommitBlockJob *s;
    BlockReopenQueue *reopen_queue = NULL;
    int orig_base_flags;
    BlockDriverState *iter;
    BlockDriverState *commit_top_bs = NULL;
@@ -299,12 +298,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
    /* convert base to r/w, if necessary */
    orig_base_flags = bdrv_get_flags(base);
    if (!(orig_base_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL,
                                         orig_base_flags | BDRV_O_RDWR);
    }

    if (reopen_queue) {
        bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
        bdrv_reopen(base, orig_base_flags | BDRV_O_RDWR, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            goto fail;
+117 −47
Original line number Diff line number Diff line
@@ -40,22 +40,28 @@
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

void bdrv_parent_drained_begin(BlockDriverState *bs)
void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

void bdrv_parent_drained_end(BlockDriverState *bs)
void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
@@ -134,29 +140,13 @@ void bdrv_disable_copy_on_read(BlockDriverState *bs)
    assert(old >= 1);
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    BdrvChild *parent;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
@@ -175,8 +165,10 @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
    bdrv_wakeup(bs);
}

static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, bool recursive)
{
    BdrvChild *child, *tmp;
    BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
@@ -187,16 +179,19 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
    data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data);
    bdrv_coroutine_enter(bs, data.co);
    BDRV_POLL_WHILE(bs, !data.done);

    if (recursive) {
        QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
            bdrv_drain_invoke(child->bs, begin, true);
        }
    }
}

static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
static bool bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child, *tmp;
    bool waited;

    /* Ensure any pending metadata writes are submitted to bs->file.  */
    bdrv_drain_invoke(bs, begin);

    /* Wait for drained requests to finish */
    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

@@ -215,7 +210,7 @@ static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
             */
            bdrv_ref(bs);
        }
        waited |= bdrv_drain_recurse(bs, begin);
        waited |= bdrv_drain_recurse(bs);
        if (in_main_loop) {
            bdrv_unref(bs);
        }
@@ -224,6 +219,11 @@ static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
    return waited;
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
@@ -232,9 +232,9 @@ static void bdrv_co_drain_bh_cb(void *opaque)

    bdrv_dec_in_flight(bs);
    if (data->begin) {
        bdrv_drained_begin(bs);
        bdrv_do_drained_begin(bs, data->recursive, data->parent);
    } else {
        bdrv_drained_end(bs);
        bdrv_do_drained_end(bs, data->recursive, data->parent);
    }

    data->done = true;
@@ -242,7 +242,8 @@ static void bdrv_co_drain_bh_cb(void *opaque)
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin)
                                                bool begin, bool recursive,
                                                BdrvChild *parent)
{
    BdrvCoDrainData data;

@@ -256,6 +257,8 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
@@ -267,37 +270,99 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                           BdrvChild *parent)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true);
        bdrv_co_yield_to_drain(bs, true, recursive, parent);
        return;
    }

    /* Stop things in parent-to-child order */
    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_drain_recurse(bs, true);
    bdrv_parent_drained_begin(bs, parent);
    bdrv_drain_invoke(bs, true, false);
    bdrv_drain_recurse(bs);

    if (recursive) {
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
/* Begin a drained section on @bs only (non-recursive, no parent BdrvChild
 * to ignore during parent notification). */
void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL);
}

/* Begin a recursive drained section covering @bs and all of its children
 * (recursive=true), with no parent BdrvChild excluded. */
void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL);
}

void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                         BdrvChild *parent)
{
    BdrvChild *child, *next;
    int old_quiesce_counter;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false);
        bdrv_co_yield_to_drain(bs, false, recursive, parent);
        return;
    }
    assert(bs->quiesce_counter > 0);
    if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
        return;
    }
    old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);

    bdrv_parent_drained_end(bs);
    bdrv_drain_recurse(bs, false);
    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, false);
    bdrv_parent_drained_end(bs, parent);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_end(child->bs, true, child);
        }
    }
}

/* End a non-recursive drained section on @bs started with
 * bdrv_drained_begin(). */
void bdrv_drained_end(BlockDriverState *bs)
{
    bdrv_do_drained_end(bs, false, NULL);
}

/* End a recursive drained section on @bs and its children started with
 * bdrv_subtree_drained_begin(). */
void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    bdrv_do_drained_end(bs, true, NULL);
}

/* Called when @child is attached under @new_parent: begin one recursive
 * drained section on child->bs for each subtree drain currently active on
 * the parent (new_parent->recursive_quiesce_counter), passing @child as
 * the parent link to ignore so the parent is not re-notified. */
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child);
    }
}

/* Inverse of bdrv_apply_subtree_drain(): when @child is detached from
 * @old_parent, end the recursive drained sections that were begun on
 * child->bs on behalf of the parent's active subtree drains. */
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child);
    }
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
@@ -342,14 +407,20 @@ void bdrv_drain_all_begin(void)
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    block_job_pause_all();
    /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread
     * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on
     * nodes in several different AioContexts, so make sure we're in the main
     * context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        /* Stop things in parent-to-child order */
        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        aio_disable_external(aio_context);
        bdrv_parent_drained_begin(bs, NULL);
        bdrv_drain_invoke(bs, true, true);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
@@ -372,7 +443,7 @@ void bdrv_drain_all_begin(void)
            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs, true);
                    waited |= bdrv_drain_recurse(bs);
                }
            }
            aio_context_release(aio_context);
@@ -390,14 +461,13 @@ void bdrv_drain_all_end(void)
    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        /* Re-enable things in child-to-parent order */
        aio_context_acquire(aio_context);
        bdrv_drain_invoke(bs, false, true);
        bdrv_parent_drained_end(bs, NULL);
        aio_enable_external(aio_context);
        bdrv_parent_drained_end(bs);
        bdrv_drain_recurse(bs, false);
        aio_context_release(aio_context);
    }

    block_job_resume_all();
}

void bdrv_drain_all(void)
+8 −43
Original line number Diff line number Diff line
@@ -1672,34 +1672,12 @@ static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs,
    return status;
}

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
                        int64_t offset, int bytes)
{
    uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    int n1;

    if ((offset + bytes) <= bs_size) {
        return bytes;
    }

    if (offset >= bs_size) {
        n1 = 0;
    } else {
        n1 = bs_size - offset;
    }

    qemu_iovec_memset(qiov, n1, 0, bytes - n1);

    return n1;
}

static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
                                        uint64_t bytes, QEMUIOVector *qiov,
                                        int flags)
{
    BDRVQcow2State *s = bs->opaque;
    int offset_in_cluster, n1;
    int offset_in_cluster;
    int ret;
    unsigned int cur_bytes; /* number of bytes in current iteration */
    uint64_t cluster_offset = 0;
@@ -1734,27 +1712,14 @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
        case QCOW2_CLUSTER_UNALLOCATED:

            if (bs->backing) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov,
                                         offset, cur_bytes);
                if (n1 > 0) {
                    QEMUIOVector local_qiov;

                    qemu_iovec_init(&local_qiov, hd_qiov.niov);
                    qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1);

                BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                qemu_co_mutex_unlock(&s->lock);
                    ret = bdrv_co_preadv(bs->backing, offset, n1,
                                         &local_qiov, 0);
                ret = bdrv_co_preadv(bs->backing, offset, cur_bytes,
                                     &hd_qiov, 0);
                qemu_co_mutex_lock(&s->lock);

                    qemu_iovec_destroy(&local_qiov);

                if (ret < 0) {
                    goto fail;
                }
                }
            } else {
                /* Note: in this case, no need to wait */
                qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes);
+0 −3
Original line number Diff line number Diff line
@@ -528,9 +528,6 @@ uint32_t offset_to_reftable_index(BDRVQcow2State *s, uint64_t offset)
}

/* qcow2.c functions */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
                  int64_t sector_num, int nb_sectors);

int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
                                     int refcount_order, bool generous_increase,
                                     uint64_t *refblock_count);
Loading