Commit 5931ed56 authored by Peter Maydell

Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-03-11' into staging



Block patches for the 5.0 softfreeze:
- qemu-img measure for LUKS
- Improve block-copy's performance by reducing inter-request
  dependencies
- Make curl's detection of accept-ranges more robust
- Memleak fixes
- iotest fix

# gpg: Signature made Wed 11 Mar 2020 13:19:01 GMT
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2020-03-11:
  block/block-copy: hide structure definitions
  block/block-copy: reduce intersecting request lock
  block/block-copy: rename start to offset in interfaces
  block/block-copy: refactor interfaces to use bytes instead of end
  block/block-copy: factor out find_conflicting_inflight_req
  block/block-copy: use block_status
  block/block-copy: specialcase first copy_range request
  block/block-copy: fix progress calculation
  job: refactor progress to separate object
  block/qcow2-threads: fix qcow2_decompress
  qemu-img: free memory before re-assign
  block/qcow2: do free crypto_opts in qcow2_close()
  iotests: Fix nonportable use of od --endian
  block/curl: HTTP header field names are case insensitive
  block/curl: HTTP header fields allow whitespace around values
  iotests: add 288 luks qemu-img measure test
  qemu-img: allow qemu-img measure --object without a filename
  luks: implement .bdrv_measure()
  luks: extract qcrypto_block_calculate_payload_offset()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 6e8a73e9 397f4e9d
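
A note on the curl fixes in this pull: HTTP header field names are case-insensitive and optional whitespace is permitted around field values (RFC 7230), so matching the literal string "Accept-Ranges: bytes" misses servers that reply with, e.g., "accept-ranges: bytes". A minimal sketch of a tolerant check in the spirit of those two patches (the helper name and buffer handling here are illustrative, not the code from block/curl.c):

#include <glib.h>
#include <stdbool.h>
#include <string.h>

/*
 * Return true if one raw header line advertises "Accept-Ranges: bytes".
 * The field name is matched case-insensitively and whitespace is allowed
 * around the value.  Illustrative helper only, not the patched code.
 */
static bool header_is_accept_ranges_bytes(const char *header, size_t len)
{
    const char *name = "accept-ranges:";
    const char *end = header + len;
    const char *p = header;

    if (len < strlen(name) || g_ascii_strncasecmp(p, name, strlen(name))) {
        return false;
    }
    p += strlen(name);

    while (p < end && g_ascii_isspace(*p)) { /* skip leading whitespace */
        p++;
    }
    if ((size_t)(end - p) < strlen("bytes") ||
        strncmp(p, "bytes", strlen("bytes"))) {
        return false;
    }
    p += strlen("bytes");

    while (p < end && g_ascii_isspace(*p)) { /* allow trailing CRLF/spaces */
        p++;
    }
    return p == end;
}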
block/backup-top.c  +4 −2
@@ -38,6 +38,7 @@ typedef struct BDRVBackupTopState {
     BlockCopyState *bcs;
     BdrvChild *target;
     bool active;
+    int64_t cluster_size;
 } BDRVBackupTopState;
 
 static coroutine_fn int backup_top_co_preadv(
@@ -57,8 +58,8 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
         return 0;
     }
 
-    off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size);
-    end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size);
+    off = QEMU_ALIGN_DOWN(offset, s->cluster_size);
+    end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size);
 
     return block_copy(s->bcs, off, end - off, NULL);
 }
@@ -238,6 +239,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
         goto fail;
     }
 
+    state->cluster_size = cluster_size;
     state->bcs = block_copy_state_new(top->backing, state->target,
                                       cluster_size, write_flags, &local_err);
     if (local_err) {
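
For reference, the widening done by backup_top_cbw() above is plain cluster rounding; backup-top now caches cluster_size itself because BlockCopyState becomes opaque later in this series. A self-contained worked example with the same macros as QEMU's osdep.h (the concrete numbers are invented for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same definitions as QEMU's include/qemu/osdep.h */
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))

int main(void)
{
    int64_t cluster_size = 65536;          /* assumed 64k clusters */
    int64_t offset = 70000, bytes = 4096;  /* hypothetical guest write */

    int64_t off = QEMU_ALIGN_DOWN(offset, cluster_size);
    int64_t end = QEMU_ALIGN_UP(offset + bytes, cluster_size);

    /* prints: copy-before-write range: [65536, 131072) */
    printf("copy-before-write range: [%" PRId64 ", %" PRId64 ")\n", off, end);
    return 0;
}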
block/backup.c  +15 −23
@@ -57,15 +57,6 @@ static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
     BackupBlockJob *s = opaque;
 
     s->bytes_read += bytes;
-    job_progress_update(&s->common.job, bytes);
-}
-
-static void backup_progress_reset_callback(void *opaque)
-{
-    BackupBlockJob *s = opaque;
-    uint64_t estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap);
-
-    job_progress_set_remaining(&s->common.job, estimate);
 }
 
 static int coroutine_fn backup_do_cow(BackupBlockJob *job,
@@ -111,7 +102,7 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
 
     if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
         /* If we failed and synced, merge in the bits we didn't copy: */
-        bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap,
+        bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs),
                                          NULL, true);
     }
 }
@@ -154,7 +145,8 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
         return;
     }
 
-    bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len);
+    bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0,
+                          backup_job->len);
 }
 
 static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -199,7 +191,7 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
     BdrvDirtyBitmapIter *bdbi;
     int ret = 0;
 
-    bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap);
+    bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs));
     while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
         do {
             if (yield_and_check(job)) {
@@ -219,14 +211,14 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
     return ret;
 }
 
-static void backup_init_copy_bitmap(BackupBlockJob *job)
+static void backup_init_bcs_bitmap(BackupBlockJob *job)
 {
     bool ret;
     uint64_t estimate;
+    BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs);
 
     if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
-        ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap,
-                                               job->sync_bitmap,
+        ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap,
                                                NULL, true);
         assert(ret);
     } else {
@@ -235,12 +227,12 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
              * We can't hog the coroutine to initialize this thoroughly.
              * Set a flag and resume work when we are able to yield safely.
              */
-            job->bcs->skip_unallocated = true;
+            block_copy_set_skip_unallocated(job->bcs, true);
         }
-        bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len);
+        bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len);
     }
 
-    estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap);
+    estimate = bdrv_get_dirty_count(bcs_bitmap);
     job_progress_set_remaining(&job->common.job, estimate);
 }
 
@@ -249,7 +241,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     int ret = 0;
 
-    backup_init_copy_bitmap(s);
+    backup_init_bcs_bitmap(s);
 
     if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
         int64_t offset = 0;
@@ -268,12 +260,12 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
 
             offset += count;
         }
-        s->bcs->skip_unallocated = false;
+        block_copy_set_skip_unallocated(s->bcs, false);
     }
 
     if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
         /*
-         * All bits are set in copy_bitmap to allow any cluster to be copied.
+         * All bits are set in bcs bitmap to allow any cluster to be copied.
          * This does not actually require them to be copied.
         */
         while (!job_is_cancelled(job)) {
@@ -464,8 +456,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     job->cluster_size = cluster_size;
     job->len = len;
 
-    block_copy_set_callbacks(bcs, backup_progress_bytes_callback,
-                             backup_progress_reset_callback, job);
+    block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job);
+    block_copy_set_progress_meter(bcs, &job->common.job.progress);
 
     /* Required permissions are already taken by backup-top target */
     block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
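
The backup.c churn above is mostly fallout from "job: refactor progress to separate object": block-copy now updates a ProgressMeter directly through progress_work_done()/progress_set_remaining() instead of bouncing a reset callback through the job layer. A sketch of the shape such an object needs in order to satisfy those call sites (field names are assumed from the calls; the patch's real definition may differ):

#include <stdint.h>

typedef struct ProgressMeter {
    uint64_t current; /* amount of work already done */
    uint64_t total;   /* estimated total amount of work */
} ProgressMeter;

/* Record @done more bytes of finished work. */
static inline void progress_work_done(ProgressMeter *pm, uint64_t done)
{
    pm->current += done;
}

/* Re-estimate the total as work-done-so-far plus @remaining. */
static inline void progress_set_remaining(ProgressMeter *pm, uint64_t remaining)
{
    pm->total = pm->current + remaining;
}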
block/block-copy.c  +316 −89
@@ -24,37 +24,136 @@
 #define BLOCK_COPY_MAX_BUFFER (1 * MiB)
 #define BLOCK_COPY_MAX_MEM (128 * MiB)
 
-static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
-                                                       int64_t start,
-                                                       int64_t end)
+typedef struct BlockCopyInFlightReq {
+    int64_t offset;
+    int64_t bytes;
+    QLIST_ENTRY(BlockCopyInFlightReq) list;
+    CoQueue wait_queue; /* coroutines blocked on this request */
+} BlockCopyInFlightReq;
+
+typedef struct BlockCopyState {
+    /*
+     * BdrvChild objects are not owned or managed by block-copy. They are
+     * provided by block-copy user and user is responsible for appropriate
+     * permissions on these children.
+     */
+    BdrvChild *source;
+    BdrvChild *target;
+    BdrvDirtyBitmap *copy_bitmap;
+    int64_t in_flight_bytes;
+    int64_t cluster_size;
+    bool use_copy_range;
+    int64_t copy_size;
+    uint64_t len;
+    QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
+
+    BdrvRequestFlags write_flags;
+
+    /*
+     * skip_unallocated:
+     *
+     * Used by sync=top jobs, which first scan the source node for unallocated
+     * areas and clear them in the copy_bitmap.  During this process, the bitmap
+     * is thus not fully initialized: It may still have bits set for areas that
+     * are unallocated and should actually not be copied.
+     *
+     * This is indicated by skip_unallocated.
+     *
+     * In this case, block_copy() will query the source’s allocation status,
+     * skip unallocated regions, clear them in the copy_bitmap, and invoke
+     * block_copy_reset_unallocated() every time it does.
+     */
+    bool skip_unallocated;
+
+    ProgressMeter *progress;
+    /* progress_bytes_callback: called when some copying progress is done. */
+    ProgressBytesCallbackFunc progress_bytes_callback;
+    void *progress_opaque;
+
+    SharedResource *mem;
+} BlockCopyState;
+
+static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
+                                                           int64_t offset,
+                                                           int64_t bytes)
 {
     BlockCopyInFlightReq *req;
-    bool waited;
 
-    do {
-        waited = false;
-        QLIST_FOREACH(req, &s->inflight_reqs, list) {
-            if (end > req->start_byte && start < req->end_byte) {
-                qemu_co_queue_wait(&req->wait_queue, NULL);
-                waited = true;
-                break;
-            }
+    QLIST_FOREACH(req, &s->inflight_reqs, list) {
+        if (offset + bytes > req->offset && offset < req->offset + req->bytes) {
+            return req;
         }
-    } while (waited);
+    }
+
+    return NULL;
+}
+
+/*
+ * If there are no intersecting requests return false. Otherwise, wait for the
+ * first found intersecting request to finish and return true.
+ */
+static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
+                                             int64_t bytes)
+{
+    BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes);
+
+    if (!req) {
+        return false;
+    }
+
+    qemu_co_queue_wait(&req->wait_queue, NULL);
+
+    return true;
 }
 
+/* Called only on full-dirty region */
 static void block_copy_inflight_req_begin(BlockCopyState *s,
                                           BlockCopyInFlightReq *req,
-                                          int64_t start, int64_t end)
+                                          int64_t offset, int64_t bytes)
 {
-    req->start_byte = start;
-    req->end_byte = end;
+    assert(!find_conflicting_inflight_req(s, offset, bytes));
+
+    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
+    s->in_flight_bytes += bytes;
+
+    req->offset = offset;
+    req->bytes = bytes;
     qemu_co_queue_init(&req->wait_queue);
     QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
 }
 
-static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
+/*
+ * block_copy_inflight_req_shrink
+ *
+ * Drop the tail of the request to be handled later. Set dirty bits back and
+ * wake up all requests waiting for us (may be some of them are not intersecting
+ * with shrunk request)
+ */
+static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s,
+        BlockCopyInFlightReq *req, int64_t new_bytes)
+{
+    if (new_bytes == req->bytes) {
+        return;
+    }
+
+    assert(new_bytes > 0 && new_bytes < req->bytes);
+
+    s->in_flight_bytes -= req->bytes - new_bytes;
+    bdrv_set_dirty_bitmap(s->copy_bitmap,
+                          req->offset + new_bytes, req->bytes - new_bytes);
+
+    req->bytes = new_bytes;
+    qemu_co_queue_restart_all(&req->wait_queue);
+}
+
+static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s,
+                                                     BlockCopyInFlightReq *req,
+                                                     int ret)
 {
+    s->in_flight_bytes -= req->bytes;
+    if (ret < 0) {
+        bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes);
+    }
     QLIST_REMOVE(req, list);
     qemu_co_queue_restart_all(&req->wait_queue);
 }
@@ -70,16 +169,19 @@ void block_copy_state_free(BlockCopyState *s)
     g_free(s);
 }
 
+static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
+{
+    return MIN_NON_ZERO(INT_MAX,
+                        MIN_NON_ZERO(source->bs->bl.max_transfer,
+                                     target->bs->bl.max_transfer));
+}
+
 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size,
                                      BdrvRequestFlags write_flags, Error **errp)
 {
     BlockCopyState *s;
     BdrvDirtyBitmap *copy_bitmap;
-    uint32_t max_transfer =
-            MIN_NON_ZERO(INT_MAX,
-                         MIN_NON_ZERO(source->bs->bl.max_transfer,
-                                      target->bs->bl.max_transfer));
 
     copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                            errp);
@@ -99,7 +201,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
         .mem = shres_create(BLOCK_COPY_MAX_MEM),
     };
 
-    if (max_transfer < cluster_size) {
+    if (block_copy_max_transfer(source, target) < cluster_size) {
         /*
          * copy_range does not respect max_transfer. We don't want to bother
          * with requests smaller than block-copy cluster size, so fallback to
@@ -114,12 +216,11 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
         s->copy_size = cluster_size;
     } else {
         /*
-         * copy_range does not respect max_transfer (it's a TODO), so we factor
-         * that in here.
+         * We enable copy-range, but keep small copy_size, until first
+         * successful copy_range (look at block_copy_do_copy).
          */
         s->use_copy_range = true;
-        s->copy_size = MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
-                           QEMU_ALIGN_DOWN(max_transfer, cluster_size));
+        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
     }
 
     QLIST_INIT(&s->inflight_reqs);
@@ -127,48 +228,83 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
     return s;
 }
 
-void block_copy_set_callbacks(
+void block_copy_set_progress_callback(
         BlockCopyState *s,
         ProgressBytesCallbackFunc progress_bytes_callback,
-        ProgressResetCallbackFunc progress_reset_callback,
         void *progress_opaque)
 {
     s->progress_bytes_callback = progress_bytes_callback;
-    s->progress_reset_callback = progress_reset_callback;
     s->progress_opaque = progress_opaque;
 }
 
+void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
+{
+    s->progress = pm;
+}
+
 /*
  * block_copy_do_copy
  *
- * Do copy of cluser-aligned chunk. @end is allowed to exceed s->len only to
- * cover last cluster when s->len is not aligned to clusters.
+ * Do copy of cluster-aligned chunk. Requested region is allowed to exceed
+ * s->len only to cover last cluster when s->len is not aligned to clusters.
  *
  * No sync here: nor bitmap neighter intersecting requests handling, only copy.
 *
  * Returns 0 on success.
  */
 static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
-                                           int64_t start, int64_t end,
-                                           bool *error_is_read)
+                                           int64_t offset, int64_t bytes,
+                                           bool zeroes, bool *error_is_read)
 {
     int ret;
-    int nbytes = MIN(end, s->len) - start;
+    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
     void *bounce_buffer = NULL;
 
-    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
-    assert(QEMU_IS_ALIGNED(end, s->cluster_size));
-    assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
+    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
+    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
+    assert(offset < s->len);
+    assert(offset + bytes <= s->len ||
+           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
+    assert(nbytes < INT_MAX);
+
+    if (zeroes) {
+        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
+                                    ~BDRV_REQ_WRITE_COMPRESSED);
+        if (ret < 0) {
+            trace_block_copy_write_zeroes_fail(s, offset, ret);
+            if (error_is_read) {
+                *error_is_read = false;
+            }
+        }
+        return ret;
+    }
 
     if (s->use_copy_range) {
-        ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
+        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                  0, s->write_flags);
         if (ret < 0) {
-            trace_block_copy_copy_range_fail(s, start, ret);
+            trace_block_copy_copy_range_fail(s, offset, ret);
             s->use_copy_range = false;
+            s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
             /* Fallback to read+write with allocated buffer */
         } else {
+            if (s->use_copy_range) {
+                /*
+                 * Successful copy-range. Now increase copy_size.  copy_range
+                 * does not respect max_transfer (it's a TODO), so we factor
+                 * that in here.
+                 *
+                 * Note: we double-check s->use_copy_range for the case when
+                 * parallel block-copy request unsets it during previous
+                 * bdrv_co_copy_range call.
+                 */
+                s->copy_size =
+                        MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+                            QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
+                                                                    s->target),
+                                            s->cluster_size));
+            }
             goto out;
         }
     }
@@ -176,24 +312,27 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
     /*
      * In case of failed copy_range request above, we may proceed with buffered
      * request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
-     * be properly limited, so don't care too much.
+     * be properly limited, so don't care too much. Moreover the most likely
+     * case (copy_range is unsupported for the configuration, so the very first
+     * copy_range request fails) is handled by setting large copy_size only
+     * after first successful copy_range.
      */
 
     bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
 
-    ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
+    ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
     if (ret < 0) {
-        trace_block_copy_read_fail(s, start, ret);
+        trace_block_copy_read_fail(s, offset, ret);
         if (error_is_read) {
             *error_is_read = true;
         }
         goto out;
     }
 
-    ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
+    ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                          s->write_flags);
     if (ret < 0) {
-        trace_block_copy_write_fail(s, start, ret);
+        trace_block_copy_write_fail(s, offset, ret);
         if (error_is_read) {
             *error_is_read = false;
         }
@@ -206,6 +345,38 @@ out:
     return ret;
 }
 
+static int block_copy_block_status(BlockCopyState *s, int64_t offset,
+                                   int64_t bytes, int64_t *pnum)
+{
+    int64_t num;
+    BlockDriverState *base;
+    int ret;
+
+    if (s->skip_unallocated && s->source->bs->backing) {
+        base = s->source->bs->backing->bs;
+    } else {
+        base = NULL;
+    }
+
+    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
+                                  NULL, NULL);
+    if (ret < 0 || num < s->cluster_size) {
+        /*
+         * On error or if failed to obtain large enough chunk just fallback to
+         * copy one cluster.
+         */
+        num = s->cluster_size;
+        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
+    } else if (offset + num == s->len) {
+        num = QEMU_ALIGN_UP(num, s->cluster_size);
+    } else {
+        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
+    }
+
+    *pnum = num;
+    return ret;
+}
+
 /*
  * Check if the cluster starting at offset is allocated or not.
  * return via pnum the number of contiguous clusters sharing this allocation.
@@ -269,21 +440,28 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
 
     if (!ret) {
         bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
-        s->progress_reset_callback(s->progress_opaque);
+        progress_set_remaining(s->progress,
+                               bdrv_get_dirty_count(s->copy_bitmap) +
+                               s->in_flight_bytes);
     }
 
     *count = bytes;
     return ret;
 }
 
-int coroutine_fn block_copy(BlockCopyState *s,
-                            int64_t start, uint64_t bytes,
-                            bool *error_is_read)
+/*
+ * block_copy_dirty_clusters
+ *
+ * Copy dirty clusters in @offset/@bytes range.
+ * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
+ * clusters found and -errno on failure.
+ */
+static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
+                                                  int64_t offset, int64_t bytes,
+                                                  bool *error_is_read)
 {
     int ret = 0;
-    int64_t end = bytes + start; /* bytes */
-    int64_t status_bytes;
-    BlockCopyInFlightReq req;
+    bool found_dirty = false;
 
     /*
      * block_copy() user is responsible for keeping source and target in same
@@ -292,60 +470,109 @@ int coroutine_fn block_copy(BlockCopyState *s,
     assert(bdrv_get_aio_context(s->source->bs) ==
            bdrv_get_aio_context(s->target->bs));
 
-    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
-    assert(QEMU_IS_ALIGNED(end, s->cluster_size));
-
-    block_copy_wait_inflight_reqs(s, start, bytes);
-    block_copy_inflight_req_begin(s, &req, start, end);
+    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
 
-    while (start < end) {
-        int64_t next_zero, chunk_end;
+    while (bytes) {
+        BlockCopyInFlightReq req;
+        int64_t next_zero, cur_bytes, status_bytes;
 
-        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
-            trace_block_copy_skip(s, start);
-            start += s->cluster_size;
+        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
+            trace_block_copy_skip(s, offset);
+            offset += s->cluster_size;
+            bytes -= s->cluster_size;
             continue; /* already copied */
         }
 
-        chunk_end = MIN(end, start + s->copy_size);
+        found_dirty = true;
+
+        cur_bytes = MIN(bytes, s->copy_size);
 
-        next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
-                                                chunk_end - start);
+        next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
+                                                cur_bytes);
         if (next_zero >= 0) {
-            assert(next_zero > start); /* start is dirty */
-            assert(next_zero < chunk_end); /* no need to do MIN() */
-            chunk_end = next_zero;
+            assert(next_zero > offset); /* offset is dirty */
+            assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
+            cur_bytes = next_zero - offset;
         }
+        block_copy_inflight_req_begin(s, &req, offset, cur_bytes);
+
+        ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
+        assert(ret >= 0); /* never fail */
+        cur_bytes = MIN(cur_bytes, status_bytes);
+        block_copy_inflight_req_shrink(s, &req, cur_bytes);
+        if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
+            block_copy_inflight_req_end(s, &req, 0);
+            progress_set_remaining(s->progress,
+                                   bdrv_get_dirty_count(s->copy_bitmap) +
+                                   s->in_flight_bytes);
+            trace_block_copy_skip_range(s, offset, status_bytes);
+            offset += status_bytes;
+            bytes -= status_bytes;
+            continue;
+        }
 
-        if (s->skip_unallocated) {
-            ret = block_copy_reset_unallocated(s, start, &status_bytes);
-            if (ret == 0) {
-                trace_block_copy_skip_range(s, start, status_bytes);
-                start += status_bytes;
-                continue;
-            }
-            /* Clamp to known allocated region */
-            chunk_end = MIN(chunk_end, start + status_bytes);
-        }
+        trace_block_copy_process(s, offset);
 
-        trace_block_copy_process(s, start);
-
-        bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
-
-        co_get_from_shres(s->mem, chunk_end - start);
-        ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
-        co_put_to_shres(s->mem, chunk_end - start);
+        co_get_from_shres(s->mem, cur_bytes);
+        ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
+                                 error_is_read);
+        co_put_to_shres(s->mem, cur_bytes);
+        block_copy_inflight_req_end(s, &req, ret);
         if (ret < 0) {
-            bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
-            break;
+            return ret;
         }
 
-        s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
-        start = chunk_end;
-        ret = 0;
+        progress_work_done(s->progress, cur_bytes);
+        s->progress_bytes_callback(cur_bytes, s->progress_opaque);
+        offset += cur_bytes;
+        bytes -= cur_bytes;
     }
 
-    block_copy_inflight_req_end(&req);
+    return found_dirty;
+}
 
-    return ret;
+/*
+ * block_copy
+ *
+ * Copy requested region, accordingly to dirty bitmap.
+ * Collaborate with parallel block_copy requests: if they succeed it will help
+ * us. If they fail, we will retry not-copied regions. So, if we return error,
+ * it means that some I/O operation failed in context of _this_ block_copy call,
+ * not some parallel operation.
+ */
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
+                            bool *error_is_read)
+{
+    int ret;
+
+    do {
+        ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);
+
+        if (ret == 0) {
+            ret = block_copy_wait_one(s, offset, bytes);
+        }
+
+        /*
+         * We retry in two cases:
+         * 1. Some progress done
+         *    Something was copied, which means that there were yield points
+         *    and some new dirty bits may have appeared (due to failed parallel
+         *    block-copy requests).
+         * 2. We have waited for some intersecting block-copy request
+         *    It may have failed and produced new dirty bits.
+         */
+    } while (ret > 0);
+
+    return ret;
+}
+
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
+{
+    return s->copy_bitmap;
+}
+
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
+{
+    s->skip_unallocated = skip;
 }
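
Taken together, block-copy is now an opaque module: BlockCopyState lives in the .c file and callers go through accessors such as block_copy_dirty_bitmap() and block_copy_set_skip_unallocated(). A hedged sketch of how a user drives the interface declared above (simplified, error handling trimmed; copy_everything is a made-up caller, not QEMU code):

/* Assumes the block-copy declarations used throughout this diff. */
static int coroutine_fn copy_everything(BdrvChild *source, BdrvChild *target,
                                        int64_t len, int64_t cluster_size,
                                        ProgressMeter *pm, Error **errp)
{
    bool error_is_read;
    int ret;
    BlockCopyState *s = block_copy_state_new(source, target, cluster_size,
                                             0 /* write_flags */, errp);
    if (!s) {
        return -EINVAL;
    }

    block_copy_set_progress_meter(s, pm);

    /*
     * One call covers the whole (cluster-aligned) range: block_copy()
     * consults the dirty bitmap, cooperates with intersecting parallel
     * requests, and retries until no dirty clusters remain or an I/O
     * error happens in this very call.
     */
    ret = block_copy(s, 0, QEMU_ALIGN_UP(len, cluster_size), &error_is_read);

    block_copy_state_free(s);
    return ret;
}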
+62 −0

File changed. Preview size limit exceeded, changes collapsed.

+28 −4

File changed. Preview size limit exceeded, changes collapsed.