Commit 1f373162 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging



Pull request

Rebase notes:

011/36:[0003] [FC] 'block/backup: upgrade copy_bitmap to BdrvDirtyBitmap'
016/36:[----] [-C] 'iotests: Add virtio-scsi device helper'
017/36:[0002] [FC] 'iotests: add test 257 for bitmap-mode backups'
030/36:[0011] [FC] 'block/backup: teach TOP to never copy unallocated regions'
032/36:[0018] [FC] 'iotests/257: test traditional sync modes'

11: A new hbitmap call was added late in 4.1, changed to
    bdrv_dirty_bitmap_next_zero.
16: Context-only (self.has_quit is new context in 040)
17: Removed 'auto' to follow upstream trends in iotest fashion
30: Handled explicitly on-list with R-B from Max.
32: Fix capitalization in test, as mentioned on-list.

# gpg: Signature made Sat 17 Aug 2019 00:12:13 BST
# gpg:                using RSA key F9B7ABDBBCACDF95BE76CBD07DEF8106AAFC390E
# gpg: Good signature from "John Snow (John Huston) <jsnow@redhat.com>" [full]
# Primary key fingerprint: FAEB 9711 A12C F475 812F  18F2 88A9 064D 1835 61EB
#      Subkey fingerprint: F9B7 ABDB BCAC DF95 BE76  CBD0 7DEF 8106 AAFC 390E

* remotes/jnsnow/tags/bitmaps-pull-request: (36 commits)
  tests/test-hbitmap: test next_zero and _next_dirty_area after truncate
  block/backup: refactor write_flags
  block/backup: deal with zero detection
  qapi: add dirty-bitmaps to query-named-block-nodes result
  iotests/257: test traditional sync modes
  block/backup: support bitmap sync modes for non-bitmap backups
  block/backup: teach TOP to never copy unallocated regions
  block/backup: add backup_is_cluster_allocated
  block/backup: centralize copy_bitmap initialization
  block/backup: improve sync=bitmap work estimates
  iotests/257: test API failures
  block/backup: hoist bitmap check into QMP interface
  iotests/257: Refactor backup helpers
  iotests/257: add EmulatedBitmap class
  iotests/257: add Pattern class
  iotests: test bitmap moving inside 254
  qapi: implement block-dirty-bitmap-remove transaction action
  blockdev: reduce aio_context locked sections in bitmap add/remove
  block/backup: loosen restriction on readonly bitmaps
  iotests: add test 257 for bitmap-mode backups
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents afd76053 a5f8a60b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -5346,7 +5346,7 @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
    for (bm = bdrv_dirty_bitmap_next(bs, NULL); bm;
         bm = bdrv_dirty_bitmap_next(bs, bm))
    {
        bdrv_dirty_bitmap_set_migration(bm, false);
        bdrv_dirty_bitmap_skip_store(bm, false);
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
+206 −106
Original line number Diff line number Diff line
@@ -38,24 +38,26 @@ typedef struct CowRequest {
typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */

    BdrvDirtyBitmap *sync_bitmap;
    BdrvDirtyBitmap *copy_bitmap;

    MirrorSyncMode sync_mode;
    BitmapSyncMode bitmap_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;

    HBitmap *copy_bitmap;
    bool use_copy_range;
    int64_t copy_range_size;

    bool serialize_target_writes;
    BdrvRequestFlags write_flags;
    bool initializing_bitmap;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;
@@ -110,10 +112,9 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    hbitmap_reset(job->copy_bitmap, start, job->cluster_size);
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
    nbytes = MIN(job->cluster_size, job->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
@@ -128,14 +129,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
        goto fail;
    }

    if (buffer_is_zero(*bounce_buffer, nbytes)) {
        ret = blk_co_pwrite_zeroes(job->target, start,
                                   nbytes, write_flags | BDRV_REQ_MAY_UNMAP);
    } else {
        ret = blk_co_pwrite(job->target, start,
                            nbytes, *bounce_buffer, write_flags |
                            (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
    }
    ret = blk_co_pwrite(job->target, start, nbytes, *bounce_buffer,
                        job->write_flags);
    if (ret < 0) {
        trace_backup_do_cow_write_fail(job, start, ret);
        if (error_is_read) {
@@ -146,7 +141,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,

    return nbytes;
fail:
    hbitmap_set(job->copy_bitmap, start, job->cluster_size);
    bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
    return ret;

}
@@ -163,24 +158,96 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    nbytes = MIN(job->copy_range_size, end - start);
    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
    hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters);
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
                            job->cluster_size * nr_clusters);
    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                            read_flags, write_flags);
                            read_flags, job->write_flags);
    if (ret < 0) {
        trace_backup_do_cow_copy_range_fail(job, start, ret);
        hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_clusters);
        bdrv_set_dirty_bitmap(job->copy_bitmap, start,
                              job->cluster_size * nr_clusters);
        return ret;
    }

    return nbytes;
}

/*
 * Check if the cluster starting at offset is allocated or not.
 * return via pnum the number of contiguous clusters sharing this allocation.
 */
static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
                                       int64_t *pnum)
{
    BlockDriverState *bs = blk_bs(s->common.blk);
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/**
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
                                               int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes, estimate;

    ret = backup_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        estimate = bdrv_get_dirty_count(s->copy_bitmap);
        job_progress_set_remaining(&s->common.job, estimate);
    }

    *count = bytes;
    return ret;
}

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
@@ -190,6 +257,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
    int ret = 0;
    int64_t start, end; /* bytes */
    void *bounce_buffer = NULL;
    int64_t status_bytes;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

@@ -204,17 +272,29 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
    while (start < end) {
        int64_t dirty_end;

        if (!hbitmap_get(job->copy_bitmap, start)) {
        if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        dirty_end = hbitmap_next_zero(job->copy_bitmap, start, (end - start));
        dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
                                                (end - start));
        if (dirty_end < 0) {
            dirty_end = end;
        }

        if (job->initializing_bitmap) {
            ret = backup_bitmap_reset_unallocated(job, start, &status_bytes);
            if (ret == 0) {
                trace_backup_do_cow_skip_range(job, start, status_bytes);
                start += status_bytes;
                continue;
            }
            /* Clamp to known allocated region */
            dirty_end = MIN(dirty_end, start + status_bytes);
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
@@ -273,15 +353,29 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);
    bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS)) \
                 && (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));

    if (ret < 0) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
    if (sync) {
        /*
         * We succeeded, or we always intended to sync the bitmap.
         * Delete this bitmap and install the child.
         */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
    } else {
        /*
         * We failed, or we never intended to sync the bitmap anyway.
         * Merge the successor back into the parent, keeping all data.
         */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
    }

    assert(bm);

    if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
        /* If we failed and synced, merge in the bits we didn't copy: */
        bdrv_dirty_bitmap_merge_internal(bm, job->copy_bitmap,
                                         NULL, true);
    }
}

@@ -304,14 +398,16 @@ static void backup_abort(Job *job)
static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
    BlockDriverState *bs = blk_bs(s->common.blk);

    if (s->copy_bitmap) {
        hbitmap_free(s->copy_bitmap);
        bdrv_release_dirty_bitmap(bs, s->copy_bitmap);
        s->copy_bitmap = NULL;
    }

    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

void backup_do_checkpoint(BlockJob *job, Error **errp)
@@ -326,7 +422,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
        return;
    }

    hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len);
    bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
}

static void backup_drain(BlockJob *job)
@@ -377,77 +473,57 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
    return false;
}

static bool bdrv_is_unallocated_range(BlockDriverState *bs,
                                      int64_t offset, int64_t bytes)
{
    int64_t end = offset + bytes;

    while (offset < end && !bdrv_is_allocated(bs, offset, bytes, &bytes)) {
        if (bytes == 0) {
            return true;
        }
        offset += bytes;
        bytes = end - offset;
    }

    return offset >= end;
}

static int coroutine_fn backup_loop(BackupBlockJob *job)
{
    int ret;
    bool error_is_read;
    int64_t offset;
    HBitmapIter hbi;
    BlockDriverState *bs = blk_bs(job->common.blk);

    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
    while ((offset = hbitmap_iter_next(&hbi)) != -1) {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
            bdrv_is_unallocated_range(bs, offset, job->cluster_size))
        {
            hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
            continue;
        }
    BdrvDirtyBitmapIter *bdbi;
    int ret = 0;

    bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
    while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
        do {
            if (yield_and_check(job)) {
                return 0;
                goto out;
            }
            ret = backup_do_cow(job, offset,
                                job->cluster_size, &error_is_read, false);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                return ret;
                goto out;
            }
        } while (ret < 0);
    }

    return 0;
 out:
    bdrv_dirty_iter_free(bdbi);
    return ret;
}

/* init copy_bitmap from sync_bitmap */
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
{
    uint64_t offset = 0;
    uint64_t bytes = job->len;

    while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
                                             &offset, &bytes))
static void backup_init_copy_bitmap(BackupBlockJob *job)
{
        hbitmap_set(job->copy_bitmap, offset, bytes);

        offset += bytes;
        if (offset >= job->len) {
            break;
    bool ret;
    uint64_t estimate;

    if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
        ret = bdrv_dirty_bitmap_merge_internal(job->copy_bitmap,
                                               job->sync_bitmap,
                                               NULL, true);
        assert(ret);
    } else {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
            /*
             * We can't hog the coroutine to initialize this thoroughly.
             * Set a flag and resume work when we are able to yield safely.
             */
            job->initializing_bitmap = true;
        }
        bytes = job->len - offset;
        bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
    }

    /* TODO job_progress_set_remaining() would make more sense */
    job_progress_update(&job->common.job,
        job->len - hbitmap_count(job->copy_bitmap));
    estimate = bdrv_get_dirty_count(job->copy_bitmap);
    job_progress_set_remaining(&job->common.job, estimate);
}

static int coroutine_fn backup_run(Job *job, Error **errp)
@@ -459,17 +535,31 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
    QLIST_INIT(&s->inflight_reqs);
    qemu_co_rwlock_init(&s->flush_rwlock);

    job_progress_set_remaining(job, s->len);

    if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        backup_incremental_init_copy_bitmap(s);
    } else {
        hbitmap_set(s->copy_bitmap, 0, s->len);
    }
    backup_init_copy_bitmap(s);

    s->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &s->before_write);

    if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
        int64_t offset = 0;
        int64_t count;

        for (offset = 0; offset < s->len; ) {
            if (yield_and_check(s)) {
                ret = -ECANCELED;
                goto out;
            }

            ret = backup_bitmap_reset_unallocated(s, offset, &count);
            if (ret < 0) {
                goto out;
            }

            offset += count;
        }
        s->initializing_bitmap = false;
    }

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /* All bits are set in copy_bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied. */
@@ -482,6 +572,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
        ret = backup_loop(s);
    }

 out:
    notifier_with_return_remove(&s->before_write);

    /* wait until pending backup_do_cow() calls have completed */
@@ -545,6 +636,7 @@ static int64_t backup_calculate_cluster_size(BlockDriverState *target,
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  BitmapSyncMode bitmap_mode,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
@@ -556,11 +648,15 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
    BackupBlockJob *job = NULL;
    int ret;
    int64_t cluster_size;
    HBitmap *copy_bitmap = NULL;
    BdrvDirtyBitmap *copy_bitmap = NULL;

    assert(bs);
    assert(target);

    /* QMP interface protects us from these cases */
    assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
    assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
@@ -592,10 +688,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
    if (sync_bitmap) {
        /* If we need to write to this bitmap, check that we can: */
        if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
            bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
            return NULL;
        }

@@ -603,12 +699,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_str(sync_mode));
        return NULL;
    }

    len = bdrv_getlength(bs);
@@ -623,7 +713,11 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
        goto error;
    }

    copy_bitmap = hbitmap_alloc(len, ctz32(cluster_size));
    copy_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
    if (!copy_bitmap) {
        goto error;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, bs,
@@ -649,12 +743,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;
    job->sync_bitmap = sync_bitmap;
    job->bitmap_mode = bitmap_mode;

    /*
     * Set write flags:
     * 1. Detect image-fleecing (and similar) schemes
     * 2. Handle compression
     */
    job->write_flags =
        (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    /* Detect image-fleecing (and similar) schemes */
    job->serialize_target_writes = bdrv_chain_contains(target, bs);
    job->cluster_size = cluster_size;
    job->copy_bitmap = copy_bitmap;
    copy_bitmap = NULL;
@@ -675,7 +775,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
 error:
    if (copy_bitmap) {
        assert(!job || !job->copy_bitmap);
        hbitmap_free(copy_bitmap);
        bdrv_release_dirty_bitmap(bs, copy_bitmap);
    }
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
+68 −20
Original line number Diff line number Diff line
@@ -48,10 +48,9 @@ struct BdrvDirtyBitmap {
    bool inconsistent;          /* bitmap is persistent, but inconsistent.
                                   It cannot be used at all in any way, except
                                   a QMP user can remove it. */
    bool migration;             /* Bitmap is selected for migration, it should
                                   not be stored on the next inactivation
                                   (persistent flag doesn't matter until next
                                   invalidation).*/
    bool skip_store;            /* We are either migrating or deleting this
                                 * bitmap; it should not be stored on the next
                                 * inactivation. */
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

@@ -509,14 +508,19 @@ BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
}

/* Called within bdrv_dirty_bitmap_lock..unlock */
bool bdrv_get_dirty_locked(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
                           int64_t offset)
bool bdrv_dirty_bitmap_get_locked(BdrvDirtyBitmap *bitmap, int64_t offset)
{
    if (bitmap) {
    return hbitmap_get(bitmap->bitmap, offset);
    } else {
        return false;
}

bool bdrv_dirty_bitmap_get(BdrvDirtyBitmap *bitmap, int64_t offset)
{
    bool ret;
    bdrv_dirty_bitmap_lock(bitmap);
    ret = bdrv_dirty_bitmap_get_locked(bitmap, offset);
    bdrv_dirty_bitmap_unlock(bitmap);

    return ret;
}

/**
@@ -757,16 +761,16 @@ void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap)
}

/* Called with BQL taken. */
void bdrv_dirty_bitmap_set_migration(BdrvDirtyBitmap *bitmap, bool migration)
void bdrv_dirty_bitmap_skip_store(BdrvDirtyBitmap *bitmap, bool skip)
{
    qemu_mutex_lock(bitmap->mutex);
    bitmap->migration = migration;
    bitmap->skip_store = skip;
    qemu_mutex_unlock(bitmap->mutex);
}

bool bdrv_dirty_bitmap_get_persistence(BdrvDirtyBitmap *bitmap)
{
    return bitmap->persistent && !bitmap->migration;
    return bitmap->persistent && !bitmap->skip_store;
}

bool bdrv_dirty_bitmap_inconsistent(const BdrvDirtyBitmap *bitmap)
@@ -778,7 +782,7 @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        if (bm->persistent && !bm->readonly && !bm->migration) {
        if (bm->persistent && !bm->readonly && !bm->skip_store) {
            return true;
        }
    }
@@ -810,6 +814,12 @@ bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
    return hbitmap_next_dirty_area(bitmap->bitmap, offset, bytes);
}

/**
 * bdrv_merge_dirty_bitmap: merge src into dest.
 * Ensures permissions on bitmaps are reasonable; use for public API.
 *
 * @backup: If provided, make a copy of dest here prior to merge.
 */
void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
                             HBitmap **backup, Error **errp)
{
@@ -833,6 +843,42 @@ void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
        goto out;
    }

    ret = bdrv_dirty_bitmap_merge_internal(dest, src, backup, false);
    assert(ret);

out:
    qemu_mutex_unlock(dest->mutex);
    if (src->mutex != dest->mutex) {
        qemu_mutex_unlock(src->mutex);
    }
}

/**
 * bdrv_dirty_bitmap_merge_internal: merge src into dest.
 * Does NOT check bitmap permissions; not suitable for use as public API.
 *
 * @backup: If provided, make a copy of dest here prior to merge.
 * @lock: If true, lock and unlock bitmaps on the way in/out.
 * returns true if the merge succeeded; false if unattempted.
 */
bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
                                      const BdrvDirtyBitmap *src,
                                      HBitmap **backup,
                                      bool lock)
{
    bool ret;

    assert(!bdrv_dirty_bitmap_readonly(dest));
    assert(!bdrv_dirty_bitmap_inconsistent(dest));
    assert(!bdrv_dirty_bitmap_inconsistent(src));

    if (lock) {
        qemu_mutex_lock(dest->mutex);
        if (src->mutex != dest->mutex) {
            qemu_mutex_lock(src->mutex);
        }
    }

    if (backup) {
        *backup = dest->bitmap;
        dest->bitmap = hbitmap_alloc(dest->size, hbitmap_granularity(*backup));
@@ -840,11 +886,13 @@ void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
    } else {
        ret = hbitmap_merge(dest->bitmap, src->bitmap, dest->bitmap);
    }
    assert(ret);

out:
    if (lock) {
        qemu_mutex_unlock(dest->mutex);
        if (src->mutex != dest->mutex) {
            qemu_mutex_unlock(src->mutex);
        }
    }

    return ret;
}
+5 −3
Original line number Diff line number Diff line
@@ -476,7 +476,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
@@ -1755,8 +1755,10 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
+5 −0
Original line number Diff line number Diff line
@@ -79,6 +79,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
        info->backing_file = g_strdup(bs->backing_file);
    }

    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
Loading