Commit df1a7c99 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2019-08-06' into staging



Block patches for 4.1.0-rc4:
- Fix the backup block job when using copy offloading
- Fix the mirror block job when using the write-blocking copy mode
- Fix incremental backups after the image has been grown with the
  respective bitmap attached to it

# gpg: Signature made Tue 06 Aug 2019 12:57:07 BST
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2019-08-06:
  block/backup: disable copy_range for compressed backup
  iotests: Test unaligned blocking mirror write
  mirror: Only mirror granularity-aligned chunks
  iotests: Test incremental backup after truncation
  util/hbitmap: update orig_size on truncate
  iotests: Test backup job with two guest writes
  backup: Copy only dirty areas

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 9bb68d34 110571be
Loading
Loading
Loading
Loading
+12 −3
Original line number Diff line number Diff line
@@ -202,22 +202,31 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
    cow_request_begin(&cow_request, job, start, end);

    while (start < end) {
        int64_t dirty_end;

        if (!hbitmap_get(job->copy_bitmap, start)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        dirty_end = hbitmap_next_zero(job->copy_bitmap, start, (end - start));
        if (dirty_end < 0) {
            dirty_end = end;
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
            ret = backup_cow_with_offload(job, start, end, is_write_notifier);
            ret = backup_cow_with_offload(job, start, dirty_end,
                                          is_write_notifier);
            if (ret < 0) {
                job->use_copy_range = false;
            }
        }
        if (!job->use_copy_range) {
            ret = backup_cow_with_bounce_buffer(job, start, end, is_write_notifier,
            ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
                                                is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
@@ -648,7 +657,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
    job->cluster_size = cluster_size;
    job->copy_bitmap = copy_bitmap;
    copy_bitmap = NULL;
    job->use_copy_range = true;
    job->use_copy_range = !compress; /* compression isn't supported for it */
    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                        blk_get_max_transfer(job->target));
    job->copy_range_size = MAX(job->cluster_size,
+29 −0
Original line number Diff line number Diff line
@@ -1481,6 +1481,15 @@ static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
    *nshared = BLK_PERM_ALL;
}

static void bdrv_mirror_top_refresh_limits(BlockDriverState *bs, Error **errp)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s && s->job && s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bs->bl.request_alignment = s->job->granularity;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
@@ -1493,6 +1502,7 @@ static BlockDriver bdrv_mirror_top = {
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
    .bdrv_refresh_limits        = bdrv_mirror_top_refresh_limits,
};

static BlockJob *mirror_start_job(
@@ -1637,6 +1647,25 @@ static BlockJob *mirror_start_job(
        s->should_complete = true;
    }

    /*
     * Must be called before we start tracking writes, but after
     *
     *     ((MirrorBlockJob *)
     *         ((MirrorBDSOpaque *)
     *             mirror_top_bs->opaque
     *         )->job
     *     )->copy_mode
     *
     * has the correct value.
     * (We start tracking writes as of the following
     * bdrv_create_dirty_bitmap() call.)
     */
    bdrv_refresh_limits(mirror_top_bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
+39 −0
Original line number Diff line number Diff line
@@ -133,6 +133,7 @@ class BackupTest(iotests.QMPTestCase):
        self.vm = iotests.VM()
        self.test_img = img_create('test')
        self.dest_img = img_create('dest')
        self.ref_img = img_create('ref')
        self.vm.add_drive(self.test_img)
        self.vm.launch()

@@ -140,6 +141,7 @@ class BackupTest(iotests.QMPTestCase):
        self.vm.shutdown()
        try_remove(self.test_img)
        try_remove(self.dest_img)
        try_remove(self.ref_img)

    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
@@ -177,6 +179,43 @@ class BackupTest(iotests.QMPTestCase):
            self.assert_qmp(event, 'data/error', qerror)
            return False

    def test_overlapping_writes(self):
        # Write something to back up
        self.hmp_io_writes('drive0', [('42', '0M', '2M')])

        # Create a reference backup
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.ref_img,
                                 auto_dismiss=False)
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})

        # Now to the test backup: We simulate the following guest
        # writes:
        # (1) [1M + 64k, 1M + 128k): Afterwards, everything in that
        #     area should be in the target image, and we must not copy
        #     it again (because the source image has changed now)
        #     (64k is the job's cluster size)
        # (2) [1M, 2M): The backup job must not get overeager.  It
        #     must copy [1M, 1M + 64k) and [1M + 128k, 2M) separately,
        #     but not the area in between.

        self.qmp_backup(device='drive0', format=iotests.imgfmt, sync='full',
                        target=self.dest_img, speed=1, auto_dismiss=False)

        self.hmp_io_writes('drive0', [('23', '%ik' % (1024 + 64), '64k'),
                                      ('66', '1M', '1M')])

        # Let the job complete
        res = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
        self.assert_qmp(res, 'return', {})
        self.qmp_backup_wait('drive0')
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})

        self.assertTrue(iotests.compare_images(self.ref_img, self.dest_img),
                        'target image does not match reference image')

    def test_dismiss_false(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
+2 −2
Original line number Diff line number Diff line
.........
..........
----------------------------------------------------------------------
Ran 9 tests
Ran 10 tests

OK
+34 −4
Original line number Diff line number Diff line
@@ -212,24 +212,27 @@ class TestIncrementalBackupBase(iotests.QMPTestCase):
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None):
    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
                           parentFormat=None, validate=True,
                           target=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
@@ -572,6 +575,33 @@ class TestIncrementalBackup(TestIncrementalBackupBase):
                          'bitmap0', self.drives[0],
                          granularity=64000)

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
              end, do a backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
                          size=(65 * 1048576))
        self.assert_qmp(res, 'return', {})

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''
Loading