Commit 0a4dc980 authored by Kevin Wolf's avatar Kevin Wolf
Browse files

Merge remote-tracking branch 'mreitz/tags/pull-block-2018-02-13' into queue-block



Block patches for the block queue

# gpg: Signature made Tue Feb 13 17:00:13 2018 CET
# gpg:                using RSA key F407DB0061D5CF40
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>"
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* mreitz/tags/pull-block-2018-02-13: (40 commits)
  iotests: Add l2-cache-entry-size to iotest 137
  iotests: Test downgrading an image using a small L2 slice size
  iotests: Test valid values of l2-cache-entry-size
  qcow2: Allow configuring the L2 slice size
  qcow2: Rename l2_table in count_cow_clusters()
  qcow2: Rename l2_table in count_contiguous_clusters_unallocated()
  qcow2: Rename l2_table in count_contiguous_clusters()
  qcow2: Rename l2_table in qcow2_alloc_compressed_cluster_offset()
  qcow2: Update qcow2_truncate() to support L2 slices
  qcow2: Update expand_zero_clusters_in_l1() to support L2 slices
  qcow2: Prepare expand_zero_clusters_in_l1() for adding L2 slice support
  qcow2: Read refcount before L2 table in expand_zero_clusters_in_l1()
  qcow2: Update qcow2_update_snapshot_refcount() to support L2 slices
  qcow2: Prepare qcow2_update_snapshot_refcount() for adding L2 slice support
  qcow2: Update zero_single_l2() to support L2 slices
  qcow2: Update discard_single_l2() to support L2 slices
  qcow2: Update handle_alloc() to support L2 slices
  qcow2: Update handle_copied() to support L2 slices
  qcow2: Update qcow2_alloc_cluster_link_l2() to support L2 slices
  qcow2: Update qcow2_get_cluster_offset() to support L2 slices
  ...

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parents 74f1eabf 03b1b6f0
Loading
Loading
Loading
Loading
+0 −18
Original line number Diff line number Diff line
@@ -52,8 +52,6 @@ struct BdrvDirtyBitmap {
                                   Such operations must fail and both the image
                                   and this bitmap must remain unchanged while
                                   this flag is set. */
    bool autoload;              /* For persistent bitmaps: bitmap must be
                                   autoloaded on image opening */
    bool persistent;            /* bitmap must be saved to owner disk image */
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};
@@ -104,7 +102,6 @@ void bdrv_dirty_bitmap_make_anon(BdrvDirtyBitmap *bitmap)
    g_free(bitmap->name);
    bitmap->name = NULL;
    bitmap->persistent = false;
    bitmap->autoload = false;
}

/* Called with BQL taken.  */
@@ -261,8 +258,6 @@ BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs,
    bitmap->successor = NULL;
    successor->persistent = bitmap->persistent;
    bitmap->persistent = false;
    successor->autoload = bitmap->autoload;
    bitmap->autoload = false;
    bdrv_release_dirty_bitmap(bs, bitmap);

    return successor;
@@ -666,19 +661,6 @@ bool bdrv_has_readonly_bitmaps(BlockDriverState *bs)
    return false;
}

/* Called with BQL taken. */
void bdrv_dirty_bitmap_set_autoload(BdrvDirtyBitmap *bitmap, bool autoload)
{
    qemu_mutex_lock(bitmap->mutex);
    bitmap->autoload = autoload;
    qemu_mutex_unlock(bitmap->mutex);
}

bool bdrv_dirty_bitmap_get_autoload(const BdrvDirtyBitmap *bitmap)
{
    return bitmap->autoload;
}

/* Called with BQL taken. */
void bdrv_dirty_bitmap_set_persistance(BdrvDirtyBitmap *bitmap, bool persistent)
{
+7 −5
Original line number Diff line number Diff line
@@ -933,14 +933,14 @@ static void set_readonly_helper(gpointer bitmap, gpointer value)
    bdrv_dirty_bitmap_set_readonly(bitmap, (bool)value);
}

/* qcow2_load_autoloading_dirty_bitmaps()
/* qcow2_load_dirty_bitmaps()
 * Return value is a hint for caller: true means that the Qcow2 header was
 * updated. (false doesn't mean that the header should be updated by the
 * caller, it just means that updating was not needed or the image cannot be
 * written to).
 * On failure the function returns false.
 */
bool qcow2_load_autoloading_dirty_bitmaps(BlockDriverState *bs, Error **errp)
bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2BitmapList *bm_list;
@@ -960,14 +960,16 @@ bool qcow2_load_autoloading_dirty_bitmaps(BlockDriverState *bs, Error **errp)
    }

    QSIMPLEQ_FOREACH(bm, bm_list, entry) {
        if ((bm->flags & BME_FLAG_AUTO) && !(bm->flags & BME_FLAG_IN_USE)) {
        if (!(bm->flags & BME_FLAG_IN_USE)) {
            BdrvDirtyBitmap *bitmap = load_bitmap(bs, bm, errp);
            if (bitmap == NULL) {
                goto fail;
            }

            if (!(bm->flags & BME_FLAG_AUTO)) {
                bdrv_disable_dirty_bitmap(bitmap);
            }
            bdrv_dirty_bitmap_set_persistance(bitmap, true);
            bdrv_dirty_bitmap_set_autoload(bitmap, true);
            bm->flags |= BME_FLAG_IN_USE;
            created_dirty_bitmaps =
                    g_slist_append(created_dirty_bitmaps, bitmap);
@@ -1369,7 +1371,7 @@ void qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp)
            bm->table.size = 0;
            QSIMPLEQ_INSERT_TAIL(&drop_tables, tb, entry);
        }
        bm->flags = bdrv_dirty_bitmap_get_autoload(bitmap) ? BME_FLAG_AUTO : 0;
        bm->flags = bdrv_dirty_bitmap_enabled(bitmap) ? BME_FLAG_AUTO : 0;
        bm->granularity_bits = ctz32(bdrv_dirty_bitmap_granularity(bitmap));
        bm->dirty_bitmap = bitmap;
    }
+40 −40
Original line number Diff line number Diff line
@@ -39,26 +39,23 @@ struct Qcow2Cache {
    Qcow2CachedTable       *entries;
    struct Qcow2Cache      *depends;
    int                     size;
    int                     table_size;
    bool                    depends_on_flush;
    void                   *table_array;
    uint64_t                lru_counter;
    uint64_t                cache_clean_lru_counter;
};

static inline void *qcow2_cache_get_table_addr(BlockDriverState *bs,
                    Qcow2Cache *c, int table)
static inline void *qcow2_cache_get_table_addr(Qcow2Cache *c, int table)
{
    BDRVQcow2State *s = bs->opaque;
    return (uint8_t *) c->table_array + (size_t) table * s->cluster_size;
    return (uint8_t *) c->table_array + (size_t) table * c->table_size;
}

static inline int qcow2_cache_get_table_idx(BlockDriverState *bs,
                  Qcow2Cache *c, void *table)
static inline int qcow2_cache_get_table_idx(Qcow2Cache *c, void *table)
{
    BDRVQcow2State *s = bs->opaque;
    ptrdiff_t table_offset = (uint8_t *) table - (uint8_t *) c->table_array;
    int idx = table_offset / s->cluster_size;
    assert(idx >= 0 && idx < c->size && table_offset % s->cluster_size == 0);
    int idx = table_offset / c->table_size;
    assert(idx >= 0 && idx < c->size && table_offset % c->table_size == 0);
    return idx;
}

@@ -74,15 +71,13 @@ static inline const char *qcow2_cache_get_name(BDRVQcow2State *s, Qcow2Cache *c)
    }
}

static void qcow2_cache_table_release(BlockDriverState *bs, Qcow2Cache *c,
                                      int i, int num_tables)
static void qcow2_cache_table_release(Qcow2Cache *c, int i, int num_tables)
{
/* Using MADV_DONTNEED to discard memory is a Linux-specific feature */
#ifdef CONFIG_LINUX
    BDRVQcow2State *s = bs->opaque;
    void *t = qcow2_cache_get_table_addr(bs, c, i);
    void *t = qcow2_cache_get_table_addr(c, i);
    int align = getpagesize();
    size_t mem_size = (size_t) s->cluster_size * num_tables;
    size_t mem_size = (size_t) c->table_size * num_tables;
    size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
    size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);
    if (mem_size > offset && length > 0) {
@@ -98,7 +93,7 @@ static inline bool can_clean_entry(Qcow2Cache *c, int i)
        t->lru_counter <= c->cache_clean_lru_counter;
}

void qcow2_cache_clean_unused(BlockDriverState *bs, Qcow2Cache *c)
void qcow2_cache_clean_unused(Qcow2Cache *c)
{
    int i = 0;
    while (i < c->size) {
@@ -118,23 +113,30 @@ void qcow2_cache_clean_unused(BlockDriverState *bs, Qcow2Cache *c)
        }

        if (to_clean > 0) {
            qcow2_cache_table_release(bs, c, i - to_clean, to_clean);
            qcow2_cache_table_release(c, i - to_clean, to_clean);
        }
    }

    c->cache_clean_lru_counter = c->lru_counter;
}

Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
                               unsigned table_size)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2Cache *c;

    assert(num_tables > 0);
    assert(is_power_of_2(table_size));
    assert(table_size >= (1 << MIN_CLUSTER_BITS));
    assert(table_size <= s->cluster_size);

    c = g_new0(Qcow2Cache, 1);
    c->size = num_tables;
    c->table_size = table_size;
    c->entries = g_try_new0(Qcow2CachedTable, num_tables);
    c->table_array = qemu_try_blockalign(bs->file->bs,
                                         (size_t) num_tables * s->cluster_size);
                                         (size_t) num_tables * c->table_size);

    if (!c->entries || !c->table_array) {
        qemu_vfree(c->table_array);
@@ -146,7 +148,7 @@ Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
    return c;
}

int qcow2_cache_destroy(BlockDriverState *bs, Qcow2Cache *c)
int qcow2_cache_destroy(Qcow2Cache *c)
{
    int i;

@@ -203,13 +205,13 @@ static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)

    if (c == s->refcount_block_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_BLOCK,
                c->entries[i].offset, s->cluster_size);
                c->entries[i].offset, c->table_size);
    } else if (c == s->l2_table_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                c->entries[i].offset, s->cluster_size);
                c->entries[i].offset, c->table_size);
    } else {
        ret = qcow2_pre_write_overlap_check(bs, 0,
                c->entries[i].offset, s->cluster_size);
                c->entries[i].offset, c->table_size);
    }

    if (ret < 0) {
@@ -223,7 +225,7 @@ static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset,
                      qcow2_cache_get_table_addr(bs, c, i), s->cluster_size);
                      qcow2_cache_get_table_addr(c, i), c->table_size);
    if (ret < 0) {
        return ret;
    }
@@ -309,7 +311,7 @@ int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
        c->entries[i].lru_counter = 0;
    }

    qcow2_cache_table_release(bs, c, 0, c->size);
    qcow2_cache_table_release(c, 0, c->size);

    c->lru_counter = 0;

@@ -331,7 +333,7 @@ static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
                          offset, read_from_disk);

    if (offset_into_cluster(s, offset)) {
    if (!QEMU_IS_ALIGNED(offset, c->table_size)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Cannot get entry from %s "
                                "cache: Offset %#" PRIx64 " is unaligned",
                                qcow2_cache_get_name(s, c), offset);
@@ -339,7 +341,7 @@ static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    }

    /* Check if the table is already cached */
    i = lookup_index = (offset / s->cluster_size * 4) % c->size;
    i = lookup_index = (offset / c->table_size * 4) % c->size;
    do {
        const Qcow2CachedTable *t = &c->entries[i];
        if (t->offset == offset) {
@@ -379,8 +381,8 @@ static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
        }

        ret = bdrv_pread(bs->file, offset,
                         qcow2_cache_get_table_addr(bs, c, i),
                         s->cluster_size);
                         qcow2_cache_get_table_addr(c, i),
                         c->table_size);
        if (ret < 0) {
            return ret;
        }
@@ -391,7 +393,7 @@ static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    /* And return the right table */
found:
    c->entries[i].ref++;
    *table = qcow2_cache_get_table_addr(bs, c, i);
    *table = qcow2_cache_get_table_addr(c, i);

    trace_qcow2_cache_get_done(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);
@@ -411,9 +413,9 @@ int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    return qcow2_cache_do_get(bs, c, offset, table, false);
}

void qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
void qcow2_cache_put(Qcow2Cache *c, void **table)
{
    int i = qcow2_cache_get_table_idx(bs, c, *table);
    int i = qcow2_cache_get_table_idx(c, *table);

    c->entries[i].ref--;
    *table = NULL;
@@ -425,30 +427,28 @@ void qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
    assert(c->entries[i].ref >= 0);
}

void qcow2_cache_entry_mark_dirty(BlockDriverState *bs, Qcow2Cache *c,
     void *table)
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i = qcow2_cache_get_table_idx(bs, c, table);
    int i = qcow2_cache_get_table_idx(c, table);
    assert(c->entries[i].offset != 0);
    c->entries[i].dirty = true;
}

void *qcow2_cache_is_table_offset(BlockDriverState *bs, Qcow2Cache *c,
                                  uint64_t offset)
void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            return qcow2_cache_get_table_addr(bs, c, i);
            return qcow2_cache_get_table_addr(c, i);
        }
    }
    return NULL;
}

void qcow2_cache_discard(BlockDriverState *bs, Qcow2Cache *c, void *table)
void qcow2_cache_discard(Qcow2Cache *c, void *table)
{
    int i = qcow2_cache_get_table_idx(bs, c, table);
    int i = qcow2_cache_get_table_idx(c, table);

    assert(c->entries[i].ref == 0);

@@ -456,5 +456,5 @@ void qcow2_cache_discard(BlockDriverState *bs, Qcow2Cache *c, void *table)
    c->entries[i].lru_counter = 0;
    c->entries[i].dirty = false;

    qcow2_cache_table_release(bs, c, i, 1);
    qcow2_cache_table_release(c, i, 1);
}
+267 −242

File changed.

Preview size limit exceeded, changes collapsed.

+107 −99
Original line number Diff line number Diff line
@@ -277,7 +277,7 @@ int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
    block_index = cluster_index & (s->refcount_block_size - 1);
    *refcount = s->get_refcount(refcount_block, block_index);

    qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
    qcow2_cache_put(s->refcount_block_cache, &refcount_block);

    return 0;
}
@@ -421,7 +421,7 @@ static int alloc_refcount_block(BlockDriverState *bs,

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, *refcount_block);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
@@ -449,7 +449,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
        return -EAGAIN;
    }

    qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
    qcow2_cache_put(s->refcount_block_cache, refcount_block);

    /*
     * If we come here, we need to grow the refcount table. Again, a new
@@ -501,7 +501,7 @@ static int alloc_refcount_block(BlockDriverState *bs,

fail:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
        qcow2_cache_put(s->refcount_block_cache, refcount_block);
    }
    return ret;
}
@@ -623,7 +623,7 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
                goto fail;
            }
            memset(refblock_data, 0, s->cluster_size);
            qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
            qcow2_cache_entry_mark_dirty(s->refcount_block_cache,
                                         refblock_data);

            new_table[i] = block_offset;
@@ -656,11 +656,11 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
                s->set_refcount(refblock_data, j, 1);
            }

            qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
            qcow2_cache_entry_mark_dirty(s->refcount_block_cache,
                                         refblock_data);
        }

        qcow2_cache_put(bs, s->refcount_block_cache, &refblock_data);
        qcow2_cache_put(s->refcount_block_cache, &refblock_data);
    }

    assert(block_offset == table_offset);
@@ -836,7 +836,7 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
                qcow2_cache_put(s->refcount_block_cache, &refcount_block);
            }
            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
@@ -845,8 +845,7 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
                                     refcount_block);
        qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);
@@ -872,16 +871,16 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
        if (refcount == 0) {
            void *table;

            table = qcow2_cache_is_table_offset(bs, s->refcount_block_cache,
            table = qcow2_cache_is_table_offset(s->refcount_block_cache,
                                                offset);
            if (table != NULL) {
                qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
                qcow2_cache_discard(bs, s->refcount_block_cache, table);
                qcow2_cache_put(s->refcount_block_cache, &refcount_block);
                qcow2_cache_discard(s->refcount_block_cache, table);
            }

            table = qcow2_cache_is_table_offset(bs, s->l2_table_cache, offset);
            table = qcow2_cache_is_table_offset(s->l2_table_cache, offset);
            if (table != NULL) {
                qcow2_cache_discard(bs, s->l2_table_cache, table);
                qcow2_cache_discard(s->l2_table_cache, table);
            }

            if (s->discard_passthrough[type]) {
@@ -898,7 +897,7 @@ fail:

    /* Write last changed block to disk */
    if (refcount_block) {
        qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
        qcow2_cache_put(s->refcount_block_cache, &refcount_block);
    }

    /*
@@ -1184,17 +1183,20 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, entry, l1_size2, refcount;
    uint64_t *l1_table, *l2_slice, l2_offset, entry, l1_size2, refcount;
    bool l1_allocated = false;
    int64_t old_entry, old_l2_offset;
    unsigned slice, slice_size2, n_slices;
    int i, j, l1_modified = 0, nb_csectors;
    int ret;

    assert(addend >= -1 && addend <= 1);

    l2_table = NULL;
    l2_slice = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);
    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    s->cache_discards = true;

@@ -1237,17 +1239,19 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void**) &l2_table);
            for (slice = 0; slice < n_slices; slice++) {
                ret = qcow2_cache_get(bs, s->l2_table_cache,
                                      l2_offset + slice * slice_size2,
                                      (void **) &l2_slice);
                if (ret < 0) {
                    goto fail;
                }

            for (j = 0; j < s->l2_size; j++) {
                for (j = 0; j < s->l2_slice_size; j++) {
                    uint64_t cluster_index;
                    uint64_t offset;

                entry = be64_to_cpu(l2_table[j]);
                    entry = be64_to_cpu(l2_slice[j]);
                    old_entry = entry;
                    entry &= ~QCOW_OFLAG_COPIED;
                    offset = entry & L2E_OFFSET_MASK;
@@ -1257,8 +1261,8 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
                        nb_csectors = ((entry >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        if (addend != 0) {
                        ret = update_refcount(bs,
                                (entry & s->cluster_offset_mask) & ~511,
                            ret = update_refcount(
                                bs, (entry & s->cluster_offset_mask) & ~511,
                                nb_csectors * 512, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                            if (ret < 0) {
@@ -1272,11 +1276,14 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
                    case QCOW2_CLUSTER_NORMAL:
                    case QCOW2_CLUSTER_ZERO_ALLOC:
                        if (offset_into_cluster(s, offset)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Cluster "
                            /* Here l2_index means table (not slice) index */
                            int l2_index = slice * s->l2_slice_size + j;
                            qcow2_signal_corruption(
                                bs, true, -1, -1, "Cluster "
                                "allocation offset %#" PRIx64
                                " unaligned (L2 offset: %#"
                                PRIx64 ", L2 index: %#x)",
                                                offset, l2_offset, j);
                                offset, l2_offset, l2_index);
                            ret = -EIO;
                            goto fail;
                        }
@@ -1284,8 +1291,8 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
                        cluster_index = offset >> s->cluster_bits;
                        assert(cluster_index);
                        if (addend != 0) {
                        ret = qcow2_update_cluster_refcount(bs,
                                    cluster_index, abs(addend), addend < 0,
                            ret = qcow2_update_cluster_refcount(
                                bs, cluster_index, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                            if (ret < 0) {
                                goto fail;
@@ -1315,13 +1322,14 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
                            qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                                       s->refcount_block_cache);
                        }
                    l2_table[j] = cpu_to_be64(entry);
                    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache,
                                                 l2_table);
                        l2_slice[j] = cpu_to_be64(entry);
                        qcow2_cache_entry_mark_dirty(s->l2_table_cache,
                                                     l2_slice);
                    }
                }

            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            }

            if (addend != 0) {
                ret = qcow2_update_cluster_refcount(bs, l2_offset >>
@@ -1348,8 +1356,8 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,

    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (l2_slice) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    s->cache_discards = false;
@@ -2849,7 +2857,7 @@ static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
                                    new_reftable_size, new_refblock,
                                    new_refblock_empty, allocated, errp);
                    if (ret < 0) {
                        qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
                        qcow2_cache_put(s->refcount_block_cache, &refblock);
                        return ret;
                    }

@@ -2862,7 +2870,7 @@ static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
                if (new_refcount_bits < 64 && refcount >> new_refcount_bits) {
                    uint64_t offset;

                    qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
                    qcow2_cache_put(s->refcount_block_cache, &refblock);

                    offset = ((reftable_index << s->refcount_block_bits)
                              + refblock_index) << s->cluster_bits;
@@ -2883,7 +2891,7 @@ static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
                new_refblock_empty = new_refblock_empty && refcount == 0;
            }

            qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
            qcow2_cache_put(s->refcount_block_cache, &refblock);
        } else {
            /* No refblock means every refcount is 0 */
            for (refblock_index = 0; refblock_index < s->refcount_block_size;
@@ -3175,24 +3183,24 @@ static int qcow2_discard_refcount_block(BlockDriverState *bs,
                                offset_to_reftable_index(s, discard_block_offs),
                                discard_block_offs,
                                s->get_refcount(refblock, block_index));
        qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
        qcow2_cache_put(s->refcount_block_cache, &refblock);
        return -EINVAL;
    }
    s->set_refcount(refblock, block_index, 0);

    qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, refblock);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refblock);

    qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
    qcow2_cache_put(s->refcount_block_cache, &refblock);

    if (cluster_index < s->free_cluster_index) {
        s->free_cluster_index = cluster_index;
    }

    refblock = qcow2_cache_is_table_offset(bs, s->refcount_block_cache,
    refblock = qcow2_cache_is_table_offset(s->refcount_block_cache,
                                           discard_block_offs);
    if (refblock) {
        /* discard refblock from the cache if refblock is cached */
        qcow2_cache_discard(bs, s->refcount_block_cache, refblock);
        qcow2_cache_discard(s->refcount_block_cache, refblock);
    }
    update_refcount_discard(bs, discard_block_offs, s->cluster_size);

@@ -3235,7 +3243,7 @@ int qcow2_shrink_reftable(BlockDriverState *bs)
        } else {
            unused_block = buffer_is_zero(refblock, s->cluster_size);
        }
        qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
        qcow2_cache_put(s->refcount_block_cache, &refblock);

        reftable_tmp[i] = unused_block ? 0 : cpu_to_be64(s->refcount_table[i]);
    }
Loading