Commit 27deebe8 authored by Frediano Ziglio, committed by Kevin Wolf
Browse files

qcow: Remove QCowAIOCB



Embed qcow_aio_read_cb into qcow_co_readv and qcow_aio_write_cb into qcow_co_writev

Signed-off-by: Frediano Ziglio <freddy77@gmail.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent 43ca85b5
Loading
Loading
Loading
Loading
+123 −168
Original line number Diff line number Diff line
@@ -488,89 +488,66 @@ static int qcow_read(BlockDriverState *bs, int64_t sector_num,
}
#endif

/*
 * Per-request state shared between qcow_aio_setup and the read/write
 * callbacks.  (This commit removes the struct: the fields become plain
 * locals inside qcow_co_readv/qcow_co_writev.)
 */
typedef struct QCowAIOCB {
    BlockDriverState *bs;   /* device the request operates on */
    int64_t sector_num;     /* next sector to process (advanced per cluster) */
    QEMUIOVector *qiov;     /* caller's scatter/gather list */
    uint8_t *buf;           /* current position in the linear buffer */
    void *orig_buf;         /* bounce-buffer base, NULL when qiov->niov == 1 */
    int nb_sectors;         /* sectors still to transfer */
} QCowAIOCB;

/*
 * Initialise @acb for a request of @nb_sectors sectors starting at
 * @sector_num.
 *
 * When the caller's QEMUIOVector has more than one element, a linear
 * bounce buffer is allocated (and, for writes, pre-filled from @qiov);
 * otherwise the single iovec's memory is used in place.
 * Returns @acb for the caller's convenience.
 */
static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        int is_write, QCowAIOCB *acb)
{
    acb->bs = bs;
    acb->sector_num = sector_num;
    acb->qiov = qiov;
    acb->nb_sectors = nb_sectors;

    if (qiov->niov == 1) {
        /* Single segment: operate directly on the caller's buffer. */
        acb->orig_buf = NULL;
        acb->buf = (uint8_t *)qiov->iov->iov_base;
    } else {
        /* Multiple segments: linearise into a bounce buffer. */
        acb->orig_buf = qemu_blockalign(bs, qiov->size);
        acb->buf = acb->orig_buf;
        if (is_write) {
            qemu_iovec_to_buffer(qiov, acb->buf);
        }
    }
    return acb;
}

static int qcow_aio_read_cb(QCowAIOCB *acb)
static int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
                         int nb_sectors, QEMUIOVector *qiov)
{
    BlockDriverState *bs = acb->bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    int ret, n;
    int ret = 0, n;
    uint64_t cluster_offset;
    struct iovec hd_iov;
    QEMUIOVector hd_qiov;
    uint8_t *buf;
    void *orig_buf;

 redo:
    if (acb->nb_sectors == 0) {
        /* request completed */
        return 0;
    if (qiov->niov > 1) {
        buf = orig_buf = qemu_blockalign(bs, qiov->size);
    } else {
        orig_buf = NULL;
        buf = (uint8_t *)qiov->iov->iov_base;
    }

    qemu_co_mutex_lock(&s->lock);

    while (nb_sectors != 0) {
        /* prepare next request */
    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
        cluster_offset = get_cluster_offset(bs, sector_num << 9,
                                                 0, 0, 0, 0);
    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
    if (n > acb->nb_sectors) {
        n = acb->nb_sectors;
        if (n > nb_sectors) {
            n = nb_sectors;
        }

        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
            hd_iov.iov_base = (void *)acb->buf;
                hd_iov.iov_base = (void *)buf;
                hd_iov.iov_len = n * 512;
                qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
                qemu_co_mutex_unlock(&s->lock);
            ret = bdrv_co_readv(bs->backing_hd, acb->sector_num,
                ret = bdrv_co_readv(bs->backing_hd, sector_num,
                                    n, &hd_qiov);
                qemu_co_mutex_lock(&s->lock);
                if (ret < 0) {
                return -EIO;
                    goto fail;
                }
            } else {
                /* Note: in this case, no need to wait */
            memset(acb->buf, 0, 512 * n);
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            /* add AIO support for compressed blocks ? */
            if (decompress_cluster(bs, cluster_offset) < 0) {
            return -EIO;
                goto fail;
            }
        memcpy(acb->buf,
            memcpy(buf,
                   s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            if ((cluster_offset & 511) != 0) {
            return -EIO;
                goto fail;
            }
        hd_iov.iov_base = (void *)acb->buf;
            hd_iov.iov_base = (void *)buf;
            hd_iov.iov_len = n * 512;
            qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
            qemu_co_mutex_unlock(&s->lock);
@@ -579,89 +556,85 @@ static int qcow_aio_read_cb(QCowAIOCB *acb)
                                n, &hd_qiov);
            qemu_co_mutex_lock(&s->lock);
            if (ret < 0) {
            return ret;
                break;
            }
    }

    /* post process the read buffer */
    if (!cluster_offset) {
        /* nothing to do */
    } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* nothing to do */
    } else {
            if (s->crypt_method) {
            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
                encrypt_sectors(s, sector_num, buf, buf,
                                n, 0,
                                &s->aes_decrypt_key);
            }
        }
        ret = 0;

    acb->nb_sectors -= n;
    acb->sector_num += n;
    acb->buf += n * 512;

    goto redo;
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }

static int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
                         int nb_sectors, QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    QCowAIOCB acb;
    int ret;

    qcow_aio_setup(bs, sector_num, qiov, nb_sectors, 0, &acb);

    qemu_co_mutex_lock(&s->lock);
    ret = qcow_aio_read_cb(&acb);
done:
    qemu_co_mutex_unlock(&s->lock);

    if (acb.qiov->niov > 1) {
        qemu_iovec_from_buffer(acb.qiov, acb.orig_buf, acb.qiov->size);
        qemu_vfree(acb.orig_buf);
    if (qiov->niov > 1) {
        qemu_iovec_from_buffer(qiov, orig_buf, qiov->size);
        qemu_vfree(orig_buf);
    }

    return ret;

fail:
    ret = -EIO;
    goto done;
}

static int qcow_aio_write_cb(QCowAIOCB *acb)
static int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov)
{
    BlockDriverState *bs = acb->bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    uint64_t cluster_offset;
    const uint8_t *src_buf;
    int ret, n;
    int ret = 0, n;
    uint8_t *cluster_data = NULL;
    struct iovec hd_iov;
    QEMUIOVector hd_qiov;
    uint8_t *buf;
    void *orig_buf;

redo:
    if (acb->nb_sectors == 0) {
        /* request completed */
        return 0;
    s->cluster_cache_offset = -1; /* disable compressed cache */

    if (qiov->niov > 1) {
        buf = orig_buf = qemu_blockalign(bs, qiov->size);
        qemu_iovec_to_buffer(qiov, buf);
    } else {
        orig_buf = NULL;
        buf = (uint8_t *)qiov->iov->iov_base;
    }

    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    qemu_co_mutex_lock(&s->lock);

    while (nb_sectors != 0) {

        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
    if (n > acb->nb_sectors) {
        n = acb->nb_sectors;
        if (n > nb_sectors) {
            n = nb_sectors;
        }
    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
                                            index_in_cluster,
                                            index_in_cluster + n);
        if (!cluster_offset || (cluster_offset & 511) != 0) {
        return -EIO;
            ret = -EIO;
            break;
        }
        if (s->crypt_method) {
            if (!cluster_data) {
                cluster_data = g_malloc0(s->cluster_size);
            }
        encrypt_sectors(s, acb->sector_num, cluster_data, acb->buf,
            encrypt_sectors(s, sector_num, cluster_data, buf,
                            n, 1, &s->aes_encrypt_key);
            src_buf = cluster_data;
        } else {
        src_buf = acb->buf;
            src_buf = buf;
        }

        hd_iov.iov_base = (void *)src_buf;
@@ -671,40 +644,22 @@ redo:
        ret = bdrv_co_writev(bs->file,
                             (cluster_offset >> 9) + index_in_cluster,
                             n, &hd_qiov);
    if (cluster_data) {
        free(cluster_data);
        cluster_data = NULL;
    }
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
        return ret;
            break;
        }
        ret = 0;

    acb->nb_sectors -= n;
    acb->sector_num += n;
    acb->buf += n * 512;

    goto redo;
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }

static int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    QCowAIOCB acb;
    int ret;

    s->cluster_cache_offset = -1; /* disable compressed cache */

    qcow_aio_setup(bs, sector_num, qiov, nb_sectors, 1, &acb);

    qemu_co_mutex_lock(&s->lock);
    ret = qcow_aio_write_cb(&acb);
    qemu_co_mutex_unlock(&s->lock);

    if (acb.qiov->niov > 1) {
        qemu_vfree(acb.orig_buf);
    if (qiov->niov > 1) {
        qemu_vfree(orig_buf);
    }
    free(cluster_data);

    return ret;
}