Commit 9c74a853 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/cody/tags/block-pull-request' into staging



# gpg: Signature made Mon 29 Feb 2016 20:08:16 GMT using RSA key ID C0DE3057
# gpg: Good signature from "Jeffrey Cody <jcody@redhat.com>"
# gpg:                 aka "Jeffrey Cody <jeff@codyprime.org>"
# gpg:                 aka "Jeffrey Cody <codyprime@gmail.com>"

* remotes/cody/tags/block-pull-request:
  iotests/124: Add cluster_size mismatch test
  block/backup: avoid copying less than full target clusters
  block/backup: make backup cluster size configurable
  mirror: Add mirror_wait_for_io
  mirror: Rewrite mirror_iteration
  vhdx: Simplify vhdx_set_shift_bits()
  vhdx: DIV_ROUND_UP() in vhdx_calc_bat_entries()
  iscsi: add support for getting CHAP password via QCryptoSecret API
  curl: add support for HTTP authentication parameters
  rbd: add support for getting password from QCryptoSecret object
  sheepdog: allow to delete snapshot
  block/nfs: add support for setting debug level

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 071608b5 cc199b16
Loading
Loading
Loading
Loading
+57 −30
Original line number Diff line number Diff line
@@ -21,10 +21,7 @@
#include "qemu/ratelimit.h"
#include "sysemu/block-backend.h"

#define BACKUP_CLUSTER_BITS 16
#define BACKUP_CLUSTER_SIZE (1 << BACKUP_CLUSTER_BITS)
#define BACKUP_SECTORS_PER_CLUSTER (BACKUP_CLUSTER_SIZE / BDRV_SECTOR_SIZE)

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct CowRequest {
@@ -46,9 +43,16 @@ typedef struct BackupBlockJob {
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    HBitmap *bitmap;
    int64_t cluster_size;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Convert the job's backup cluster size from bytes to a sector count. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    int64_t bytes_per_cluster = job->cluster_size;

    /* cluster_size is always a multiple of BDRV_SECTOR_SIZE (512 bytes),
     * since it is at least BACKUP_CLUSTER_SIZE_DEFAULT, so this divides
     * evenly. */
    return bytes_per_cluster / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
@@ -97,13 +101,14 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs,
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / BACKUP_SECTORS_PER_CLUSTER;
    end = DIV_ROUND_UP(sector_num + nb_sectors, BACKUP_SECTORS_PER_CLUSTER);
    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

@@ -118,12 +123,12 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs,

        trace_backup_do_cow_process(job, start);

        n = MIN(BACKUP_SECTORS_PER_CLUSTER,
        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * BACKUP_SECTORS_PER_CLUSTER);
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = qemu_blockalign(bs, BACKUP_CLUSTER_SIZE);
            bounce_buffer = qemu_blockalign(bs, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
@@ -131,10 +136,10 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs,

        if (is_write_notifier) {
            ret = bdrv_co_readv_no_serialising(bs,
                                           start * BACKUP_SECTORS_PER_CLUSTER,
                                           start * sectors_per_cluster,
                                           n, &bounce_qiov);
        } else {
            ret = bdrv_co_readv(bs, start * BACKUP_SECTORS_PER_CLUSTER, n,
            ret = bdrv_co_readv(bs, start * sectors_per_cluster, n,
                                &bounce_qiov);
        }
        if (ret < 0) {
@@ -147,11 +152,11 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs,

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = bdrv_co_write_zeroes(job->target,
                                       start * BACKUP_SECTORS_PER_CLUSTER,
                                       start * sectors_per_cluster,
                                       n, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = bdrv_co_writev(job->target,
                                 start * BACKUP_SECTORS_PER_CLUSTER, n,
                                 start * sectors_per_cluster, n,
                                 &bounce_qiov);
        }
        if (ret < 0) {
@@ -322,21 +327,22 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BlockDriverState *bs = job->common.bs;
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / BACKUP_CLUSTER_SIZE), 1);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / BACKUP_SECTORS_PER_CLUSTER;
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   BACKUP_CLUSTER_SIZE);
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
@@ -344,8 +350,8 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(bs, cluster * BACKUP_SECTORS_PER_CLUSTER,
                                    BACKUP_SECTORS_PER_CLUSTER, &error_is_read,
                ret = backup_do_cow(bs, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
@@ -357,17 +363,17 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < BACKUP_CLUSTER_SIZE) {
            bdrv_set_dirty_iter(&hbi, cluster * BACKUP_SECTORS_PER_CLUSTER);
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * BACKUP_CLUSTER_SIZE);
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
@@ -384,13 +390,14 @@ static void coroutine_fn backup_run(void *opaque)
        .notify = backup_before_write_notify,
    };
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->bitmap = hbitmap_alloc(end, 0);

@@ -427,7 +434,7 @@ static void coroutine_fn backup_run(void *opaque)
                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < BACKUP_SECTORS_PER_CLUSTER;) {
                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
@@ -436,8 +443,8 @@ static void coroutine_fn backup_run(void *opaque)
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                start * BACKUP_SECTORS_PER_CLUSTER + i,
                                BACKUP_SECTORS_PER_CLUSTER - i, &n);
                                start * sectors_per_cluster + i,
                                sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
@@ -452,8 +459,8 @@ static void coroutine_fn backup_run(void *opaque)
                }
            }
            /* FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(bs, start * BACKUP_SECTORS_PER_CLUSTER,
                    BACKUP_SECTORS_PER_CLUSTER, &error_is_read, false);
            ret = backup_do_cow(bs, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
@@ -494,6 +501,8 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    int ret;

    assert(bs);
    assert(target);
@@ -563,14 +572,32 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
        goto error;
    }

    bdrv_op_block_all(target, job->common.blocker);

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->target = target;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(job->target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run);
    block_job_txn_add_job(txn, &job->common);
+66 −0
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
#include "block/block_int.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qstring.h"
#include "crypto/secret.h"
#include <curl/curl.h>

// #define DEBUG_CURL
@@ -78,6 +79,10 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle,
#define CURL_BLOCK_OPT_SSLVERIFY "sslverify"
#define CURL_BLOCK_OPT_TIMEOUT "timeout"
#define CURL_BLOCK_OPT_COOKIE    "cookie"
#define CURL_BLOCK_OPT_USERNAME "username"
#define CURL_BLOCK_OPT_PASSWORD_SECRET "password-secret"
#define CURL_BLOCK_OPT_PROXY_USERNAME "proxy-username"
#define CURL_BLOCK_OPT_PROXY_PASSWORD_SECRET "proxy-password-secret"

struct BDRVCURLState;

@@ -120,6 +125,10 @@ typedef struct BDRVCURLState {
    char *cookie;
    bool accept_range;
    AioContext *aio_context;
    char *username;
    char *password;
    char *proxyusername;
    char *proxypassword;
} BDRVCURLState;

static void curl_clean_state(CURLState *s);
@@ -419,6 +428,21 @@ static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s)
        curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg);
        curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1);

        if (s->username) {
            curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username);
        }
        if (s->password) {
            curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password);
        }
        if (s->proxyusername) {
            curl_easy_setopt(state->curl,
                             CURLOPT_PROXYUSERNAME, s->proxyusername);
        }
        if (s->proxypassword) {
            curl_easy_setopt(state->curl,
                             CURLOPT_PROXYPASSWORD, s->proxypassword);
        }

        /* Restrict supported protocols to avoid security issues in the more
         * obscure protocols.  For example, do not allow POP3/SMTP/IMAP see
         * CVE-2013-0249.
@@ -525,10 +549,31 @@ static QemuOptsList runtime_opts = {
            .type = QEMU_OPT_STRING,
            .help = "Pass the cookie or list of cookies with each request"
        },
        {
            .name = CURL_BLOCK_OPT_USERNAME,
            .type = QEMU_OPT_STRING,
            .help = "Username for HTTP auth"
        },
        {
            .name = CURL_BLOCK_OPT_PASSWORD_SECRET,
            .type = QEMU_OPT_STRING,
            .help = "ID of secret used as password for HTTP auth",
        },
        {
            .name = CURL_BLOCK_OPT_PROXY_USERNAME,
            .type = QEMU_OPT_STRING,
            .help = "Username for HTTP proxy auth"
        },
        {
            .name = CURL_BLOCK_OPT_PROXY_PASSWORD_SECRET,
            .type = QEMU_OPT_STRING,
            .help = "ID of secret used as password for HTTP proxy auth",
        },
        { /* end of list */ }
    },
};


static int curl_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
@@ -539,6 +584,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
    const char *file;
    const char *cookie;
    double d;
    const char *secretid;

    static int inited = 0;

@@ -580,6 +626,26 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
        goto out_noclean;
    }

    s->username = g_strdup(qemu_opt_get(opts, CURL_BLOCK_OPT_USERNAME));
    secretid = qemu_opt_get(opts, CURL_BLOCK_OPT_PASSWORD_SECRET);

    if (secretid) {
        s->password = qcrypto_secret_lookup_as_utf8(secretid, errp);
        if (!s->password) {
            goto out_noclean;
        }
    }

    s->proxyusername = g_strdup(
        qemu_opt_get(opts, CURL_BLOCK_OPT_PROXY_USERNAME));
    secretid = qemu_opt_get(opts, CURL_BLOCK_OPT_PROXY_PASSWORD_SECRET);
    if (secretid) {
        s->proxypassword = qcrypto_secret_lookup_as_utf8(secretid, errp);
        if (!s->proxypassword) {
            goto out_noclean;
        }
    }

    if (!inited) {
        curl_global_init(CURL_GLOBAL_ALL);
        inited = 1;
+23 −1
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@
#include "sysemu/sysemu.h"
#include "qmp-commands.h"
#include "qapi/qmp/qstring.h"
#include "crypto/secret.h"

#include <iscsi/iscsi.h>
#include <iscsi/scsi-lowlevel.h>
@@ -1080,6 +1081,8 @@ static void parse_chap(struct iscsi_context *iscsi, const char *target,
    QemuOpts *opts;
    const char *user = NULL;
    const char *password = NULL;
    const char *secretid;
    char *secret = NULL;

    list = qemu_find_opts("iscsi");
    if (!list) {
@@ -1099,8 +1102,20 @@ static void parse_chap(struct iscsi_context *iscsi, const char *target,
        return;
    }

    secretid = qemu_opt_get(opts, "password-secret");
    password = qemu_opt_get(opts, "password");
    if (!password) {
    if (secretid && password) {
        error_setg(errp, "'password' and 'password-secret' properties are "
                   "mutually exclusive");
        return;
    }
    if (secretid) {
        secret = qcrypto_secret_lookup_as_utf8(secretid, errp);
        if (!secret) {
            return;
        }
        password = secret;
    } else if (!password) {
        error_setg(errp, "CHAP username specified but no password was given");
        return;
    }
@@ -1108,6 +1123,8 @@ static void parse_chap(struct iscsi_context *iscsi, const char *target,
    if (iscsi_set_initiator_username_pwd(iscsi, user, password)) {
        error_setg(errp, "Failed to set initiator username and password");
    }

    g_free(secret);
}

static void parse_header_digest(struct iscsi_context *iscsi, const char *target,
@@ -1857,6 +1874,11 @@ static QemuOptsList qemu_iscsi_opts = {
            .name = "password",
            .type = QEMU_OPT_STRING,
            .help = "password for CHAP authentication to target",
        },{
            .name = "password-secret",
            .type = QEMU_OPT_STRING,
            .help = "ID of the secret providing password for CHAP "
                    "authentication to target",
        },{
            .name = "header-digest",
            .type = QEMU_OPT_STRING,
+212 −141

File changed.

Preview size limit exceeded, changes collapsed.

+12 −0
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@
#include <nfsc/libnfs.h>

#define QEMU_NFS_MAX_READAHEAD_SIZE 1048576
#define QEMU_NFS_MAX_DEBUG_LEVEL 2

typedef struct NFSClient {
    struct nfs_context *context;
@@ -333,6 +334,17 @@ static int64_t nfs_client_open(NFSClient *client, const char *filename,
                val = QEMU_NFS_MAX_READAHEAD_SIZE;
            }
            nfs_set_readahead(client->context, val);
#endif
#ifdef LIBNFS_FEATURE_DEBUG
        } else if (!strcmp(qp->p[i].name, "debug")) {
            /* limit the maximum debug level to avoid potential flooding
             * of our log files. */
            if (val > QEMU_NFS_MAX_DEBUG_LEVEL) {
                error_report("NFS Warning: Limiting NFS debug level"
                             " to %d", QEMU_NFS_MAX_DEBUG_LEVEL);
                val = QEMU_NFS_MAX_DEBUG_LEVEL;
            }
            nfs_set_debug(client->context, val);
#endif
        } else {
            error_setg(errp, "Unknown NFS parameter name: %s",
Loading