Commit b6b23081 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20190123a' into staging



Migration pull 2019-01-23

New pages-per-second stat, a new test, and a bunch
of fixes and tidy ups.

# gpg: Signature made Wed 23 Jan 2019 15:54:48 GMT
# gpg:                using RSA key 0516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>"
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20190123a:
  migration: introduce pages-per-second
  vmstate: constify SaveVMHandlers
  tests: add /vmstate/simple/array
  migration/rdma: unregister fd handler
  migration: unify error handling for process_incoming_migration_co
  migration: add more error handling for postcopy_ram_enable_notify
  migration: multifd_save_cleanup() can't fail, simplify
  migration: fix the multifd code when receiving less channels
  Fix segmentation fault when qemu_signal_init fails

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 3e821195 aecbfe9c
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -236,6 +236,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                       info->ram->page_size >> 10);
        monitor_printf(mon, "multifd bytes: %" PRIu64 " kbytes\n",
                       info->ram->multifd_bytes >> 10);
        monitor_printf(mon, "pages-per-second: %" PRIu64 "\n",
                       info->ram->pages_per_second);

        if (info->ram->dirty_pages_rate) {
            monitor_printf(mon, "dirty pages rate: %" PRIu64 " pages\n",
+1 −1
Original line number Diff line number Diff line
@@ -72,7 +72,7 @@ int register_savevm_live(DeviceState *dev,
                         const char *idstr,
                         int instance_id,
                         int version_id,
                         SaveVMHandlers *ops,
                         const SaveVMHandlers *ops,
                         void *opaque);

void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
+6 −5
Original line number Diff line number Diff line
@@ -30,6 +30,7 @@
void migration_channel_process_incoming(QIOChannel *ioc)
{
    MigrationState *s = migrate_get_current();
    Error *local_err = NULL;

    trace_migration_set_incoming_channel(
        ioc, object_get_typename(OBJECT(ioc)));
@@ -38,14 +39,14 @@ void migration_channel_process_incoming(QIOChannel *ioc)
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_process_incoming(s, ioc, &local_err);
    } else {
        migration_ioc_process_incoming(ioc, &local_err);
    }

    if (local_err) {
        error_report_err(local_err);
    }
    } else {
        migration_ioc_process_incoming(ioc);
    }
}


+31 −20
Original line number Diff line number Diff line
@@ -438,15 +438,13 @@ static void process_incoming_migration_co(void *opaque)
        /* Make sure all file formats flush their mutable metadata */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                    MIGRATION_STATUS_FAILED);
            error_report_err(local_err);
            exit(EXIT_FAILURE);
            goto fail;
        }

        if (colo_init_ram_cache() < 0) {
            error_report("Init ram cache failed");
            exit(EXIT_FAILURE);
            goto fail;
        }

        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
@@ -461,21 +459,23 @@ static void process_incoming_migration_co(void *opaque)
    }

    if (ret < 0) {
        Error *local_err = NULL;

        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
    return;
fail:
    local_err = NULL;
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
    qemu_fclose(mis->from_src_file);
    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
    }
    exit(EXIT_FAILURE);
}
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
}

static void migration_incoming_setup(QEMUFile *f)
{
@@ -541,7 +541,7 @@ void migration_fd_process_incoming(QEMUFile *f)
    migration_incoming_process();
}

void migration_ioc_process_incoming(QIOChannel *ioc)
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    bool start_migration;
@@ -563,9 +563,14 @@ void migration_ioc_process_incoming(QIOChannel *ioc)
         */
        start_migration = !migrate_use_multifd();
    } else {
        Error *local_err = NULL;
        /* Multiple connections */
        assert(migrate_use_multifd());
        start_migration = multifd_recv_new_channel(ioc);
        start_migration = multifd_recv_new_channel(ioc, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (start_migration) {
@@ -777,6 +782,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
    info->ram->postcopy_requests = ram_counters.postcopy_requests;
    info->ram->page_size = qemu_target_page_size();
    info->ram->multifd_bytes = ram_counters.multifd_bytes;
    info->ram->pages_per_second = s->pages_per_second;

    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
@@ -1381,7 +1387,6 @@ static void migrate_fd_cleanup(void *opaque)
    qemu_savevm_state_cleanup();

    if (s->to_dst_file) {
        Error *local_err = NULL;
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
@@ -1392,9 +1397,7 @@ static void migrate_fd_cleanup(void *opaque)
        }
        qemu_mutex_lock_iothread();

        if (multifd_save_cleanup(&local_err) != 0) {
            error_report_err(local_err);
        }
        multifd_save_cleanup();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
@@ -1563,6 +1566,7 @@ void migrate_init(MigrationState *s)
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
@@ -2881,7 +2885,7 @@ static void migration_calculate_complete(MigrationState *s)
static void migration_update_counters(MigrationState *s,
                                      int64_t current_time)
{
    uint64_t transferred, time_spent;
    uint64_t transferred, transferred_pages, time_spent;
    uint64_t current_bytes; /* bytes transferred since the beginning */
    double bandwidth;

@@ -2898,6 +2902,11 @@ static void migration_update_counters(MigrationState *s,
    s->mbps = (((double) transferred * 8.0) /
               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

    transferred_pages = ram_get_total_transferred_pages() -
                            s->iteration_initial_pages;
    s->pages_per_second = (double) transferred_pages /
                             (((double) time_spent / 1000.0));

    /*
     * if we haven't sent anything, we don't want to
     * recalculate. 10000 is a small enough number for our purposes
@@ -2910,6 +2919,7 @@ static void migration_update_counters(MigrationState *s,

    s->iteration_start_time = current_time;
    s->iteration_initial_bytes = current_bytes;
    s->iteration_initial_pages = ram_get_total_transferred_pages();

    trace_migrate_transferred(transferred, time_spent,
                              bandwidth, s->threshold_size);
@@ -3314,6 +3324,7 @@ static void migration_instance_init(Object *obj)

    ms->state = MIGRATION_STATUS_NONE;
    ms->mbps = -1;
    ms->pages_per_second = -1;
    qemu_sem_init(&ms->pause_sem, 0);
    qemu_mutex_init(&ms->error_mutex);

+10 −2
Original line number Diff line number Diff line
@@ -126,7 +126,13 @@ struct MigrationState
     */
    QemuSemaphore rate_limit_sem;

    /* bytes already send at the beggining of current interation */
    /* pages already send at the beginning of current iteration */
    uint64_t iteration_initial_pages;

    /* pages transferred per second */
    double pages_per_second;

    /* bytes already send at the beginning of current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of current iteration */
    int64_t iteration_start_time;
@@ -229,7 +235,7 @@ struct MigrationState
void migrate_set_state(int *state, int old_state, int new_state);

void migration_fd_process_incoming(QEMUFile *f);
void migration_ioc_process_incoming(QIOChannel *ioc);
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);

bool  migration_has_all_channels(void);
@@ -271,6 +277,8 @@ bool migrate_use_block_incremental(void);
int migrate_max_cpu_throttle(void);
bool migrate_use_return_path(void);

uint64_t ram_get_total_transferred_pages(void);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
Loading