Commit 042938f4 authored by Peter Maydell

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20180926a' into staging



Migration pull 2018-09-26

This supersedes Juan's pull from the 13th

# gpg: Signature made Wed 26 Sep 2018 18:07:30 BST
# gpg:                using RSA key 0516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>"
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20180926a:
  migration/ram.c: Avoid taking address of fields in packed MultiFDInit_t struct
  migration: fix the compression code
  migration: fix QEMUFile leak
  tests/migration: Speed up the test on ppc64
  migration: cleanup in error paths in loadvm
  migration/postcopy: Clear have_listen_thread
  tests/migration: Add migration-test header file
  tests/migration: Support cross compilation in generating boot header file
  tests/migration: Convert x86 boot block compilation script into Makefile
  migration: use save_page_use_compression in flush_compressed_data
  migration: show the statistics of compression
  migration: do not flush_compressed_data at the end of iteration
  Add a hint message to loadvm and exits on failure
  migration: handle the error condition properly
  migration: fix calculating xbzrle_counters.cache_miss_rate
  migration/rdma: Fix uninitialised rdma_return_path

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 567ea808 341ba0df
+13 −0
@@ -271,6 +271,19 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                       info->xbzrle_cache->overflow);
    }

    if (info->has_compression) {
        monitor_printf(mon, "compression pages: %" PRIu64 " pages\n",
                       info->compression->pages);
        monitor_printf(mon, "compression busy: %" PRIu64 "\n",
                       info->compression->busy);
        monitor_printf(mon, "compression busy rate: %0.2f\n",
                       info->compression->busy_rate);
        monitor_printf(mon, "compressed size: %" PRIu64 "\n",
                       info->compression->compressed_size);
        monitor_printf(mon, "compression rate: %0.2f\n",
                       info->compression->compression_rate);
    }

    if (info->has_cpu_throttle_percentage) {
        monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n",
                       info->cpu_throttle_percentage);
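
With compression enabled, the new branch above makes the HMP "info migrate" command print five extra lines. Illustrative output only, with made-up values formatted per the monitor_printf() calls shown:

    compression pages: 131072 pages
    compression busy: 4096
    compression busy rate: 0.04
    compressed size: 268435456
    compression rate: 2.00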
+16 −1
@@ -758,6 +758,18 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_use_compression()) {
        info->has_compression = true;
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
                                    compression_counters.compressed_size;
        info->compression->compression_rate =
                                    compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
@@ -2268,7 +2280,10 @@ out:
             */
            if (postcopy_pause_return_path_thread(ms)) {
                /* Reload rp, reset the rest */
-                rp = ms->rp_state.from_dst_file;
+                if (rp != ms->rp_state.from_dst_file) {
+                    qemu_fclose(rp);
+                    rp = ms->rp_state.from_dst_file;
+                }
                ms->rp_state.error = false;
                goto retry;
            }
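
The guard above is the QEMUFile leak fix from the commit list: on every pause/recover cycle the return-path thread used to reload rp from ms->rp_state.from_dst_file without closing the stale handle. A minimal sketch of the close-before-reload pattern, using stdio's FILE as a stand-in for QEMUFile (hypothetical names):

    #include <stdio.h>

    /* Adopt the freshly opened handle, closing the stale one first so
     * that repeated recovery cycles do not leak a handle each time. */
    static FILE *reload_handle(FILE *current, FILE *latest)
    {
        if (current != latest) {
            fclose(current);    /* drop the stale handle */
            current = latest;
        }
        return current;
    }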
+91 −42
@@ -301,10 +301,19 @@ struct RAMState {
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
-    /* number of iterations at the beginning of period */
-    uint64_t iterations_prev;
-    /* Iterations since start */
-    uint64_t iterations;

    /* compression statistics since the beginning of the period */
    /* number of times no free thread was available to compress data */
    uint64_t compress_thread_busy_prev;
    /* amount of bytes after compression */
    uint64_t compressed_size_prev;
    /* number of compressed pages */
    uint64_t compress_pages_prev;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
@@ -338,6 +347,8 @@ struct PageSearchStatus {
};
typedef struct PageSearchStatus PageSearchStatus;

CompressionStats compression_counters;

struct CompressParam {
    bool done;
    bool quit;
@@ -420,28 +431,14 @@ static void *do_data_compress(void *opaque)
    return NULL;
}

-static inline void terminate_compression_threads(void)
-{
-    int idx, thread_count;
-
-    thread_count = migrate_compress_threads();
-
-    for (idx = 0; idx < thread_count; idx++) {
-        qemu_mutex_lock(&comp_param[idx].mutex);
-        comp_param[idx].quit = true;
-        qemu_cond_signal(&comp_param[idx].cond);
-        qemu_mutex_unlock(&comp_param[idx].mutex);
-    }
-}
-
static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

-    if (!migrate_use_compression()) {
+    if (!migrate_use_compression() || !comp_param) {
        return;
    }
-    terminate_compression_threads();

    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
@@ -451,6 +448,12 @@ static void compress_threads_save_cleanup(void)
        if (!comp_param[i].file) {
            break;
        }

        qemu_mutex_lock(&comp_param[i].mutex);
        comp_param[i].quit = true;
        qemu_cond_signal(&comp_param[i].cond);
        qemu_mutex_unlock(&comp_param[i].mutex);

        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
@@ -648,8 +651,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
        return -1;
    }

-    be32_to_cpus(&msg.magic);
-    be32_to_cpus(&msg.version);
+    msg.magic = be32_to_cpu(msg.magic);
+    msg.version = be32_to_cpu(msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
@@ -734,7 +737,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
    RAMBlock *block;
    int i;

-    be32_to_cpus(&packet->magic);
+    packet->magic = be32_to_cpu(packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
@@ -742,7 +745,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
        return -1;
    }

-    be32_to_cpus(&packet->version);
+    packet->version = be32_to_cpu(packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
@@ -752,7 +755,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)

    p->flags = be32_to_cpu(packet->flags);

-    be32_to_cpus(&packet->size);
+    packet->size = be32_to_cpu(packet->size);
    if (packet->size > migrate_multifd_page_count()) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected maximum size %d",
@@ -1592,21 +1595,42 @@ uint64_t ram_pagesize_summary(void)

static void migration_update_rates(RAMState *rs, int64_t end_time)
{
-    uint64_t iter_count = rs->iterations - rs->iterations_prev;
+    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
    double compressed_size;

    /* calculate period counters */
    ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                / (end_time - rs->time_last_bitmap_sync);

-    if (!iter_count) {
+    if (!page_count) {
        return;
    }

    if (migrate_use_xbzrle()) {
        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
-            rs->xbzrle_cache_miss_prev) / iter_count;
+            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
    }

    if (migrate_use_compression()) {
        compression_counters.busy_rate = (double)(compression_counters.busy -
            rs->compress_thread_busy_prev) / page_count;
        rs->compress_thread_busy_prev = compression_counters.busy;

        compressed_size = compression_counters.compressed_size -
                          rs->compressed_size_prev;
        if (compressed_size) {
            double uncompressed_size = (compression_counters.pages -
                                    rs->compress_pages_prev) * TARGET_PAGE_SIZE;

            /* Compression-Ratio = Uncompressed-size / Compressed-size */
            compression_counters.compression_rate =
                                        uncompressed_size / compressed_size;

            rs->compress_pages_prev = compression_counters.pages;
            rs->compressed_size_prev = compression_counters.compressed_size;
        }
    }
}

static void migration_bitmap_sync(RAMState *rs)
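
A worked example of the ratio above, with made-up numbers: if 1000 pages of 4096 bytes each went through the compression threads during the period (4,096,000 bytes uncompressed) and 1,024,000 compressed bytes came out, the period's compression_rate is 4,096,000 / 1,024,000 = 4.0; if no free thread was found for 50 of 1000 handled target pages, busy_rate is 50 / 1000 = 0.05.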
@@ -1662,7 +1686,7 @@ static void migration_bitmap_sync(RAMState *rs)

        migration_update_rates(rs, end_time);

-        rs->iterations_prev = rs->iterations;
+        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
@@ -1888,17 +1912,25 @@ exit:
static void
update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
{
-    ram_counters.transferred += bytes_xmit;

    if (param->zero_page) {
        ram_counters.duplicate++;
        return;
    }
+    ram_counters.transferred += bytes_xmit;

    /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
    compression_counters.compressed_size += bytes_xmit - 8;
    compression_counters.pages++;
}

static bool save_page_use_compression(RAMState *rs);

static void flush_compressed_data(RAMState *rs)
{
    int idx, len, thread_count;

-    if (!migrate_use_compression()) {
+    if (!save_page_use_compression(rs)) {
        return;
    }
    thread_count = migrate_compress_threads();
@@ -1996,17 +2028,22 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one on the destination.
             *
             * Also, if xbzrle is on, stop using data compression at this
             * point. In theory, xbzrle can do better than compression.
             */
            flush_compressed_data(rs);

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
-            if (migrate_use_xbzrle()) {
-                /* If xbzrle is on, stop using the data compression at this
-                 * point. In theory, xbzrle can do better than compression.
-                 */
-                flush_compressed_data(rs);
-            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
@@ -2259,6 +2296,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
        return true;
    }

    compression_counters.busy++;
    return false;
}

@@ -2372,7 +2410,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
 *
 * Called within an RCU critical section.
 *
- * Returns the number of pages written where zero means no dirty pages
+ * Returns the number of pages written where zero means no dirty pages,
+ * or negative on error
 *
 * @rs: current RAM state
 * @last_stage: if we are at the completion stage
@@ -3196,7 +3235,13 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
            done = 1;
            break;
        }
-        rs->iterations++;
+
+        if (pages < 0) {
+            qemu_file_set_error(f, pages);
+            break;
+        }
+
+        rs->target_page_count += pages;

        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
@@ -3212,7 +3257,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
        }
        i++;
    }
-    flush_compressed_data(rs);
    rcu_read_unlock();

    /*
@@ -3238,7 +3282,7 @@ out:
/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success
 * Returns zero to indicate success or negative on error
 *
 * Called with iothread lock
 *
@@ -3249,6 +3293,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;

    rcu_read_lock();

@@ -3269,6 +3314,10 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
        if (pages == 0) {
            break;
        }
        if (pages < 0) {
            ret = pages;
            break;
        }
    }

    flush_compressed_data(rs);
@@ -3280,7 +3329,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

-    return 0;
+    return ret;
}

static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
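
The changes to ram_save_iterate() and ram_save_complete() above follow one convention: the page-saving helpers return the number of pages written, zero when nothing is dirty, and now a negative value on error, which the caller latches and propagates instead of silently dropping. A self-contained sketch of that convention (hypothetical names, simulated page source):

    #include <errno.h>

    /* Stand-in page saver: returns 1 page written, 0 when nothing is
     * dirty, or a negative errno value on failure. */
    static int save_one_page(int *dirty)
    {
        if (*dirty < 0) {
            return -EIO;        /* simulated write failure */
        }
        if (*dirty == 0) {
            return 0;           /* nothing left to send */
        }
        (*dirty)--;
        return 1;
    }

    /* Iterate like ram_save_iterate(): accumulate the page count, but
     * stop and propagate the first negative return. */
    static int save_iterate(int dirty)
    {
        int total = 0, pages;

        while ((pages = save_one_page(&dirty)) != 0) {
            if (pages < 0) {
                return pages;   /* error code reaches the caller */
            }
            total += pages;
        }
        return total;
    }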
+1 −0
@@ -36,6 +36,7 @@

extern MigrationStats ram_counters;
extern XBZRLECacheStats xbzrle_counters;
extern CompressionStats compression_counters;

int xbzrle_cache_resize(int64_t new_size, Error **errp);
uint64_t ram_bytes_remaining(void);
+1 −1
@@ -4012,7 +4012,7 @@ static void rdma_accept_incoming_migration(void *opaque)
void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
    int ret;
-    RDMAContext *rdma, *rdma_return_path;
+    RDMAContext *rdma, *rdma_return_path = NULL;
    Error *local_err = NULL;

    trace_rdma_start_incoming_migration();
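
The one-line fix matters because rdma_start_incoming_migration() has a shared error path that disposes of both contexts; if setting up the primary context fails first, rdma_return_path would reach that path uninitialised. Initialising it to NULL makes the cleanup unconditionally safe, as in this stand-alone sketch of the pattern (hypothetical names, malloc/free standing in for the RDMA setup/teardown):

    #include <stdlib.h>

    int start_incoming(void)
    {
        char *rdma = NULL;
        char *return_path = NULL;   /* without this, 'goto err' below
                                       could free an uninitialised pointer */

        rdma = malloc(64);
        if (!rdma) {
            goto err;               /* return_path is still NULL here */
        }
        return_path = malloc(64);
        if (!return_path) {
            goto err;
        }
        return 0;

    err:
        free(return_path);          /* free(NULL) is a defined no-op */
        free(rdma);
        return -1;
    }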