Commit be8b02ed authored by Xiao Guangrong, committed by Dr. David Alan Gilbert

migration: fix calculating xbzrle_counters.cache_miss_rate



As Peter pointed out:
| - xbzrle_counters.cache_miss is done in save_xbzrle_page(), so it's
|   per-guest-page granularity
|
| - RAMState.iterations is done for each ram_find_and_save_block(), so
|   it's per-host-page granularity
|
| An example is that when we migrate a 2M huge page in the guest, we
| will only increase the RAMState.iterations by 1 (since
| ram_find_and_save_block() will be called once), but we might increase
| xbzrle_counters.cache_miss for 2M/4K=512 times (we'll call
| save_xbzrle_page() that many times) if all the pages got cache miss.
| Then IMHO the cache miss rate will be 512/1=51200% (while it should
| actually be just 100% cache miss).

He also suggested that, since xbzrle_counters.cache_miss_rate is the
only user of rs->iterations, we can adapt it to count target guest
pages instead.

After that, rename 'iterations' to 'target_page_count' to better reflect
its meaning.
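
To make the arithmetic concrete, here is a small standalone C sketch
(illustrative only, not part of the commit; the values follow Peter's
2MB huge-page example above):

/* Illustration: one 2MB huge page, 4KB target pages, every page a miss. */
#include <stdio.h>

int main(void)
{
    double cache_miss   = 512.0;  /* xbzrle_counters.cache_miss delta      */
    double iterations   = 1.0;    /* ram_find_and_save_block() called once */
    double target_pages = 512.0;  /* 2MB / 4KB target pages                */

    /* old denominator (per-host-page iterations): 512 / 1 = 51200% */
    printf("old miss rate: %.0f%%\n", cache_miss / iterations * 100);

    /* new denominator (per-target-page count): 512 / 512 = 100% */
    printf("new miss rate: %.0f%%\n", cache_miss / target_pages * 100);
    return 0;
}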

Suggested-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180903092644.25812-3-xiaoguangrong@tencent.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
parent 449f91b2
migration/ram.c +9 −9
@@ -301,10 +301,10 @@ struct RAMState {
     uint64_t num_dirty_pages_period;
     /* xbzrle misses since the beginning of the period */
     uint64_t xbzrle_cache_miss_prev;
-    /* number of iterations at the beginning of period */
-    uint64_t iterations_prev;
-    /* Iterations since start */
-    uint64_t iterations;
+    /* total handled target pages at the beginning of period */
+    uint64_t target_page_count_prev;
+    /* total handled target pages since start */
+    uint64_t target_page_count;
     /* number of dirty bits in the bitmap */
     uint64_t migration_dirty_pages;
     /* protects modification of the bitmap */
@@ -1592,19 +1592,19 @@ uint64_t ram_pagesize_summary(void)
 
 static void migration_update_rates(RAMState *rs, int64_t end_time)
 {
-    uint64_t iter_count = rs->iterations - rs->iterations_prev;
+    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
 
     /* calculate period counters */
     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                 / (end_time - rs->time_last_bitmap_sync);
 
-    if (!iter_count) {
+    if (!page_count) {
         return;
     }
 
     if (migrate_use_xbzrle()) {
         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
-            rs->xbzrle_cache_miss_prev) / iter_count;
+            rs->xbzrle_cache_miss_prev) / page_count;
         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
     }
 }
@@ -1662,7 +1662,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
         migration_update_rates(rs, end_time);
 
-        rs->iterations_prev = rs->iterations;
+        rs->target_page_count_prev = rs->target_page_count;
 
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
@@ -3196,7 +3196,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             done = 1;
             break;
         }
-        rs->iterations++;
+        rs->target_page_count += pages;
 
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.
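
For readers unfamiliar with the prev/current snapshot pattern the diff
relies on, here is a minimal self-contained sketch (simplified stand-ins
for the RAMState fields; not actual QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the RAMState counters in the diff above. */
struct rate_state {
    uint64_t target_page_count;       /* total handled target pages    */
    uint64_t target_page_count_prev;  /* snapshot at start of period   */
    uint64_t cache_miss;              /* total xbzrle cache misses     */
    uint64_t cache_miss_prev;         /* snapshot at start of period   */
    double   cache_miss_rate;
};

/* Mirrors the fixed migration_update_rates(): the per-period miss delta
 * is divided by the per-period *target* page delta. */
static void update_rates(struct rate_state *rs)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;

    if (!page_count) {
        return;
    }
    rs->cache_miss_rate =
        (double)(rs->cache_miss - rs->cache_miss_prev) / page_count;
    rs->cache_miss_prev = rs->cache_miss;
    /* the caller snapshots target_page_count_prev, as in the diff */
}

int main(void)
{
    struct rate_state rs = { 0 };

    rs.target_page_count = 512;   /* one 2MB huge page = 512 x 4KB */
    rs.cache_miss = 512;          /* every target page missed      */
    update_rates(&rs);
    rs.target_page_count_prev = rs.target_page_count;
    printf("cache_miss_rate = %.2f\n", rs.cache_miss_rate);  /* 1.00 */
    return 0;
}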