Commit a8ec91f9 authored by Xiao Guangrong, committed by Dr. David Alan Gilbert
Browse files

migration: move calling control_save_page to the common place



The function is called by both ram_save_page and ram_save_target_page,
so move it to the common caller to clean up the code.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-8-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
parent 1faa5665
Loading
Loading
Loading
Loading
+8 −8
Original line number Diff line number Diff line
@@ -1037,10 +1037,6 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    if (control_save_page(rs, block, offset, &pages)) {
        return pages;
    }

    XBZRLE_cache_lock();
    pages = save_zero_page(rs, block, offset);
    if (pages > 0) {
@@ -1198,10 +1194,6 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,

    p = block->host + offset;

    if (control_save_page(rs, block, offset, &pages)) {
        return pages;
    }

    /* When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in last block should have been sent
@@ -1489,6 +1481,14 @@ err:
static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                bool last_stage)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(rs, block, offset, &res)) {
        return res;
    }

    /*
     * If xbzrle is on, stop using the data compression after first
     * round of migration even if compression is enabled. In theory,