Commit 059ff0fb authored by Xiao Guangrong, committed by Dr. David Alan Gilbert
Browse files

migration: introduce control_save_page()



Abstract the common function control_save_page() to cleanup the code,
no logic is changed

Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-6-xiaoguangrong@tencent.com>
Signed-off-by: default avatarDr. David Alan Gilbert <dgilbert@redhat.com>
parent 34ab9e97
Loading
Loading
Loading
Loading
+89 −85
Original line number Diff line number Diff line
@@ -974,6 +974,44 @@ static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
}

/*
 * @pages: the number of pages written by the control path,
 *        < 0 - error
 *        > 0 - number of pages written
 *
 * Return true if the pages has been saved, otherwise false is returned.
 */
static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                              int *pages)
{
    uint64_t bytes_xmit = 0;
    int ret;

    *pages = -1;
    ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
                                &bytes_xmit);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        *pages = 1;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        return true;
    }

    if (bytes_xmit > 0) {
        ram_counters.normal++;
    } else if (bytes_xmit == 0) {
        ram_counters.duplicate++;
    }

    return true;
}

/**
 * ram_save_page: send the given page to the stream
 *
@@ -990,39 +1028,20 @@ static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    /* In doubt sent page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(rs->f, block->offset,
                           offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        pages = 1;
    if (control_save_page(rs, block, offset, &pages)) {
        return pages;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                ram_counters.normal++;
            } else if (bytes_xmit == 0) {
                ram_counters.duplicate++;
            }
        }
    } else {
    pages = save_zero_page(rs, block, offset);
    if (pages > 0) {
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
@@ -1041,7 +1060,6 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
            send_async = false;
        }
    }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
@@ -1174,29 +1192,16 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
                                    bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit = 0;
    uint8_t *p;
    int ret;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;

    p = block->host + offset;

    ret = ram_control_save_page(rs->f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                ram_counters.normal++;
            } else if (bytes_xmit == 0) {
                ram_counters.duplicate++;
            }
    if (control_save_page(rs, block, offset, &pages)) {
        return pages;
    }
    } else {

    /* When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in last block should have been sent
@@ -1232,7 +1237,6 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
            ram_release_pages(block->idstr, offset, pages);
        }
    }
    }

    return pages;
}