Commit 65dacaa0 authored by Xiao Guangrong, committed by Dr. David Alan Gilbert

migration: introduce save_normal_page()



It sends the page directly to the stream, without checking for a zero page and
without applying xbzrle or compression.

Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-10-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
parent d7400a34
+30 −20
@@ -1012,6 +1012,34 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
     return true;
 }
 
+/*
+ * directly send the page to the stream
+ *
+ * Returns the number of pages written.
+ *
+ * @rs: current RAM state
+ * @block: block that contains the page we want to send
+ * @offset: offset inside the block for the page
+ * @buf: the page to be sent
+ * @async: send the page asynchronously
+ */
+static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
+                            uint8_t *buf, bool async)
+{
+    ram_counters.transferred += save_page_header(rs, rs->f, block,
+                                                 offset | RAM_SAVE_FLAG_PAGE);
+    if (async) {
+        qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
+                              migrate_release_ram() &
+                              migration_in_postcopy());
+    } else {
+        qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
+    }
+    ram_counters.transferred += TARGET_PAGE_SIZE;
+    ram_counters.normal++;
+    return 1;
+}
+
 /**
  * ram_save_page: send the given page to the stream
  *
@@ -1052,18 +1080,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)

     /* XBZRLE overflow or normal page */
     if (pages == -1) {
-        ram_counters.transferred +=
-            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_PAGE);
-        if (send_async) {
-            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
-                                  migrate_release_ram() &
-                                  migration_in_postcopy());
-        } else {
-            qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
-        }
-        ram_counters.transferred += TARGET_PAGE_SIZE;
-        pages = 1;
-        ram_counters.normal++;
+        pages = save_normal_page(rs, block, offset, p, send_async);
     }
 
     XBZRLE_cache_unlock();
@@ -1194,14 +1211,7 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
          * we post it as normal page as compression will take much
          * CPU resource.
          */
-        ram_counters.transferred += save_page_header(rs, rs->f, block,
-                                        offset | RAM_SAVE_FLAG_PAGE);
-        qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
-                              migrate_release_ram() &
-                              migration_in_postcopy());
-        ram_counters.transferred += TARGET_PAGE_SIZE;
-        ram_counters.normal++;
-        pages = 1;
+        pages = save_normal_page(rs, block, offset, p, true);
     } else {
         pages = compress_page_with_multi_thread(rs, block, offset);
     }
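
For illustration only, here is a minimal, self-contained sketch (not QEMU code) of the pattern this commit applies: the duplicated "write page header, write page body, bump the counters" sequence from ram_save_page() and ram_save_compressed_page() is folded into one helper that takes an async flag. Every name below (put_header, put_buffer, put_buffer_async, send_normal_page, PAGE_SIZE, the counters) is a hypothetical stand-in for the corresponding QEMU API, chosen only to keep the example compilable on its own.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096                 /* stand-in for TARGET_PAGE_SIZE */

static uint64_t bytes_transferred;     /* stand-in for ram_counters.transferred */
static uint64_t normal_pages;          /* stand-in for ram_counters.normal */

/* hypothetical stand-ins for save_page_header() and qemu_put_buffer[_async]() */
static size_t put_header(FILE *f, uint64_t offset)
{
    return fwrite(&offset, 1, sizeof(offset), f);
}

static void put_buffer(FILE *f, const uint8_t *buf, size_t len)
{
    fwrite(buf, 1, len, f);
}

static void put_buffer_async(FILE *f, const uint8_t *buf, size_t len)
{
    /* a real implementation would queue the buffer; here we just write it */
    fwrite(buf, 1, len, f);
}

/* the single helper that replaces both open-coded sequences */
static int send_normal_page(FILE *f, uint64_t offset, uint8_t *buf, bool async)
{
    bytes_transferred += put_header(f, offset);
    if (async) {
        put_buffer_async(f, buf, PAGE_SIZE);
    } else {
        put_buffer(f, buf, PAGE_SIZE);
    }
    bytes_transferred += PAGE_SIZE;
    normal_pages++;
    return 1;                          /* number of pages written */
}

int main(void)
{
    uint8_t page[PAGE_SIZE];
    FILE *f = fopen("/dev/null", "wb");

    if (!f) {
        return 1;
    }
    memset(page, 0xab, sizeof(page));
    /* both former call sites now reduce to a single call */
    send_normal_page(f, 0x1000, page, true);    /* asynchronous path */
    send_normal_page(f, 0x2000, page, false);   /* synchronous path */
    printf("transferred=%" PRIu64 " normal=%" PRIu64 "\n",
           bytes_transferred, normal_pages);
    fclose(f);
    return 0;
}

Within the sketch's own assumptions this prints transferred=8208 normal=2, i.e. two pages plus two eight-byte headers, mirroring how save_normal_page() above accounts for the header and the page body separately.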