Commit aa8dc044 authored by Juan Quintela
Browse files

migration: synchronize memory bitmap 64bits at a time



We use the old code if the bitmaps are not aligned

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
parent 791fa2a2
Loading
Loading
Loading
Loading
+29 −9
Original line number Diff line number Diff line
@@ -50,6 +50,7 @@
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
@@ -376,7 +377,25 @@ static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
@@ -388,6 +407,7 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
            }
        }
    }
}


/* Needs iothread lock! */