Commit a68725f9 authored by Peter Maydell

Merge remote-tracking branch 'remotes/juanquintela/tags/migration-pull-request' into staging



Pull request

# gpg: Signature made Mon 15 Jul 2019 14:49:41 BST
# gpg:                using RSA key 1899FF8EDEBF58CCEE034B82F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>" [full]
# gpg:                 aka "Juan Quintela <quintela@trasno.org>" [full]
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03  4B82 F487 EF18 5872 D723

* remotes/juanquintela/tags/migration-pull-request: (21 commits)
  migration: always initial RAMBlock.bmap to 1 for new migration
  migration/postcopy: remove redundant cpu_synchronize_all_post_init
  migration/postcopy: fix document of postcopy_send_discard_bm_ram()
  migration: allow private destination ram with x-ignore-shared
  migration: Split log_clear() into smaller chunks
  kvm: Support KVM_CLEAR_DIRTY_LOG
  kvm: Introduce slots lock for memory listener
  kvm: Persistent per kvmslot dirty bitmap
  kvm: Update comments for sync_dirty_bitmap
  memory: Introduce memory listener hook log_clear()
  memory: Pass mr into snapshot_and_clear_dirty
  bitmap: Add bitmap_copy_with_{src|dst}_offset()
  memory: Don't set migration bitmap when without migration
  migration: No need to take rcu during sync_dirty_bitmap
  migration/ram.c: reset complete_round when we gets a queued page
  migration/multifd: sync packet_num after all thread are done
  cutils: remove one unnecessary pointer operation
  migration/xbzrle: update cache and current_data in one place
  migration/multifd: call multifd_send_sync_main when sending RAM_SAVE_FLAG_EOS
  migration-test: rename parameter to parameter_int
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 0dc62847 40c4d4a8
accel/kvm/kvm-all.c  +240 −20
@@ -91,6 +91,7 @@ struct KVMState
    int many_ioeventfds;
    int intx_set_mask;
    bool sync_mmu;
+    bool manual_dirty_log_protect;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
@@ -138,6 +139,9 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_LAST_INFO
};

+#define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
+#define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)

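/*
 * Locking convention these helpers introduce (an illustrative sketch,
 * not part of the patch; "lookup" and "lookup_locked" are hypothetical
 * names): public entry points take the per-listener lock, while helpers
 * marked "Called with ... slots_lock held" rely on the caller:
 *
 *   static KVMSlot *lookup(KVMMemoryListener *kml, hwaddr addr)
 *   {
 *       KVMSlot *slot;
 *
 *       kvm_slots_lock(kml);
 *       slot = lookup_locked(kml, addr);   // helper expects lock held
 *       kvm_slots_unlock(kml);
 *
 *       return slot;
 *   }
 */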
int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);
@@ -165,6 +169,7 @@ int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
    return 1;
}

+/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
@@ -182,10 +187,17 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
bool kvm_has_free_slot(MachineState *ms)
{
    KVMState *s = KVM_STATE(ms->accelerator);
+    bool result;
+    KVMMemoryListener *kml = &s->memory_listener;

+    kvm_slots_lock(kml);
+    result = !!kvm_get_free_slot(kml);
+    kvm_slots_unlock(kml);

-    return kvm_get_free_slot(&s->memory_listener);
+    return result;
}

+/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);
@@ -244,18 +256,21 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
-    int i;
+    int i, ret = 0;

+    kvm_slots_lock(kml);
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
-            return 1;
+            ret = 1;
+            break;
        }
    }
+    kvm_slots_unlock(kml);

-    return 0;
+    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
@@ -396,6 +411,7 @@ static int kvm_mem_flags(MemoryRegion *mr)
    return flags;
}

+/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
@@ -414,19 +430,26 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
{
    hwaddr start_addr, size;
    KVMSlot *mem;
+    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

+    kvm_slots_lock(kml);

    mem = kvm_lookup_matching_slot(kml, start_addr, size);
    if (!mem) {
        /* We don't have a slot if we want to trap every access. */
-        return 0;
+        goto out;
    }

-    return kvm_slot_update_flags(kml, mem, section->mr);
+    ret = kvm_slot_update_flags(kml, mem, section->mr);

+out:
+    kvm_slots_unlock(kml);
+    return ret;
}

static void kvm_log_start(MemoryListener *listener,
@@ -478,13 +501,15 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
- * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
- * This function updates qemu's dirty bitmap using
- * memory_region_set_dirty().  This means all bits are set
- * to dirty.
+ * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
+ *
+ * This function will first try to fetch dirty bitmap from the kernel,
+ * and then updates qemu's dirty bitmap.
 *
- * @start_add: start of logged region.
- * @end_addr: end of logged region.
+ * NOTE: caller must be with kml->slots_lock held.
+ *
+ * @kml: the KVM memory listener object
+ * @section: the memory section to sync the dirty bitmap with
 */
static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                          MemoryRegionSection *section)
@@ -493,13 +518,14 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
    struct kvm_dirty_log d = {};
    KVMSlot *mem;
    hwaddr start_addr, size;
+    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (size) {
        mem = kvm_lookup_matching_slot(kml, start_addr, size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
-            return 0;
+            goto out;
        }

        /* XXX bad kernel interface alert
@@ -516,22 +542,178 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
-        d.dirty_bitmap = g_malloc0(size);
+        if (!mem->dirty_bmap) {
+            /* Allocate on the first log_sync, once and for all */
+            mem->dirty_bmap = g_malloc0(size);
+        }

+        d.dirty_bitmap = mem->dirty_bmap;
        d.slot = mem->slot | (kml->as_id << 16);
        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
-            g_free(d.dirty_bitmap);
-            return -1;
+            ret = -1;
+            goto out;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
-        g_free(d.dirty_bitmap);
    }
-    return 0;
+out:
+    return ret;
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
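/*
 * Worked example for the constants above (assumes a 4 KiB
 * qemu_real_host_page_size; numbers are illustrative):
 *   KVM_CLEAR_LOG_ALIGN = 4096 << 6 = 0x40000 bytes (64 pages, 256 KiB)
 *   KVM_CLEAR_LOG_MASK  = -0x40000  = ~0x3ffff
 * For a clear request that starts 0x12345000 bytes into its memslot:
 *   bmap_start  = 0x12345000 & ~0x3ffff = 0x12340000 -> page 0x12340
 *   start_delta = 0x5000 bytes                       -> 5 pages
 */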

/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    struct kvm_clear_dirty_log d;
    uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
    KVMSlot *mem = NULL;
    int ret, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return 0;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return 0;
    }

    kvm_slots_lock(kml);

    /* Find any possible slot that covers the section */
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->start_addr <= start &&
            start + size <= mem->start_addr + mem->memory_size) {
            break;
        }
    }

    /*
     * We should always find one memslot until this point, otherwise
     * there could be something wrong from the upper layer
     */
    assert(mem && i != s->nr_slots);

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
    start_delta = start - mem->start_addr - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;
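    /*
     * Continuing the worked example above: clearing 1 MiB (256 pages)
     * that starts 5 pages into the aligned chunk gives
     *   bmap_npages = DIV_ROUND_UP(0x100000 + 0x5000, 0x40000) << 6
     *               = 5 << 6 = 320 pages,
     * i.e. 261 pages of interest rounded up to five 64-page chunks,
     * then clamped to the end of the memslot if that is shorter.
     */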

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /* Fast path - start address aligns well with BITS_PER_LONG */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }
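    /*
     * In the worked example (hypothetical numbers: bmap_start = 0x12340,
     * start_delta = 5, size / psize = 256), the slow path copies 261
     * bits starting at bit 0x12340 of mem->dirty_bmap into bit 0 of
     * bmap_clear, then zeroes bits 0..4 so that only the caller's 256
     * pages can be cleared in the kernel.
     */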

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (kml->as_id << 16);

    if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
        ret = -errno;
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);

    kvm_slots_unlock(kml);

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *secion,
                                     hwaddr start, hwaddr size)
@@ -791,16 +973,20 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
          (start_addr - section->offset_within_address_space);

+    kvm_slots_lock(kml);

    if (!add) {
        mem = kvm_lookup_matching_slot(kml, start_addr, size);
        if (!mem) {
-            return;
+            goto out;
        }
        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(kml, section);
        }

        /* unregister the slot */
+        g_free(mem->dirty_bmap);
+        mem->dirty_bmap = NULL;
        mem->memory_size = 0;
        mem->flags = 0;
        err = kvm_set_user_memory_region(kml, mem, false);
@@ -809,7 +995,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
                    __func__, strerror(-err));
            abort();
        }
-        return;
+        goto out;
    }

    /* register the new slot */
@@ -825,6 +1011,9 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
                strerror(-err));
        abort();
    }

+out:
+    kvm_slots_unlock(kml);
}

static void kvm_region_add(MemoryListener *listener,
@@ -851,8 +1040,26 @@ static void kvm_log_sync(MemoryListener *listener,
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

+    kvm_slots_lock(kml);
    r = kvm_physical_sync_dirty_bitmap(kml, section);
+    kvm_slots_unlock(kml);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}
@@ -935,6 +1142,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
{
    int i;

+    qemu_mutex_init(&kml->slots_lock);
    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
    kml->as_id = as_id;

@@ -947,6 +1155,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.log_sync = kvm_log_sync;
+    kml->listener.log_clear = kvm_log_clear;
    kml->listener.priority = 10;

    memory_listener_register(&kml->listener, as);
@@ -1671,6 +1880,17 @@ static int kvm_init(MachineState *ms)
    s->coalesced_pio = s->coalesced_mmio &&
                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);

    s->manual_dirty_log_protect =
        kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
    if (s->manual_dirty_log_protect) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
        if (ret) {
            warn_report("Trying to enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 "
                        "but failed.  Falling back to the legacy mode. ");
            s->manual_dirty_log_protect = false;
        }
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
accel/kvm/trace-events  +1 −0
@@ -15,4 +15,5 @@ kvm_irqchip_release_virq(int virq) "virq %d"
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
+kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
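With the event enabled (e.g. -trace kvm_clear_dirty_log on the command line), the worked example above would produce output roughly like the following (slot number illustrative; 0x140 = 320 pages):

    kvm_clear_dirty_log slot#1 start 0x12340 size 0x140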
exec.c  +14 −1
@@ -1358,6 +1358,8 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;
+    RAMBlock *ramblock;
+    uint64_t mr_offset, mr_size;

    if (length == 0) {
        return false;
@@ -1369,6 +1371,10 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+    ramblock = qemu_get_ram_block(start);
+    /* Range sanity check on the ramblock */
+    assert(start >= ramblock->offset &&
+           start + length <= ramblock->offset + ramblock->used_length);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
@@ -1380,6 +1386,10 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
        page += num;
    }

+    mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
+    mr_size = (end - page) << TARGET_PAGE_BITS;
+    memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
@@ -1390,9 +1400,10 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
}

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
-     (ram_addr_t start, ram_addr_t length, unsigned client)
+    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
+    ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
@@ -1434,6 +1445,8 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
        tlb_reset_dirty_range_all(start, length);
    }

+    memory_region_clear_dirty_bitmap(mr, offset, length);

    return snap;
}
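With the new signature, callers hand over the MemoryRegion and an offset within it instead of computing a global ram_addr_t themselves; that is what lets the function also clear the remote (e.g. KVM) dirty bitmap for the region. A before/after sketch of a call site (mr, addr, size and the choice of DIRTY_MEMORY_VGA are illustrative):

    /* Before: caller derived the global ram address itself. */
    snap = cpu_physical_memory_snapshot_and_clear_dirty(
            memory_region_get_ram_addr(mr) + addr, size, DIRTY_MEMORY_VGA);

    /* After: pass the region plus the offset within it. */
    snap = cpu_physical_memory_snapshot_and_clear_dirty(
            mr, addr, size, DIRTY_MEMORY_VGA);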

include/exec/memory.h  +19 −0
@@ -46,6 +46,8 @@
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

+extern bool global_dirty_log;

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

@@ -414,6 +416,7 @@ struct MemoryListener {
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
@@ -1267,6 +1270,22 @@ void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region.  This can
 * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared support by the host
 * kernel.
 *
 * @mr:     the memory region to clear the dirty log upon
 * @start:  start address offset within the memory region
 * @len:    length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);
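/*
 * How the new hook threads through the tree (a sketch assembled from
 * the hunks in this pull; memory core internals abbreviated):
 *
 *   migration: clear_bmap_test_and_clear(rb, page) returns true
 *     -> memory_region_clear_dirty_bitmap(mr, start, len)
 *       -> each listener's log_clear(listener, section)
 *         -> KVM: kvm_log_clear() -> kvm_physical_log_clear()
 *           -> KVM_CLEAR_DIRTY_LOG ioctl
 */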

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
include/exec/ram_addr.h  +87 −5
@@ -51,8 +51,70 @@ struct RAMBlock {
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;

    /*
     * bitmap to track already cleared dirty bitmap.  When the bit is
     * set, it means the corresponding memory chunk needs a log-clear.
     * Set this up to non-NULL to enable the capability to postpone
     * and split clearing of dirty bitmap on the remote node (e.g.,
     * KVM).  The bitmap will be set only when doing global sync.
     *
     * NOTE: this bitmap is different comparing to the other bitmaps
     * in that one bit can represent multiple guest pages (which is
     * decided by the `clear_bmap_shift' variable below).  On
     * destination side, this should always be NULL, and the variable
     * `clear_bmap_shift' is meaningless.
     */
    unsigned long *clear_bmap;
    uint8_t clear_bmap_shift;
};

/**
 * clear_bmap_size: calculate clear bitmap size
 *
 * @pages: number of guest pages
 * @shift: guest page number shift
 *
 * Returns: number of bits for the clear bitmap
 */
static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
{
    return DIV_ROUND_UP(pages, 1UL << shift);
}
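/*
 * Scale check (worked example assuming 4 KiB target pages and a shift
 * of 18, the default used elsewhere in this series, so one bit covers
 * 2^18 pages = 1 GiB):
 *   16 GiB guest = 4M guest pages
 *   clear_bmap_size(4 * 1024 * 1024, 18) = DIV_ROUND_UP(4194304, 262144)
 *                                        = 16 bits
 * so the whole clear bitmap fits in a single unsigned long.
 */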

/**
 * clear_bmap_set: set clear bitmap for the page range
 *
 * @rb: the ramblock to operate on
 * @start: the start page number
 * @size: number of pages to set in the bitmap
 *
 * Returns: None
 */
static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
                                  uint64_t npages)
{
    uint8_t shift = rb->clear_bmap_shift;

    bitmap_set_atomic(rb->clear_bmap, start >> shift,
                      clear_bmap_size(npages, shift));
}

/**
 * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
 *
 * @rb: the ramblock to operate on
 * @page: the page number to check
 *
 * Returns: true if the bit was set, false otherwise
 */
static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
    uint8_t shift = rb->clear_bmap_shift;

    return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
}
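/*
 * The two helpers pair up across the series: the producer,
 * cpu_physical_memory_sync_dirty_bitmap() (last hunk of this file),
 * calls clear_bmap_set() for every synced range, and the consumer on
 * the migration side does the postponed clear just before sending a
 * page.  A sketch of the consumer, modelled on the migration/ram.c
 * change in this pull (variable plumbing simplified):
 *
 *   if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
 *       uint8_t shift = rb->clear_bmap_shift;
 *       hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
 *       hwaddr start = QEMU_ALIGN_DOWN((hwaddr)page << TARGET_PAGE_BITS,
 *                                      size);
 *
 *       memory_region_clear_dirty_bitmap(rb->mr, start, size);
 *   }
 */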

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ? true : false;
@@ -349,8 +411,13 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

-                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

+                if (global_dirty_log) {
+                    atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+                              temp);
+                }

                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
@@ -367,6 +434,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;

+        if (!global_dirty_log) {
+            clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
+        }

        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
@@ -394,7 +466,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
-    (ram_addr_t start, ram_addr_t length, unsigned client);
+    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
@@ -409,6 +481,7 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
}


+/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
@@ -432,8 +505,6 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

-        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

@@ -454,7 +525,18 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
            }
        }

-        rcu_read_unlock();
+        if (rb->clear_bmap) {
+            /*
+             * Postpone the dirty bitmap clear to the point before we
+             * really send the pages, also we will split the clear
+             * dirty procedure into smaller chunks.
+             */
+            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
+                           length >> TARGET_PAGE_BITS);
+        } else {
+            /* Slow path - still do that in a huge chunk */
+            memory_region_clear_dirty_bitmap(rb->mr, start, length);
+        }
    } else {
        ram_addr_t offset = rb->offset;
