Commit 4222147d authored by Paolo Bonzini, committed by Christian Borntraeger
Browse files

kvm: extract kvm_log_clear_one_slot



We may need to clear the dirty bitmap for more than one KVM memslot.
First do some code movement with no semantic change.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Message-Id: <20190924144751.24149-2-imammedo@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
[fixup line break]
parent ee35e968
Loading
Loading
Loading
Loading
+57 −46
Original line number Diff line number Diff line
@@ -575,55 +575,14 @@ out:
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
    KVMSlot *mem = NULL;
    int ret, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return 0;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return 0;
    }

    kvm_slots_lock(kml);

    /* Find any possible slot that covers the section */
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->start_addr <= start &&
            start + size <= mem->start_addr + mem->memory_size) {
            break;
        }
    }

    /*
     * We should always find one memslot until this point, otherwise
     * there could be something wrong from the upper layer
     */
    assert(mem && i != s->nr_slots);
    int ret;

    /*
     * We need to extend either the start or the size or both to
@@ -694,7 +653,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (kml->as_id << 16);
    d.slot = mem->slot | (as_id << 16);

    if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
        ret = -errno;
@@ -717,6 +676,58 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}


/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size;
    KVMSlot *mem = NULL;
    int ret, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return 0;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return 0;
    }

    kvm_slots_lock(kml);

    /* Find any possible slot that covers the section */
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->start_addr <= start &&
            start + size <= mem->start_addr + mem->memory_size) {
            break;
        }
    }

    /*
     * We should always find one memslot until this point, otherwise
     * there could be something wrong from the upper layer
     */
    assert(mem && i != s->nr_slots);
    ret = kvm_log_clear_one_slot(mem, kml->as_id, start, size);

    kvm_slots_unlock(kml);