Commit 044d4e1a authored by Hanweidong, committed by Stefano Stabellini
Browse files

xen-mapcache: pass the right size argument to test_bits



Compute the correct size for test_bits().
qemu_get_ram_ptr() and qemu_safe_ram_ptr() will call xen_map_cache()
with a size of 0 if the requested address is in RAM.  Then
xen_map_cache() will pass the size 0 to test_bits() for checking if the
corresponding pfn was mapped in cache. But test_bits() will always
return 1 when size is 0, without doing any bit testing. Actually, for
this case, test_bits() should check one bit. So this patch introduces
a __test_bit_size which is greater than 0 and a multiple of
XC_PAGE_SIZE, so that test_bits() works correctly with
__test_bit_size >> XC_PAGE_SHIFT as its size.

Signed-off-by: Zhenguo Wang <wangzhenguo@huawei.com>
Signed-off-by: Weidong Han <hanweidong@huawei.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
parent e2deee3e
Loading
Loading
Loading
Loading
+22 −4
Original line number Diff line number Diff line
@@ -200,6 +200,7 @@ uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr __size = size;
    hwaddr __test_bit_size = size;
    bool translated = false;

tryagain:
@@ -208,9 +209,23 @@ tryagain:

    trace_xen_map_cache(phys_addr);

    /* __test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        __test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (__test_bit_size % XC_PAGE_SIZE) {
            __test_bit_size += XC_PAGE_SIZE - (__test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        __test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !__size) {
        !lock && !__size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  __test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }
@@ -229,7 +244,8 @@ tryagain:

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != __size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 __test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
@@ -241,13 +257,15 @@ tryagain:
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != __size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    __test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, __size, address_index);
        }
    }

    if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
    if(!test_bits(address_offset >> XC_PAGE_SHIFT,
                __test_bit_size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {