Commit deee6ff7 authored by Peter Maydell

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190925' into staging



Fixes for TLB_BSWAP
Conversion of NOTDIRTY and ROM handling to cputlb
Followup cleanups to cputlb

# gpg: Signature made Wed 25 Sep 2019 19:41:17 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190925:
  cputlb: Pass retaddr to tb_check_watchpoint
  cputlb: Pass retaddr to tb_invalidate_phys_page_fast
  cputlb: Remove tb_invalidate_phys_page_range is_cpu_write_access
  cputlb: Remove cpu->mem_io_vaddr
  cputlb: Handle TLB_NOTDIRTY in probe_access
  cputlb: Merge and move memory_notdirty_write_{prepare,complete}
  cputlb: Partially inline memory_region_section_get_iotlb
  cputlb: Move NOTDIRTY handling from I/O path to TLB path
  cputlb: Move ROM handling from I/O path to TLB path
  exec: Adjust notdirty tracing
  cputlb: Introduce TLB_BSWAP
  cputlb: Split out load/store_memop
  cputlb: Use qemu_build_not_reached in load/store_helpers
  qemu/compiler.h: Add qemu_build_not_reached
  cputlb: Disable __always_inline__ without optimization
  exec: Use TARGET_PAGE_BITS_MIN for TLB flags

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 1329132d ae57db63
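
A recurring idiom in the cputlb changes below is folding the new TLB_BSWAP flag into the MemOp without a branch: the helpers compute need_swap and then use op ^ (need_swap * MO_BSWAP). A minimal standalone sketch of that idiom (the MO_* values here are illustrative stand-ins, not quoted from this diff):

    #include <stdio.h>

    /* Stand-in MemOp encoding: low bits = log2(size), bit 3 = byte swap. */
    enum { MO_SIZE = 3, MO_BSWAP = 8 };

    static unsigned adjust_memop(unsigned op, int tlb_bswap_flag)
    {
        /* Single-byte accesses never need swapping; wider accesses flip
         * the MO_BSWAP bit when the page's TLB_BSWAP flag is set.  The
         * multiply by 0/1 selects 0 or MO_BSWAP without a branch. */
        int need_swap = ((1 << (op & MO_SIZE)) > 1) && tlb_bswap_flag;
        return op ^ (need_swap * MO_BSWAP);
    }

    int main(void)
    {
        printf("%#x\n", adjust_memop(2, 1));  /* 4-byte op -> 0xa (swapped) */
        printf("%#x\n", adjust_memop(0, 1));  /* 1-byte op -> 0 (unchanged) */
        return 0;
    }
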
accel/tcg/cputlb.c: +206 −136
@@ -33,6 +33,7 @@
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -577,7 +578,8 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
{
    uintptr_t addr = tlb_entry->addr_write;

-    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
+    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
+                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
@@ -704,13 +706,14 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
-    target_ulong code_address;
+    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
+    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

@@ -737,22 +740,48 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
-        /* Force the access through the I/O slow path.  */
-        address |= TLB_MMIO;
+        address |= TLB_BSWAP;
    }
-    if (!memory_region_is_ram(section->mr) &&
-        !memory_region_is_romd(section->mr)) {
-        /* IO memory case */
-        address |= TLB_MMIO;

+    is_ram = memory_region_is_ram(section->mr);
+    is_romd = memory_region_is_romd(section->mr);

+    if (is_ram || is_romd) {
+        /* RAM and ROMD both have associated host memory. */
+        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
+        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

+    write_address = address;
+    if (is_ram) {
+        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+        /*
+         * Computing is_clean is expensive; avoid all that unless
+         * the page is actually writable.
+         */
+        if (prot & PAGE_WRITE) {
+            if (section->readonly) {
+                write_address |= TLB_DISCARD_WRITE;
+            } else if (cpu_physical_memory_is_clean(iotlb)) {
+                write_address |= TLB_NOTDIRTY;
+            }
+        }
+    } else {
-        /* TLB_MMIO for rom/romd handled below */
-        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
+        /* I/O or ROMD */
+        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
+        /*
+         * Writes to romd devices must go through MMIO to enable write.
+         * Reads to romd devices go through the ram_ptr found above,
+         * but of course reads to I/O must go through MMIO.
+         */
+        write_address |= TLB_MMIO;
+        if (!is_romd) {
+            address = write_address;
+        }
+    }

-    code_address = address;
-    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
-                                            paddr_page, xlat, prot, &address);
    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);
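
Condensing the rewritten classification above into a table (recoverable entirely from this hunk):

    kind    addend (host ptr)    read path    write path
    RAM     ram_ptr + xlat       direct       direct; TLB_NOTDIRTY if clean;
                                              TLB_DISCARD_WRITE if readonly
    ROMD    ram_ptr + xlat       direct       TLB_MMIO
    I/O     0 (none)             TLB_MMIO     TLB_MMIO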

@@ -792,8 +821,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
-     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
-     *  + the offset within section->mr of the page base (otherwise)
+     *  + the ram_addr_t of the page base of the target RAM (RAM)
+     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
@@ -816,24 +845,14 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
    }

    if (prot & PAGE_EXEC) {
-        tn.addr_code = code_address;
+        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
-        if ((memory_region_is_ram(section->mr) && section->readonly)
-            || memory_region_is_romd(section->mr)) {
-            /* Write access calls the I/O callback.  */
-            tn.addr_write = address | TLB_MMIO;
-        } else if (memory_region_is_ram(section->mr)
-                   && cpu_physical_memory_is_clean(
-                       memory_region_get_ram_addr(section->mr) + xlat)) {
-            tn.addr_write = address | TLB_NOTDIRTY;
-        } else {
-            tn.addr_write = address;
-        }
+        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
@@ -901,19 +920,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
    bool locked = false;
    MemTxResult r;

-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

-    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
@@ -947,17 +961,12 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
    bool locked = false;
    MemTxResult r;

-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
-    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
@@ -1075,6 +1084,33 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
    return qemu_ram_addr_from_host_nofail(p);
}

+static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
+                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
+{
+    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

+    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

+    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+        struct page_collection *pages
+            = page_collection_lock(ram_addr, ram_addr + size);
+        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
+        page_collection_unlock(pages);
+    }

+    /*
+     * Set both VGA and migration bits for simplicity and to remove
+     * the notdirty callback faster.
+     */
+    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

+    /* We remove the notdirty callback only if the code has been flushed. */
+    if (!cpu_physical_memory_is_clean(ram_addr)) {
+        trace_memory_notdirty_set_dirty(mem_vaddr);
+        tlb_set_dirty(cpu, mem_vaddr);
+    }
+}

/*
 * Probe for whether the specified guest access is permitted. If it is not
 * permitted then an exception will be taken in the same way as if this
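
notdirty_write() above replaces the memory_notdirty_write_{prepare,complete} pair that lived in the I/O path. A rough standalone model of its three steps, with hypothetical types and helpers standing in for QEMU's dirty-bitmap machinery:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { bool code, vga, migration; } PageDirty;  /* stand-in */

    static void invalidate_tbs_on_page(PageDirty *d)
    {
        puts("flush translated code on this page");
        d->code = true;   /* no TBs left, so the code client is dirty */
    }

    /* Mirrors notdirty_write() above, step for step. */
    static void notdirty_write_model(PageDirty *d, bool *tlb_notdirty)
    {
        if (!d->code) {                  /* ..._get_dirty_flag(CODE)     */
            invalidate_tbs_on_page(d);   /* tb_invalidate_phys_page_fast */
        }
        d->vga = d->migration = true;    /* DIRTY_CLIENTS_NOCODE         */
        if (d->code && d->vga && d->migration) {   /* !..._is_clean      */
            *tlb_notdirty = false;       /* tlb_set_dirty: fast path again */
        }
    }

    int main(void)
    {
        PageDirty d = { false, false, false };
        bool tlb_notdirty = true;
        notdirty_write_model(&d, &tlb_notdirty);
        return tlb_notdirty ? 1 : 0;     /* 0: slow path removed */
    }
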
@@ -1126,16 +1162,24 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
        return NULL;
    }

+    if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
+        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

+        /* Reject I/O access, or other required slow-path.  */
+        if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
+            return NULL;
+        }

        /* Handle watchpoints.  */
        if (tlb_addr & TLB_WATCHPOINT) {
            cpu_check_watchpoint(env_cpu(env), addr, size,
-                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
-                             wp_access, retaddr);
+                                 iotlbentry->attrs, wp_access, retaddr);
        }

-    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
-        /* I/O access */
-        return NULL;
+        /* Handle clean RAM pages.  */
+        if (tlb_addr & TLB_NOTDIRTY) {
+            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+        }
    }

    return (void *)((uintptr_t)addr + entry->addend);
@@ -1194,8 +1238,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr,
-                               NotDirtyInfo *ndi)
+                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1255,12 +1298,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

-    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
-        ndi->active = true;
-        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
-                                      qemu_ram_addr_from_host_nofail(hostaddr),
-                                      1 << s_bits);
+        notdirty_write(env_cpu(env), addr, 1 << s_bits,
+                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;
@@ -1281,7 +1321,30 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);

-static inline uint64_t __attribute__((always_inline))
+static inline uint64_t QEMU_ALWAYS_INLINE
+load_memop(const void *haddr, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        return ldub_p(haddr);
+    case MO_BEUW:
+        return lduw_be_p(haddr);
+    case MO_LEUW:
+        return lduw_le_p(haddr);
+    case MO_BEUL:
+        return (uint32_t)ldl_be_p(haddr);
+    case MO_LEUL:
+        return (uint32_t)ldl_le_p(haddr);
+    case MO_BEQ:
+        return ldq_be_p(haddr);
+    case MO_LEQ:
+        return ldq_le_p(haddr);
+    default:
+        qemu_build_not_reached();
+    }
+}

+static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
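
load_memop pairs QEMU_ALWAYS_INLINE with qemu_build_not_reached(): each caller passes a compile-time-constant MemOp, so after inlining the switch folds to a single case and the default must be provably dead. A sketch of that pattern (the attribute-based mechanism is an assumption about the qemu/compiler.h patch in this pull, with illustrative names):

    /* If a call to this survives optimization, i.e. the compiler could
     * not prove it unreachable, compilation fails; without optimization
     * fall back to a runtime abort. */
    #if defined(__GNUC__) && defined(__OPTIMIZE__)
    extern void __attribute__((noreturn, error("code path is reachable")))
    build_not_reached_sketch(void);
    #else
    #include <stdlib.h>
    #define build_not_reached_sketch() abort()
    #endif

    static inline unsigned __attribute__((always_inline))
    load_sketch(const unsigned char *p, int sz_log2)
    {
        switch (sz_log2) {    /* constant in every caller after inlining */
        case 0:
            return p[0];
        case 1:
            return p[0] | (p[1] << 8);
        default:
            build_not_reached_sketch();  /* folds away, or build error */
        }
    }

    int main(void)
    {
        unsigned char buf[2] = { 0x34, 0x12 };
        return load_sketch(buf, 1) == 0x1234 ? 0 : 1;  /* folds to one load */
    }
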
@@ -1321,6 +1384,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
+        bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
@@ -1334,17 +1398,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);

-            /* The backing page may or may not require I/O.  */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
-            }
        }

+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
-        return io_readx(env, iotlbentry, mmu_idx, addr,
-                        retaddr, access_type, op);
+        if (likely(tlb_addr & TLB_MMIO)) {
+            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+                            access_type, op ^ (need_swap * MO_BSWAP));
+        }

+        haddr = (void *)((uintptr_t)addr + entry->addend);

+        /*
+         * Keep these two load_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this.  ;-)
+         */
+        if (unlikely(need_swap)) {
+            return load_memop(haddr, op ^ MO_BSWAP);
+        }
+        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
@@ -1371,35 +1445,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

- do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        res = ldub_p(haddr);
-        break;
-    case MO_BEUW:
-        res = lduw_be_p(haddr);
-        break;
-    case MO_LEUW:
-        res = lduw_le_p(haddr);
-        break;
-    case MO_BEUL:
-        res = (uint32_t)ldl_be_p(haddr);
-        break;
-    case MO_LEUL:
-        res = (uint32_t)ldl_le_p(haddr);
-        break;
-    case MO_BEQ:
-        res = ldq_be_p(haddr);
-        break;
-    case MO_LEQ:
-        res = ldq_le_p(haddr);
-        break;
-    default:
-        g_assert_not_reached();
-    }

-    return res;
+    return load_memop(haddr, op);
}

/*
@@ -1530,7 +1577,37 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
 * Store Helpers
 */

-static inline void __attribute__((always_inline))
+static inline void QEMU_ALWAYS_INLINE
+store_memop(void *haddr, uint64_t val, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        stb_p(haddr, val);
+        break;
+    case MO_BEUW:
+        stw_be_p(haddr, val);
+        break;
+    case MO_LEUW:
+        stw_le_p(haddr, val);
+        break;
+    case MO_BEUL:
+        stl_be_p(haddr, val);
+        break;
+    case MO_LEUL:
+        stl_le_p(haddr, val);
+        break;
+    case MO_BEQ:
+        stq_be_p(haddr, val);
+        break;
+    case MO_LEQ:
+        stq_le_p(haddr, val);
+        break;
+    default:
+        qemu_build_not_reached();
+    }
+}

+static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
@@ -1564,6 +1641,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
+        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
@@ -1577,16 +1655,39 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

+        /* Handle I/O access.  */
+        if (tlb_addr & TLB_MMIO) {
+            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+                      op ^ (need_swap * MO_BSWAP));
+            return;
+        }

-            /* The backing page may or may not require I/O.  */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
+        /* Ignore writes to ROM.  */
+        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
+            return;
        }

+        /* Handle clean RAM pages.  */
+        if (tlb_addr & TLB_NOTDIRTY) {
+            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+        }

-        /* Handle I/O access.  */
-        io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
+        haddr = (void *)((uintptr_t)addr + entry->addend);

+        /*
+         * Keep these two store_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this.  ;-)
+         */
+        if (unlikely(need_swap)) {
+            store_memop(haddr, val, op ^ MO_BSWAP);
+        } else {
+            store_memop(haddr, val, op);
+        }
        return;
    }

@@ -1655,34 +1756,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
        return;
    }

- do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        stb_p(haddr, val);
-        break;
-    case MO_BEUW:
-        stw_be_p(haddr, val);
-        break;
-    case MO_LEUW:
-        stw_le_p(haddr, val);
-        break;
-    case MO_BEUL:
-        stl_be_p(haddr, val);
-        break;
-    case MO_LEUL:
-        stl_le_p(haddr, val);
-        break;
-    case MO_BEQ:
-        stq_be_p(haddr, val);
-        break;
-    case MO_LEQ:
-        stq_le_p(haddr, val);
-        break;
-    default:
-        g_assert_not_reached();
-        break;
-    }
+    store_memop(haddr, val, op);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
@@ -1733,14 +1808,9 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
-#define ATOMIC_MMU_CLEANUP                              \
-    do {                                                \
-        if (unlikely(ndi.active)) {                     \
-            memory_notdirty_write_complete(&ndi);       \
-        }                                               \
-    } while (0)
+#define ATOMIC_MMU_DECLS
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+#define ATOMIC_MMU_CLEANUP

#define DATA_SIZE 1
#include "atomic_template.h"
@@ -1768,7 +1838,7 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"
accel/tcg/translate-all.c: +24 −27
@@ -1889,7 +1889,7 @@ static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
-                                      int is_cpu_write_access)
+                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
@@ -1897,9 +1897,9 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
-    int current_tb_not_found = is_cpu_write_access;
+    bool current_tb_not_found = retaddr != 0;
+    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
-    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
@@ -1931,24 +1931,21 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
-                current_tb_not_found = 0;
-                current_tb = NULL;
-                if (cpu->mem_io_pc) {
+                current_tb_not_found = false;
                /* now we have a real cpu fault */
-                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
-                }
+                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
-                /* If we are modifying the current TB, we must stop
-                its execution. We could be more precise by checking
-                that the modification is after the current PC, but it
-                would require a specialized function to partially
-                restore the CPU state */

-                current_tb_modified = 1;
-                cpu_restore_state_from_tb(cpu, current_tb,
-                                          cpu->mem_io_pc, true);
+                /*
+                 * If we are modifying the current TB, we must stop
+                 * its execution. We could be more precise by checking
+                 * that the modification is after the current PC, but it
+                 * would require a specialized function to partially
+                 * restore the CPU state.
+                 */
+                current_tb_modified = true;
+                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
@@ -1983,8 +1980,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 *
 * Called with mmap_lock held for user-mode emulation
 */
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
-                                   int is_cpu_write_access)
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;
@@ -1996,8 +1992,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
        return;
    }
    pages = page_collection_lock(start, end);
-    tb_invalidate_phys_page_range__locked(pages, p, start, end,
-                                          is_cpu_write_access);
+    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

@@ -2044,7 +2039,8 @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end)
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len)
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr)
{
    PageDesc *p;

@@ -2071,7 +2067,8 @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
        }
    } else {
    do_invalidate:
-        tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
+        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
+                                              retaddr);
    }
}
#else
@@ -2145,16 +2142,16 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
#endif

/* user-mode: call with mmap_lock held */
-void tb_check_watchpoint(CPUState *cpu)
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

-    tb = tcg_tb_lookup(cpu->mem_io_pc);
+    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
-        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
+        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
accel/tcg/translate-all.h: +4 −4
@@ -27,10 +27,10 @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
                                             tb_page_addr_t end);
void page_collection_unlock(struct page_collection *set);
void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len);
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
-                                   int is_cpu_write_access);
-void tb_check_watchpoint(CPUState *cpu);
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr);
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);

#ifdef CONFIG_USER_ONLY
int page_unprotect(target_ulong address, uintptr_t pc);
+7 −153 (changes collapsed: preview size limit exceeded)

hw/core/cpu.c: +0 −1
@@ -261,7 +261,6 @@ static void cpu_common_reset(CPUState *cpu)
    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
-    cpu->mem_io_vaddr = 0;
    cpu->icount_extra = 0;
    atomic_set(&cpu->icount_decr_ptr->u32, 0);
    cpu->can_do_io = 1;