Commit ab08440a authored by Peter Maydell

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180702' into staging

Assorted tlb and tb caching fixes

# gpg: Signature made Mon 02 Jul 2018 17:03:07 BST
# gpg:                using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20180702:
  cpu: Assert asidx_from_attrs return value in range
  accel/tcg: Avoid caching overwritten tlb entries
  accel/tcg: Don't treat invalid TLB entries as needing recheck
  accel/tcg: Correct "is this a TLB miss" check in get_page_addr_code()
  tcg: Define and use new tlb_hit() and tlb_hit_page() functions
  translate-all: fix locking of TBs whose two pages share the same physical page

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents e14dcc9c 9c8c334b
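
The common thread in this series is the open-coded TLB hit test, which several call sites had each written out slightly differently (one of them incorrectly, see the get_page_addr_code() hunk below). For orientation, here is the shape of the softmmu fast path that all of the diffs below touch, as a minimal self-contained C sketch; the types and constants are illustrative stand-ins, not QEMU's real declarations:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins for QEMU's types and TLB geometry. */
typedef uint64_t target_ulong;
#define TARGET_PAGE_BITS  12
#define TARGET_PAGE_MASK  (~(target_ulong)((1u << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK  (1u << 3)     /* a flag bit below the page size */
#define CPU_TLB_SIZE      256

typedef struct {
    target_ulong addr_read, addr_write, addr_code;
} CPUTLBEntry;

/* The predicate the series centralizes: the page numbers must match
 * AND no flag bit (e.g. TLB_INVALID_MASK) may survive the mask. */
static bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return (addr & TARGET_PAGE_MASK)
           == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/* Fast-path shape of a softmmu access: direct-mapped index by virtual
 * page number, then a single compare; everything else is slow path. */
static bool fast_path_hit(const CPUTLBEntry *tlb, target_ulong addr)
{
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    return tlb_hit(tlb[index].addr_read, addr);
}

Folding TLB_INVALID_MASK into the comparison mask is what lets one compare check both "same page" and "entry valid": flushed entries are memset to -1, so the invalid bit always survives the mask and defeats the equality.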
accel/tcg/cputlb.c  +40 −36
@@ -235,20 +235,30 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
 }
 
+static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
+                                        target_ulong page)
+{
+    return tlb_hit_page(tlb_entry->addr_read, page) ||
+           tlb_hit_page(tlb_entry->addr_write, page) ||
+           tlb_hit_page(tlb_entry->addr_code, page);
+}
+
-static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
+static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page)
 {
-    if (addr == (tlb_entry->addr_read &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_write &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_code &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (tlb_hit_page_anyprot(tlb_entry, page)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }
 
+static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx,
+                                       target_ulong page)
+{
+    int k;
+    for (k = 0; k < CPU_VTLB_SIZE; k++) {
+        tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page);
+    }
+}
+
 static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUArchState *env = cpu->env_ptr;
@@ -274,14 +284,7 @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
-    }
-
-    /* check whether there are entries that need to be flushed in the vtlb */
-    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        int k;
-        for (k = 0; k < CPU_VTLB_SIZE; k++) {
-            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
-        }
+        tlb_flush_vtlb_page(env, mmu_idx, addr);
     }
 
     tb_flush_jmp_cache(cpu, addr);
@@ -313,7 +316,6 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
     int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     int mmu_idx;
-    int i;
 
     assert_cpu_is_self(cpu);
 
@@ -323,11 +325,7 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
             tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
-
-            /* check whether there are vltb entries that need to be flushed */
-            for (i = 0; i < CPU_VTLB_SIZE; i++) {
-                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
-            }
+            tlb_flush_vtlb_page(env, mmu_idx, addr);
         }
     }
 
@@ -612,10 +610,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     target_ulong address;
     target_ulong code_address;
     uintptr_t addend;
-    CPUTLBEntry *te, *tv, tn;
+    CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
-    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
 
     assert_cpu_is_self(cpu);
@@ -657,19 +654,28 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
     }
 
+    /* Make sure there's no cached translation for the new page.  */
+    tlb_flush_vtlb_page(env, mmu_idx, vaddr_page);
+
     code_address = address;
     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                             paddr_page, xlat, prot, &address);
 
     index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     te = &env->tlb_table[mmu_idx][index];
-    /* do not discard the translation in te, evict it into a victim tlb */
-    tv = &env->tlb_v_table[mmu_idx][vidx];
 
-    /* addr_write can race with tlb_reset_dirty_range */
-    copy_tlb_helper(tv, te, true);
+    /*
+     * Only evict the old entry to the victim tlb if it's for a
+     * different page; otherwise just overwrite the stale data.
+     */
+    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
+        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
 
-    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
+        /* Evict the old entry into the victim tlb.  */
+        copy_tlb_helper(tv, te, true);
+        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
+    }
 
     /* refill the tlb */
     /*
@@ -960,14 +966,14 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)

     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = cpu_mmu_index(env, true);
-    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
-                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
+    if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) {
         if (!VICTIM_TLB_HIT(addr_read, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
         }
     }
 
-    if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
+    if (unlikely((env->tlb_table[mmu_idx][index].addr_code &
+                  (TLB_RECHECK | TLB_INVALID_MASK)) == TLB_RECHECK)) {
         /*
          * This is a TLB_RECHECK access, where the MMU protection
          * covers a smaller range than a target page, and we must
@@ -1046,8 +1052,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -1091,8 +1096,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Check TLB entry and enforce page permissions.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
accel/tcg/softmmu_template.h  +6 −10
@@ -123,8 +123,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -191,8 +190,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -286,8 +284,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -322,7 +319,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -364,8 +361,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -400,7 +396,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
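
All six hunks in this template swap an open-coded compare for tlb_hit()/tlb_hit_page(); the two tlb_hit_page() sites guard the unaligned-store slow path, which must translate both pages touched by the access before any byte is written. A condensed sketch of that guard, reusing the toy declarations above (store_pages_hit is illustrative; the real checks are expanded from softmmu_template.h per access size):

/* True if every page touched by a 'size'-byte store at 'addr'
 * currently hits in the toy TLB; otherwise a refill is needed
 * before any byte may be written. */
static bool store_pages_hit(const CPUTLBEntry *tlb,
                            target_ulong addr, unsigned size)
{
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    if (!tlb_hit(tlb[index].addr_write, addr)) {
        return false;                         /* first page misses */
    }
    if (((addr + size - 1) & TARGET_PAGE_MASK) != (addr & TARGET_PAGE_MASK)) {
        /* The access spills into a second page: probe it with the
         * page-aligned variant, as the helpers above do. */
        target_ulong page2 = (addr + size - 1) & TARGET_PAGE_MASK;
        size_t index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

        if (!tlb_hit_page(tlb[index2].addr_write, page2)) {
            return false;                     /* second page misses */
        }
    }
    return true;
}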
accel/tcg/translate-all.c  +25 −7
@@ -669,9 +669,15 @@ static inline void page_lock_tb(const TranslationBlock *tb)

 static inline void page_unlock_tb(const TranslationBlock *tb)
 {
-    page_unlock(page_find(tb->page_addr[0] >> TARGET_PAGE_BITS));
+    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+
+    page_unlock(p1);
     if (unlikely(tb->page_addr[1] != -1)) {
-        page_unlock(page_find(tb->page_addr[1] >> TARGET_PAGE_BITS));
+        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+
+        if (p2 != p1) {
+            page_unlock(p2);
+        }
     }
 }

@@ -850,22 +856,34 @@ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
 {
     PageDesc *p1, *p2;
+    tb_page_addr_t page1;
+    tb_page_addr_t page2;
 
     assert_memory_lock();
-    g_assert(phys1 != -1 && phys1 != phys2);
-    p1 = page_find_alloc(phys1 >> TARGET_PAGE_BITS, alloc);
+    g_assert(phys1 != -1);
+
+    page1 = phys1 >> TARGET_PAGE_BITS;
+    page2 = phys2 >> TARGET_PAGE_BITS;
+
+    p1 = page_find_alloc(page1, alloc);
     if (ret_p1) {
         *ret_p1 = p1;
     }
     if (likely(phys2 == -1)) {
         page_lock(p1);
         return;
+    } else if (page1 == page2) {
+        page_lock(p1);
+        if (ret_p2) {
+            *ret_p2 = p1;
+        }
+        return;
     }
-    p2 = page_find_alloc(phys2 >> TARGET_PAGE_BITS, alloc);
+    p2 = page_find_alloc(page2, alloc);
     if (ret_p2) {
         *ret_p2 = p2;
     }
-    if (phys1 < phys2) {
+    if (page1 < page2) {
         page_lock(p1);
         page_lock(p2);
     } else {
@@ -1623,7 +1641,7 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
         tb = existing_tb;
     }
 
-    if (p2) {
+    if (p2 && p2 != p) {
         page_unlock(p2);
     }
     page_unlock(p);
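
The page_lock_pair rewrite fixes the case named by the last bullet of the pull request: a TB whose two guest pages translate to the same physical page. The old code asserted phys1 != phys2 and would otherwise have taken the same non-recursive page lock twice; the new code locks once when the page numbers coincide, and otherwise locks in ascending page-number order so two threads can never deadlock ABBA-style. A self-contained sketch of that discipline (PageDesc and page_lock are toy stand-ins, not the real translate-all.c structures):

#include <stdint.h>

typedef uint64_t tb_page_addr_t;
typedef struct { int locked; } PageDesc;     /* toy descriptor */

static void page_lock(PageDesc *p)           /* toy non-recursive lock */
{
    p->locked = 1;
}

static void lock_tb_pages(tb_page_addr_t page1, PageDesc *p1,
                          tb_page_addr_t page2, PageDesc *p2)
{
    if (p2 == NULL || page1 == page2) {
        page_lock(p1);               /* one distinct PageDesc: lock once */
    } else if (page1 < page2) {      /* fixed order prevents ABBA deadlock */
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

The unlock side must mirror the same collapse, which is exactly what the p2 != p1 checks added to page_unlock_tb and tb_link_page above do.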
include/exec/cpu-all.h  +23 −0
@@ -339,6 +339,29 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_FLAGS_MASK  (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
                          | TLB_RECHECK)
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
 #endif /* !CONFIG_USER_ONLY */
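
A small worked check of the two predicates just defined. Because a flushed entry is memset to -1 and TLB_INVALID_MASK is kept in the comparison mask, an invalid entry can never equal a page-aligned address, so validity and page match fold into one compare. The toy values below are illustrative, not QEMU's actual bit assignments:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t target_ulong;
#define TARGET_PAGE_BITS  12
#define TARGET_PAGE_MASK  (~(target_ulong)((1u << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK  (1u << 3)    /* illustrative bit position */

static bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

static bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

int main(void)
{
    target_ulong entry = 0x40000;         /* valid entry for page 0x40000 */

    assert(tlb_hit(entry, 0x40123));      /* same page, unaligned addr: hit */
    assert(!tlb_hit(entry, 0x41123));     /* different page: miss */
    assert(!tlb_hit(entry | TLB_INVALID_MASK, 0x40123)); /* invalidated: miss */
    assert(!tlb_hit((target_ulong)-1, 0x40123));         /* flushed (-1): miss */
    return 0;
}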
include/exec/cpu_ldst.h  +1 −2
@@ -422,8 +422,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
         g_assert_not_reached();
     }
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         return NULL;
     }