Commit 298ad7b5 authored by Peter Maydell

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190714' into staging



Fixes for 3 tcg bugs

# gpg: Signature made Sun 14 Jul 2019 12:11:01 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190714:
  tcg: Release mmap_lock on translation fault
  tcg: Remove duplicate #if !defined(CODE_ACCESS)
  tcg: Remove cpu_ld*_code_ra
  tcg: Introduce set/clear_helper_retaddr
  include/qemu/atomic.h: Add signal_barrier
  tcg/aarch64: Fix output of extract2 opcodes
  tcg: Fix constant folding of INDEX_op_extract2_i32

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 46cd24e7 52ba13f0
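
For context on the two extract2 entries at the end of the list above: as I read the TCG documentation, extract2_i32 dest, t1, t2, pos yields the 32-bit field that starts at bit pos of the 64-bit concatenation t2:t1. A plain-C reference version, illustrative only and not taken from either patch:

#include <stdint.h>

/* Reference semantics for extract2_i32 as described above: t2 is the
 * high word, t1 the low word, 0 <= pos <= 32.  Not QEMU code. */
static uint32_t extract2_i32_ref(uint32_t t1, uint32_t t2, unsigned pos)
{
    uint64_t concat = ((uint64_t)t2 << 32) | t1;
    return (uint32_t)(concat >> pos);
}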
+53 −24
@@ -64,27 +64,56 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
    CPUState *cpu = current_cpu;
    CPUClass *cc;
    unsigned long address = (unsigned long)info->si_addr;
    MMUAccessType access_type;
    MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;

    /* We must handle PC addresses from two different sources:
     * a call return address and a signal frame address.
     *
     * Within cpu_restore_state_from_tb we assume the former and adjust
     * the address by -GETPC_ADJ so that the address is within the call
     * insn so that addr does not accidentally match the beginning of the
     * next guest insn.
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
     * However, when the PC comes from the signal frame, it points to
     * the actual faulting host insn and not a call insn.  Subtracting
     * GETPC_ADJ in that case may accidentally match the previous guest insn.
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
     * So for the later case, adjust forward to compensate for what
     * will be done later by cpu_restore_state_from_tb.
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
    if (helper_retaddr) {
        pc = helper_retaddr;
    } else {
        pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        pc = 0;
        access_type = MMU_INST_FETCH;
        mmap_unlock();
        break;
    }

    /* For synchronous signals we expect to be coming from the vCPU
@@ -134,7 +163,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
             * currently executing TB was modified and must be exited
             * immediately.  Clear helper_retaddr for next execution.
             */
            helper_retaddr = 0;
            clear_helper_retaddr();
            cpu_exit_tb_from_sighandler(cpu, old_set);
            /* NORETURN */

@@ -152,10 +181,9 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
     * an exception.  Undo signal and retaddr state prior to longjmp.
     */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    helper_retaddr = 0;
    clear_helper_retaddr();

    cc = CPU_GET_CLASS(cpu);
    access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
    cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
    g_assert_not_reached();
}
@@ -682,14 +710,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    helper_retaddr = retaddr;
    return g2h(addr);
    void *ret = g2h(addr);
    set_helper_retaddr(retaddr);
    return ret;
}

/* Macro to call the above, with local variables from the use context.  */
#define ATOMIC_MMU_DECLS do {} while (0)
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
#define ATOMIC_MMU_CLEANUP do { helper_retaddr = 0; } while (0)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define ATOMIC_NAME(X)   HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
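
The new switch in handle_cpu_signal keys entirely off helper_retaddr, a thread-local value that code touching host memory publishes before the access. A self-contained sketch of that protocol outside QEMU follows; the handler, variable names, the published constant and the faulting address are all illustrative stand-ins, not QEMU names:

#include <inttypes.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for QEMU's __thread uintptr_t helper_retaddr. */
static _Thread_local uintptr_t retaddr_demo;

static void publish_retaddr(uintptr_t ra)
{
    retaddr_demo = ra;
    /* Compiler-only fence: the store must not be sunk below the access. */
    atomic_signal_fence(memory_order_seq_cst);
}

static void retire_retaddr(void)
{
    /* Compiler-only fence: the access must complete before the clear. */
    atomic_signal_fence(memory_order_seq_cst);
    retaddr_demo = 0;
}

static void on_segv(int sig)
{
    (void)sig;
    /* QEMU picks an unwind strategy from this value; here we just report it.
     * (printf is not async-signal-safe; fine for a throwaway demo.) */
    printf("SIGSEGV with published retaddr = 0x%" PRIxPTR "\n", retaddr_demo);
    _exit(0);
}

int main(void)
{
    signal(SIGSEGV, on_segv);
    publish_retaddr(0x1234);        /* stand-in for GETPC() */
    *(volatile int *)16 = 1;        /* deliberately fault */
    retire_retaddr();               /* not reached */
    return 0;
}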
+20 −0
@@ -89,6 +89,26 @@ typedef target_ulong abi_ptr;

extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}

/* In user-only mode we provide only the _code and _data accessors. */

#define MEMSUFFIX _data
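
The asymmetry between the two new helpers is deliberate: set_helper_retaddr() fences after its store and clear_helper_retaddr() fences before its store, so the published value cannot be sunk below, nor the clearing store hoisted above, the access they bracket. A sketch of the calling discipline, using the two helpers defined above plus stand-ins (host_ptr for g2h(), caller_pc for GETPC(), and a hypothetical function name; assumes <stdint.h> and <string.h>):

static inline uint32_t checked_ldl(const void *host_ptr, uintptr_t caller_pc)
{
    uint32_t val;

    set_helper_retaddr(caller_pc);    /* store ra, then fence: the SIGSEGV
                                         handler must already see ra if the
                                         copy below faults */
    memcpy(&val, host_ptr, sizeof(val));   /* may raise SIGSEGV */
    clear_helper_retaddr();           /* fence, then store 0: the copy must
                                         be done before ra stops being
                                         visible to the handler */
    return val;
}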
+27 −13
@@ -64,61 +64,75 @@
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#if !defined(CODE_ACCESS)
#ifdef CODE_ACCESS
    RES_TYPE ret;
    set_helper_retaddr(1);
    ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
    clear_helper_retaddr();
    return ret;
#else
    trace_guest_mem_before_exec(
        env_cpu(env), ptr,
        trace_mem_build_info(SHIFT, false, MO_TE, false));
#endif
    return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
#endif
}

#ifndef CODE_ACCESS
static inline RES_TYPE
glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                  abi_ptr ptr,
                                                  uintptr_t retaddr)
{
    RES_TYPE ret;
    helper_retaddr = retaddr;
    set_helper_retaddr(retaddr);
    ret = glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
    helper_retaddr = 0;
    clear_helper_retaddr();
    return ret;
}
#endif

#if DATA_SIZE <= 2
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#if !defined(CODE_ACCESS)
#ifdef CODE_ACCESS
    int ret;
    set_helper_retaddr(1);
    ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
    clear_helper_retaddr();
    return ret;
#else
    trace_guest_mem_before_exec(
        env_cpu(env), ptr,
        trace_mem_build_info(SHIFT, true, MO_TE, false));
#endif
    return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
#endif
}

#ifndef CODE_ACCESS
static inline int
glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                  abi_ptr ptr,
                                                  uintptr_t retaddr)
{
    int ret;
    helper_retaddr = retaddr;
    set_helper_retaddr(retaddr);
    ret = glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
    helper_retaddr = 0;
    clear_helper_retaddr();
    return ret;
}
#endif
#endif /* CODE_ACCESS */
#endif /* DATA_SIZE <= 2 */

#ifndef CODE_ACCESS
static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
                                      RES_TYPE v)
{
#if !defined(CODE_ACCESS)
    trace_guest_mem_before_exec(
        env_cpu(env), ptr,
        trace_mem_build_info(SHIFT, false, MO_TE, true));
#endif
    glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
}

@@ -128,9 +142,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                  RES_TYPE v,
                                                  uintptr_t retaddr)
{
    helper_retaddr = retaddr;
    set_helper_retaddr(retaddr);
    glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
    helper_retaddr = 0;
    clear_helper_retaddr();
}
#endif
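
Because the glue() macros obscure what is being declared, here is roughly what one instantiation looks like after this change, hand-expanded for DATA_SIZE 1 with CODE_ACCESS defined (so USUFFIX is ub, MEMSUFFIX is _code and RES_TYPE is uint32_t); illustrative, not preprocessor output:

static inline uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    set_helper_retaddr(1);          /* sentinel: "fault is a code fetch" */
    ret = ldub_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}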

+11 −0
@@ -88,6 +88,13 @@
#define smp_read_barrier_depends()   barrier()
#endif

/*
 * A signal barrier forces all pending local memory ops to be observed before
 * a SIGSEGV is delivered to the *same* thread.  In practice this is exactly
 * the same as barrier(), but since we have the correct builtin, use it.
 */
#define signal_barrier()    __atomic_signal_fence(__ATOMIC_SEQ_CST)

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
@@ -308,6 +315,10 @@
#define smp_read_barrier_depends()   barrier()
#endif

#ifndef signal_barrier
#define signal_barrier()    barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
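
For readers who know the C11 spellings better than the GCC builtins: __atomic_signal_fence() is the builtin underneath C11's atomic_signal_fence(), and the contrast with a thread fence is the whole point here. A small comparison sketch, not from the patch:

#include <stdatomic.h>

void fences_compared(void)
{
    /* Compiler-only: typically no fence instruction is emitted.  Enough
     * when the observer is a signal handler on the same thread. */
    atomic_signal_fence(memory_order_seq_cst);

    /* Cross-thread: may emit a real barrier instruction (e.g. mfence, dmb),
     * which is what the smp_mb()-style macros above are for. */
    atomic_thread_fence(memory_order_seq_cst);
}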
+4 −4
@@ -554,7 +554,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
    /* ??? Enforce alignment.  */
    uint64_t *haddr = g2h(addr);

    helper_retaddr = ra;
    set_helper_retaddr(ra);
    o0 = ldq_le_p(haddr + 0);
    o1 = ldq_le_p(haddr + 1);
    oldv = int128_make128(o0, o1);
@@ -564,7 +564,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
        stq_le_p(haddr + 0, int128_getlo(newv));
        stq_le_p(haddr + 1, int128_gethi(newv));
    }
    helper_retaddr = 0;
    clear_helper_retaddr();
#else
    int mem_idx = cpu_mmu_index(env, false);
    TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
@@ -624,7 +624,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
    /* ??? Enforce alignment.  */
    uint64_t *haddr = g2h(addr);

    helper_retaddr = ra;
    set_helper_retaddr(ra);
    o1 = ldq_be_p(haddr + 0);
    o0 = ldq_be_p(haddr + 1);
    oldv = int128_make128(o0, o1);
@@ -634,7 +634,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
        stq_be_p(haddr + 0, int128_gethi(newv));
        stq_be_p(haddr + 1, int128_getlo(newv));
    }
    helper_retaddr = 0;
    clear_helper_retaddr();
#else
    int mem_idx = cpu_mmu_index(env, false);
    TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
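
The ARM helper above is a typical consumer of the new pair: it performs several direct host loads and stores between set and clear. A stripped-down sketch of the same shape, with plain 64-bit pairs instead of Int128 and hypothetical names, just to show where the fences sit relative to the possibly-faulting accesses (assumes <stdbool.h>, <stdint.h> and the helpers introduced earlier in this series):

static bool paired_cas64(uint64_t *haddr, uint64_t old0, uint64_t old1,
                         uint64_t new0, uint64_t new1, uintptr_t ra)
{
    bool success;

    set_helper_retaddr(ra);          /* published before the first load */
    success = (haddr[0] == old0 && haddr[1] == old1);
    if (success) {
        haddr[0] = new0;
        haddr[1] = new1;
    }
    clear_helper_retaddr();          /* cleared only after the last store */
    return success;
}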