Commit 76c64d33 authored by Paolo Bonzini
Browse files

target-i386: do not duplicate page protection checks



x86_cpu_handle_mmu_fault is currently checking twice for writability
and executability of pages; the first time to decide whether to
trigger a page fault, the second time to compute the "prot" argument
to tlb_set_page_with_attrs.

Reorganize code so that first "prot" is computed, then it is used
to check whether to raise a page fault, then finally PROT_WRITE is
removed if the D bit will have to be set.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3daa4107
Loading
Loading
Loading
Loading
+23 −42
Original line number Diff line number Diff line
@@ -890,38 +890,30 @@ do_check_protect_pse36:
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;
    if ((ptep & PG_NX_MASK) && is_write1 == 2) {
        goto do_fault_protect;
    }
    switch (mmu_idx) {
    case MMU_USER_IDX:
        if (!(ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if (is_write && !(ptep & PG_RW_MASK)) {

    /* can the page can be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
        break;

    case MMU_KSMAP_IDX:
        if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
        /* fall through */
    case MMU_KNOSMAP_IDX:
        if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
            (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
    }
        if ((env->cr[0] & CR0_WP_MASK) &&
            is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
        break;

    default: /* cannot happen */
        break;
    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
@@ -931,25 +923,13 @@ do_check_protect_pse36:
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if (pte & PG_DIRTY_MASK) {
    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & env->a20_mask;

@@ -962,6 +942,7 @@ do_check_protect_pse36:
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;