Commit d92baf00 authored by Cédric Le Goater, committed by David Gibson
Browse files

target/ppc: Introduce ppc_radix64_xlate() for Radix tree translation



This is moving code under a new ppc_radix64_xlate() routine shared by
the MMU Radix page fault handler and the 'get_phys_page_debug' PPC
callback. The difference being that 'get_phys_page_debug' does not
generate exceptions.

The process-scoped-specific part of Radix translation is moved under
ppc_radix64_process_scoped_xlate() in preparation for the future support
for partition-scoped Radix translation. Routines raising the exceptions
now take a 'cause_excp' bool to cover the 'get_phys_page_debug' case.

It should be functionally equivalent.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20200403140056.59465-2-clg@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
parent 05af7c77
Loading
Loading
Loading
Loading
+123 −96
Original line number Diff line number Diff line
@@ -219,67 +219,86 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
    return true;
}

int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            bool cause_excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc;
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
    int page_size, prot, fault_cause = 0;
    ppc_v3_pate_t pate;
    bool relocation;
    uint64_t offset, size, prtbe_addr, prtbe0, pte;
    int fault_cause = 0;
    hwaddr pte_addr;

    assert(!(msr_hv && cpu->vhyp));
    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (cause_excp) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
    prtbe0 = ldq_phys(cs->as, prtbe_addr);

    relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1));
    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (msr_hv || cpu->vhyp)) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                g_raddr, g_page_size, &fault_cause, &pte_addr);

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
    if (!(pte & R_PTE_VALID) ||
        ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot)) {
        /* No valid pte or access denied due to protection */
        if (cause_excp) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        }
        return 1;
    }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);

    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);

    return 0;
}

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec.
     */
    if (!ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }
static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                             bool relocation,
                             hwaddr *raddr, int *psizep, int *protp,
                             bool cause_excp)
{
    uint64_t lpid = 0, pid = 0;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (cause_excp) {
            ppc_radix64_raise_segi(cpu, rwx, eaddr);
        }
        return 1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (cause_excp) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
            }
            return 1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (cause_excp) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
            }
            return 1;
        }
        /* We don't support guest mode yet */
        if (lpid != 0) {
            error_report("PowerNV guest support Unimplemented");
@@ -287,89 +306,97 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
        }
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        return 1;
    }
    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
        /* Couldn't get pte or access denied due to protection */
        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        return 1;
    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, cause_excp);
        if (ret) {
            return ret;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    /* Update Reference and Change Bits */
    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    *raddr = g_raddr;
    return 0;
}

hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc;
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
    int page_size, fault_cause = 0;
    ppc_v3_pate_t pate;
    int page_size, prot;
    bool relocation;
    hwaddr raddr;

    /* Handle Real Mode */
    if (msr_dr == 0) {
    assert(!(msr_hv && cpu->vhyp));
    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1));
    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (msr_hv || cpu->vhyp)) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        return -1;
        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
           }

    /* Get Process Table */
    if (cpu->vhyp) {
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            return -1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            return -1;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }
        /* We don't support guest mode yet */
        if (lpid != 0) {
            error_report("PowerNV guest support Unimplemented");
            exit(1);

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec.
     */
    if (!ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
    if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
                          &page_size, &prot, true)) {
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        return -1;
    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}
    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte) {
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    int psize, prot;
    hwaddr raddr;

    /* Handle Real Mode */
    if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }

    if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
                          &prot, false)) {
        return -1;
    }