Commit 79d1befe authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32s: Don't hash_preload() kernel text



We now always map kernel text with BATs, so there is no longer any
need to preload the hash with kernel text addresses, nor to ensure
those entries are never evicted.

This is more or less a revert of commit ee4f2ea4 ("[POWERPC] Fix
32-bit mm operations when not using BATs")

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/0a0bab7fadd89aa829e33420fbc10d60c59040a7.1606285014.git.christophe.leroy@csgroup.eu
parent 035b19a1
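
For context, a BAT (Block Address Translation) entry translates the whole
kernel text range directly in hardware, so instruction fetches to kernel text
never take a hash fault and there is nothing to pre-install in the hash table.
Below is a minimal sketch of that prerequisite, built on the setbat()
prototype visible in the diff further down; the BAT index, the size, the
physical base and the helper name are illustrative assumptions, not taken
from this patch:

/*
 * Illustrative sketch only, not code from this patch.  With kernel text
 * covered by a BAT, text addresses are translated by the BAT registers
 * and never reach the hash table, so preloading hash entries for them
 * (the calls removed below) serves no purpose.
 */
#include <linux/sizes.h>	/* for SZ_8M; the size below is a placeholder */

static void __init example_cover_text_with_bat(void)	/* hypothetical helper */
{
	/* Use BAT 0 to map virtual PAGE_OFFSET onto physical 0 with
	 * executable kernel protection; index, physical base and size
	 * are placeholder values for illustration only. */
	setbat(0, PAGE_OFFSET, 0, SZ_8M, PAGE_KERNEL_TEXT);
}
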
+1 −17
@@ -411,30 +411,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
-	 *
-	 * In addition, we skip any slot that is mapping kernel text in
-	 * order to avoid a deadlock when not using BAT mappings if
-	 * trying to hash in the kernel hash code itself after it has
-	 * already taken the hash table lock. This works in conjunction
-	 * with pre-faulting of the kernel text.
-	 *
-	 * If the hash table bucket is full of kernel text entries, we'll
-	 * lockup here but that shouldn't happen
	 */

-1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
+	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
-	LDPTE	r0,HPTE_SIZE/2(r4)		/* get PTE second word */
-	clrrwi	r0,r0,12
-	lis	r6,etext@h
-	ori	r6,r6,etext@l			/* get etext */
-	tophys(r6,r6)
-	cmpl	cr0,r0,r6			/* compare and try again */
-	blt	1b

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
+1 −1
@@ -302,7 +302,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
/*
 * Preload a translation in the hash table
 */
-void hash_preload(struct mm_struct *mm, unsigned long ea)
+static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

+0 −2
@@ -91,8 +91,6 @@ void print_system_hash_info(void);

#ifdef CONFIG_PPC32

-void hash_preload(struct mm_struct *mm, unsigned long ea);
-
extern void mapin_ram(void);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot);
+0 −4
@@ -112,10 +112,6 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
-#ifdef CONFIG_PPC_BOOK3S_32
-		if (ktext)
-			hash_preload(&init_mm, v);
-#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}