arch/parisc/kernel/asm-offsets.c  +8 −0

@@ -289,6 +289,14 @@ int main(void)
 	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
 	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
 	BLANK();
+	/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+	 * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+	DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
+	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));

arch/parisc/kernel/vmlinux.lds.S  +6 −3

@@ -60,7 +60,7 @@ SECTIONS
 		EXIT_DATA
 	}
 	PERCPU_SECTION(8)
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */

@@ -116,7 +116,7 @@ SECTIONS
 	 * that we can properly leave these
 	 * as writable */
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	data_start = .;

 	EXCEPTION_TABLE(8)

@@ -135,8 +135,11 @@ SECTIONS
 	_edata = .;

 	/* BSS */
-	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
+	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+
+	/* bootmap is allocated in setup_bootmem() directly behind bss. */
+	. = ALIGN(HUGEPAGE_SIZE);
 	_end = . ;

 	STABS_DEBUG
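In the linker script, ALIGN() simply rounds the location counter up to the next multiple of its power-of-two argument, so __init_end, data_start and _end now all start on a huge-page boundary and map_pages() can later cover kernel text and data with huge PTEs. Below is a minimal userspace sketch of that rounding; the 4 MB size is purely illustrative (the real value comes from REAL_HPAGE_SHIFT via the HUGEPAGE_SIZE constant defined above, and falls back to PAGE_SIZE when CONFIG_HUGETLB_PAGE is off):

#include <stdio.h>

/* Illustrative value only: the kernel derives HUGEPAGE_SIZE from
 * REAL_HPAGE_SHIFT (see asm-offsets.c above); 4 MB is just an example. */
#define EXAMPLE_HUGEPAGE_SHIFT	22
#define EXAMPLE_HUGEPAGE_SIZE	(1UL << EXAMPLE_HUGEPAGE_SHIFT)

/* Same rounding the linker performs for ". = ALIGN(HUGEPAGE_SIZE);" */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long edata = 0x10644321UL;	/* made-up end-of-data address */

	printf("_end would be placed at 0x%lx\n",
	       align_up(edata, EXAMPLE_HUGEPAGE_SIZE));
	return 0;
}

The PAGE_SIZE fallback in asm-offsets.c keeps the same linker script usable on !CONFIG_HUGETLB_PAGE builds without extra alignment padding.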
arch/parisc/mm/init.c  +17 −23

@@ -407,15 +407,11 @@ static void __init map_pages(unsigned long start_vaddr,
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long fv_addr;
-	unsigned long gw_addr;
-	extern const unsigned long fault_vector_20;
-	extern void * const linux_gateway_page;
+	unsigned long kernel_end;

 	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
-	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+	kernel_end  = __pa((unsigned long)&_end);

 	end_paddr = start_paddr + size;

@@ -473,24 +469,25 @@ static void __init map_pages(unsigned long start_vaddr,
 		for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 			pte_t pte;

-			/*
-			 * Map the fault vector writable so we can
-			 * write the HPMC checksum.
-			 */
 			if (force)
 				pte = __mk_pte(address, pgprot);
-			else if (parisc_text_address(vaddr) &&
-				 address != fv_addr)
+			else if (parisc_text_address(vaddr)) {
 				pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				if (address >= ro_start && address < kernel_end)
+					pte = pte_mkhuge(pte);
+			}
 			else
 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-			if (address >= ro_start && address < ro_end
-					&& address != fv_addr
-					&& address != gw_addr)
-				pte = __mk_pte(address, PAGE_KERNEL_RO);
-			else
+			if (address >= ro_start && address < ro_end) {
+				pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				pte = pte_mkhuge(pte);
+			} else
 #endif
+			{
 				pte = __mk_pte(address, pgprot);
+				if (address >= ro_start && address < kernel_end)
+					pte = pte_mkhuge(pte);
+			}

 			if (address >= end_paddr) {
 				if (force)

@@ -534,15 +531,12 @@ void free_initmem(void)
 	/* force the kernel to see the new TLB entries */
 	__flush_tlb_range(0, init_begin, init_end);
-	/* Attempt to catch anyone trying to execute code here
-	 * by filling the page with BRK insns.
-	 */
-	memset((void *)init_begin, 0x00, init_end - init_begin);
+	/* finally dump all the instructions which were cached, since the
+	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);

-	free_initmem_default(-1);
+	free_initmem_default(POISON_FREE_INITMEM);

 	/* set up a new led state on systems shipped LED State panel */
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

@@ -712,8 +706,8 @@ static void __init pagetable_init(void)
 		unsigned long size;

 		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
-		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
 		size = pmem_ranges[range].pages << PAGE_SHIFT;
+		end_paddr = start_paddr + size;

 		map_pages((unsigned long)__va(start_paddr), start_paddr,
 			  size, PAGE_KERNEL, 0);
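The effect of the map_pages() change is easier to see in isolation. The following standalone sketch only models the decision: enum prot, struct fake_pte and make_pte() are stand-ins invented here (not kernel APIs), is_text stands for parisc_text_address(vaddr), base_page_is_4k stands for CONFIG_PARISC_PAGE_SIZE_4KB, and the huge flag stands for pte_mkhuge(). The branch structure, however, mirrors the patched code: pages inside [ro_start, kernel_end) end up huge-mapped.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real pte machinery; this only models the branch
 * structure of map_pages() after the patch above. */
enum prot { PROT_DEFAULT, PROT_KERNEL_EXEC };

struct fake_pte {
	enum prot prot;
	bool huge;
};

/* Hypothetical helper mirroring the decision in the hunk above. */
static struct fake_pte make_pte(unsigned long address, bool force, bool is_text,
				unsigned long ro_start, unsigned long ro_end,
				unsigned long kernel_end, bool base_page_is_4k)
{
	struct fake_pte pte = { PROT_DEFAULT, false };

	if (force)
		return pte;			/* caller-supplied pgprot, never huge */

	if (is_text) {
		pte.prot = PROT_KERNEL_EXEC;	/* PAGE_KERNEL_EXEC */
		if (address >= ro_start && address < kernel_end)
			pte.huge = true;	/* pte_mkhuge() */
	} else if (base_page_is_4k && address >= ro_start && address < ro_end) {
		pte.prot = PROT_KERNEL_EXEC;	/* CONFIG_PARISC_PAGE_SIZE_4KB branch */
		pte.huge = true;
	} else {
		/* default pgprot */
		if (address >= ro_start && address < kernel_end)
			pte.huge = true;
	}
	return pte;
}

int main(void)
{
	/* made-up physical addresses, purely for illustration */
	struct fake_pte p = make_pte(0x00180000UL, false, true,
				     0x00100000UL, 0x00400000UL,
				     0x00800000UL, true);

	printf("text page: exec=%d huge=%d\n", p.prot == PROT_KERNEL_EXEC, p.huge);
	return 0;
}

Note that the old special cases for fault_vector_20 and linux_gateway_page are dropped entirely, so those pages are now mapped like the rest of the kernel text.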