Commit 08179b47 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull parisc updates from Helge Deller:

 - Optimize parisc page table locks by using the existing
   page_table_lock

 - Export argv0-preserve flag in binfmt_misc for usage in qemu-user

 - Fix interrupt table (IVT) checksum so firmware will call crash
   handler (HPMC)

 - Increase IRQ stack to 64kb on 64-bit kernel

 - Switch to common devmem_is_allowed() implementation

 - Minor fix to get_wchan()

* 'parisc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  binfmt_misc: pass binfmt_misc flags to the interpreter
  parisc: Optimize per-pagetable spinlocks
  parisc: Replace test_ti_thread_flag() with test_tsk_thread_flag()
  parisc: Bump 64-bit IRQ stack size to 64 KB
  parisc: Fix IVT checksum calculation wrt HPMC
  parisc: Use the generic devmem_is_allowed()
  parisc: Drop out of get_wchan() if task is running again
parents 2671fe5e 2347961b
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@ config PARISC
	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_CPU_DEVICES
	select GENERIC_STRNCPY_FROM_USER
	select GENERIC_LIB_DEVMEM_IS_ALLOWED
	select SYSCTL_ARCH_UNALIGN_ALLOW
	select SYSCTL_EXCEPTION_TRACE
	select HAVE_MOD_ARCH_SPECIFIC
@@ -310,6 +311,16 @@ config IRQSTACKS
	  for handling hard and soft interrupts.  This can help avoid
	  overflowing the process kernel stacks.

# SMP-only option: take a page table lock in the TLB fault handler so PTE
# updates stay consistent across CPUs, at some performance cost.
config TLB_PTLOCK
	bool "Use page table locks in TLB fault handler"
	depends on SMP
	default n
	help
	  Select this option to enable page table locking in the TLB
	  fault handler. This ensures that page table entries are
	  updated consistently on SMP machines at the expense of some
	  loss in performance.

# Non-interactive symbol: CPU hotplug is enabled automatically on SMP kernels.
config HOTPLUG_CPU
	bool
	default y if SMP
+1 −1
Original line number Diff line number Diff line
@@ -179,7 +179,7 @@ static __inline__ void __user *arch_compat_alloc_user_space(long len)

/*
 * Return non-zero if task @t has TIF_32BIT set, i.e. it is a 32-bit
 * compat task on a 64-bit kernel.
 *
 * Note: the scraped diff left both the old test_ti_thread_flag() call
 * and its replacement in the body, making the second return unreachable;
 * only the new test_tsk_thread_flag() form (per the merge shortlog
 * "Replace test_ti_thread_flag() with test_tsk_thread_flag()") belongs here.
 */
static inline int __is_compat_task(struct task_struct *t)
{
	return test_tsk_thread_flag(t, TIF_32BIT);
}

static inline int is_compat_task(void)
+2 −0
Original line number Diff line number Diff line
@@ -321,4 +321,6 @@ extern void iowrite64be(u64 val, void __iomem *addr);
 */
#define xlate_dev_kmem_ptr(p)	p

extern int devmem_is_allowed(unsigned long pfn);

#endif
+7 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

/* on PA-RISC, we actually have enough contexts to justify an allocator
@@ -50,6 +51,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
		/* put physical address of page_table_lock in cr28 (tr4)
		   for TLB faults */
		spinlock_t *pgd_lock = &next->page_table_lock;
		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
+1 −1
Original line number Diff line number Diff line
@@ -112,7 +112,7 @@ extern int npmem_ranges;
#else
/*
 * Page table entry sizing (log2 bits per entry) for this configuration.
 * The flattened diff showed BITS_PER_PGD_ENTRY defined twice (old value
 * BITS_PER_PMD_ENTRY, new value 2); a macro must be defined exactly once,
 * and the post-commit value is 2.
 */
#define BITS_PER_PTE_ENTRY	2
#define BITS_PER_PMD_ENTRY	2
#define BITS_PER_PGD_ENTRY	2
#endif
#define PGD_ENTRY_SIZE	(1UL << BITS_PER_PGD_ENTRY)
#define PMD_ENTRY_SIZE	(1UL << BITS_PER_PMD_ENTRY)
Loading