Commit 192fe71c authored by Linus Torvalds
Pull parisc architecture fixes from Helge Deller:
 "Quite a bunch of real bugfixes in here and most of them are tagged for
  backporting: A fix for cache flushing from irq context, a kprobes &
  kgdb breakpoint handling fix, and a fix in the alternative code
  patching function to take care of CPU hotplugging.

  parisc now provides LOCKDEP support and comes with a lightweight
  spinlock check. Both features helped me to find the cache flush bug.

  Additionally writing the AGP gatt has been fixed, the machine allows
  the user to reboot after a system halt and arch_sync_dma_for_cpu() has
  been optimized for PCXL CPUs.

  Summary:

   - Fix flush_dcache_page() for usage from irq context

   - Handle kprobes breakpoints only in kernel context

   - Handle kgdb breakpoints only in kernel context

   - Use num_present_cpus() in alternative patching code

   - Enable LOCKDEP support

   - Add lightweight spinlock checks

   - Flush AGP gatt writes and adjust gatt mask in parisc_agp_mask_memory()

   - Allow to reboot machine after system halt

   - Improve cache flushing for PCXL in arch_sync_dma_for_cpu()"

* tag 'parisc-for-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix flush_dcache_page() for usage from irq context
  parisc: Handle kgdb breakpoints only in kernel context
  parisc: Handle kprobes breakpoints only in kernel context
  parisc: Allow to reboot machine after system halt
  parisc: Enable LOCKDEP support
  parisc: Add lightweight spinlock checks
  parisc: Use num_present_cpus() in alternative patching code
  parisc: Flush gatt writes and adjust gatt mask in parisc_agp_mask_memory()
  parisc: Improve cache flushing for PCXL in arch_sync_dma_for_cpu()
parents 9828ed3f 61e150fb
arch/parisc/Kconfig  +4 −0
@@ -130,6 +130,10 @@ config PM
 config STACKTRACE_SUPPORT
 	def_bool y
 
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
 config ISA_DMA_API
 	bool
 
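With LOCKDEP_SUPPORT declared, the generic lock-debugging options under "Kernel hacking" become selectable on parisc. An illustrative .config fragment (not part of the patch; any of the usual lockdep options work):

# Illustrative fragment: lockdep-based checking now available on parisc
CONFIG_PROVE_LOCKING=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_LOCK_STAT=y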
arch/parisc/Kconfig.debug  +11 −0
 # SPDX-License-Identifier: GPL-2.0
 #
+
+config LIGHTWEIGHT_SPINLOCK_CHECK
+	bool "Enable lightweight spinlock checks"
+	depends on SMP && !DEBUG_SPINLOCK
+	default y
+	help
+	  Add checks with low performance impact to the spinlock functions
+	  to catch memory overwrites at runtime. For more advanced
+	  spinlock debugging you should choose the DEBUG_SPINLOCK option
+	  which will detect uninitialized spinlocks too.
+	  If unsure say Y here.
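The class of bug this option is aimed at, as a hypothetical sketch (struct foo, buggy() and the overrun are invented for illustration; the types and calls are the stock kernel ones):

#include <linux/spinlock.h>
#include <linux/string.h>

/* Hypothetical bug, not from the patch: an out-of-bounds write that
 * lands on a neighbouring spinlock. */
struct foo {
	char buf[16];
	spinlock_t lock;	/* sits directly after the buffer */
};

static void buggy(struct foo *f)	/* assume f->lock was initialized */
{
	memset(f->buf, 0x55, 20);	/* overruns buf into f->lock */
	spin_lock(&f->lock);		/* the lightweight check traps here */
}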
arch/parisc/include/asm/cacheflush.h  +4 −0
@@ -48,6 +48,10 @@ void flush_dcache_page(struct page *page);
 
 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
+		xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
+		xa_unlock_irqrestore(&mapping->i_pages, flags)
 
 #define flush_icache_page(vma,page)	do { 		\
 	flush_kernel_dcache_page_addr(page_address(page)); \
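The new _irqsave/_irqrestore variants mirror the existing pair but preserve the caller's interrupt state instead of unconditionally re-enabling interrupts on unlock, which is what makes flush_dcache_page() usable from irq context. The calling pattern, as an illustrative sketch (not the literal flush_dcache_page() body):

unsigned long flags;

/* Safe even when interrupts are already off: the saved flags are
 * restored rather than forced back on. */
flush_dcache_mmap_lock_irqsave(mapping, flags);
/* ... walk the page's user mappings and flush each one ... */
flush_dcache_mmap_unlock_irqrestore(mapping, flags);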
arch/parisc/include/asm/spinlock.h  +34 −5
@@ -7,10 +7,26 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
+#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */
+
+static inline void arch_spin_val_check(int lock_val)
+{
+	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+		asm volatile(	"andcm,= %0,%1,%%r0\n"
+				".word %2\n"
+		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+			"i" (SPINLOCK_BREAK_INSN));
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
-	volatile unsigned int *a = __ldcw_align(x);
-	return READ_ONCE(*a) == 0;
+	volatile unsigned int *a;
+	int lock_val;
+
+	a = __ldcw_align(x);
+	lock_val = READ_ONCE(*a);
+	arch_spin_val_check(lock_val);
+	return (lock_val == 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *x)
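Decoded: andcm,= computes lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL, and the ",=" completer nullifies the following .word (the break 6,6 trap) when the result is zero, so the trap fires only for values that are neither 0 (locked) nor within the unlocked magic. A rough C equivalent (a sketch of the logic, not the generated code):

/* Sketch only: the real check is two instructions of inline asm. */
static inline void arch_spin_val_check_sketch(int lock_val)
{
	/* Valid lock words are 0 (locked) or 0x1a46 (unlocked).  Any
	 * bit outside the magic pattern means something overwrote the
	 * lock word; trap now instead of deadlocking later. */
	if (lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL)
		__builtin_trap();	/* stands in for "break 6,6" */
}

The check is deliberately approximate, hence "lightweight": a corrupted value whose set bits all fall inside 0x1a46 still passes, but pointer scribbles and poison patterns are caught for the cost of two instructions and no extra memory.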
@@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
+	do {
+		int lock_val_old;
+
+		lock_val_old = __ldcw(a);
+		arch_spin_val_check(lock_val_old);
+		if (lock_val_old)
+			return;	/* got lock */
+
+		/* wait until we should try to get lock again */
 		while (*a == 0)
 			continue;
+	} while (1);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
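For readers unfamiliar with PA-RISC: __ldcw() is the "load and clear word" primitive, which atomically fetches the lock word and zeroes it, so a nonzero return value means the caller took the lock. The inner while (*a == 0) loop spins on plain loads and retries the (store-causing) ldcw only once the lock looks free again. A rough C model (a sketch; in hardware this is a single atomic instruction on a 16-byte-aligned word, which is what __ldcw_align() provides):

static inline unsigned int ldcw_model(volatile unsigned int *a)
{
	unsigned int old = *a;	/* fetch the old value ...            */
	*a = 0;			/* ... and clear the word, atomically */
	return old;		/* nonzero: caller now owns the lock  */
}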
@@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 
 	a = __ldcw_align(x);
 	/* Release with ordered store. */
-	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+	__asm__ __volatile__("stw,ma %0,0(%1)"
+		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
+	int lock_val;
 
 	a = __ldcw_align(x);
-	return __ldcw(a) != 0;
+	lock_val = __ldcw(a);
+	arch_spin_val_check(lock_val);
+	return lock_val != 0;
 }
 
 /*
arch/parisc/include/asm/spinlock_types.h  +6 −2
@@ -2,13 +2,17 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46
+
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
 #else
 	volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED	\
+	{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+	    __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
 #endif
 } arch_spinlock_t;
 
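The point of the 0x1a46 magic (and of arch_spin_unlock() now storing it back) is that the lock word is always either 0 or the magic pattern, so corruption becomes detectable. A hypothetical fragment (the memset "bug" is invented for illustration):

arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

memset(&lock, 0x55, sizeof(lock));	/* stray overwrite clobbers the lock */

/* Previously any nonzero word, garbage included, simply behaved as
 * "unlocked" and the overwrite went unnoticed.  Now 0x55555555 has
 * bits outside 0x1a46, so the next lock operation hits "break 6,6"
 * and the corruption is reported instead of silently ignored. */
arch_spin_lock(&lock);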