Commit 8dab5241 authored by Benjamin Herrenschmidt, committed by Linus Torvalds
Browse files

Rework ptep_set_access_flags and fix sun4c



Some changes done a while ago to avoid pounding on ptep_set_access_flags and
update_mmu_cache in some race situations break sun4c which requires
update_mmu_cache() to always be called on minor faults.

This patch reworks ptep_set_access_flags() semantics, implementations and
callers so that it's now responsible for returning whether an update is
necessary or not (basically whether the PTE actually changed).  This allows
fixing the sparc implementation to always return 1 on sun4c.

[akpm@linux-foundation.org: fixes, cleanups]
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: David Miller <davem@davemloft.net>
Cc: Mark Fortescue <mark@mtfhpc.demon.co.uk>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 679ce0ac
Loading
Loading
Loading
Loading
+12 −5
Original line number Diff line number Diff line
@@ -27,13 +27,20 @@ do { \
 * Largely same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this.
 * to optimize this. We return whether the PTE actually changed, which
 * in turn instructs the caller to do things like update__mmu_cache.
 * This used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {				  					  \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
} while (0)
	}								  \
	__changed;							  \
})
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+5 −3
Original line number Diff line number Diff line
@@ -285,13 +285,15 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 */
#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
do {									\
	if (dirty) {							\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		(ptep)->pte_low = (entry).pte_low;			\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
} while (0)
	__changed;							\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(vma, addr, ptep) ({			\
+16 −9
Original line number Diff line number Diff line
@@ -534,15 +534,22 @@ extern void lazy_mmu_prot_update (pte_t pte);
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
do {											\
	if (__safely_writable) {							\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __safely_writable) {				\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __addr);				\
	}								\
} while (0)
	__changed;							\
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
	ptep_establish(__vma, __addr, __ptep, __entry)
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed)							\
		ptep_establish(__vma, __addr, __ptep, __entry);		\
	__changed;							\
})
#endif

#  ifdef CONFIG_VIRTUAL_MEM_MAP
+8 −4
Original line number Diff line number Diff line
@@ -673,10 +673,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
}

#define  ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								   \
({									   \
	int __changed = !pte_same(*(__ptep), __entry);			   \
	if (__changed) {						   \
		__ptep_set_access_flags(__ptep, __entry, __dirty);    	   \
		flush_tlb_page_nohash(__vma, __address);		   \
	} while(0)
	}								   \
	__changed;							   \
})

/*
 * Macro to mark a page protection value as "uncacheable".
+8 −4
Original line number Diff line number Diff line
@@ -413,10 +413,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
	:"cc");
}
#define  ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								   \
({									   \
	int __changed = !pte_same(*(__ptep), __entry);			   \
	if (__changed) {						   \
		__ptep_set_access_flags(__ptep, __entry, __dirty);    	   \
		flush_tlb_page_nohash(__vma, __address);		   \
	} while(0)
	}								   \
	__changed;							   \
})

/*
 * Macro to mark a page protection value as "uncacheable".
Loading