Commit 5237dd97 authored by Jinjiang Tu

mm/tlbbatch: fix kabi change

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7U78A


CVE: NA

--------------------------------

Fix the kABI change caused by adding mm_struct->tlb_flush_batched and
task_struct->tlb_ubc on arm64: reuse mm_struct's first kABI reserve
slot for tlb_flush_batched via KABI_USE(), and move the per-task TLB
unmap batch into task_struct_resvd as a new struct
tlbflush_unmap_batch_arm64, so both structs keep their original size
and layout.
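
The fix relies on openEuler's kABI reserve/reuse helpers. A minimal
sketch of the pattern, assuming simplified macro shapes (the real
definitions live in include/linux/kabi.h and carry extra type
checking):

	/* Pad the struct with a placeholder slot at definition time. */
	#define KABI_RESERVE(n)		unsigned long kabi_reserved##n;

	/* Later, reuse that slot for a new member without moving anything:
	 * the union keeps the struct's size and member offsets unchanged. */
	#define KABI_USE(n, _new)			\
		union {					\
			_new;				\
			unsigned long kabi_reserved##n;	\
		};

Because the new member only aliases an already-reserved slot, modules
built against the old layout keep working.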

Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
parent 37f0c78d
include/linux/mm_types.h  +3 −1
@@ -598,7 +598,7 @@ struct mm_struct {
 		 * moving a PROT_NONE or PROT_NUMA mapped page.
 		 */
 		atomic_t tlb_flush_pending;
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && !defined(CONFIG_ARM64)
 		/* See flush_tlb_batched_pending() */
 		bool tlb_flush_batched;
 #endif
@@ -620,6 +620,8 @@ struct mm_struct {
 
 #if defined(CONFIG_X86_64)
 	KABI_USE(1, struct mm_struct_extend *mm_extend)
+#elif defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && defined(CONFIG_ARM64)
+	KABI_USE(1, bool tlb_flush_batched)
 #else
 	KABI_RESERVE(1)
 #endif
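
Note that the union member introduced by KABI_USE() keeps the name
tlb_flush_batched, so arm64 code that touches the flag needs no source
change; e.g. a (hypothetical, illustration-only) accessor compiles the
same on every configuration:

	static inline bool mm_tlb_flush_batched(struct mm_struct *mm)
	{
		return mm->tlb_flush_batched;	/* reused kABI slot on arm64 */
	}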
include/linux/mm_types_task.h  +24 −1
@@ -74,7 +74,7 @@ struct page_frag {
 
 /* Track pages that require TLB flushes */
 struct tlbflush_unmap_batch {
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && !defined(CONFIG_ARM64)
 	/*
 	 * The arch code makes the following promise: generic code can modify a
 	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
@@ -96,4 +96,27 @@ struct tlbflush_unmap_batch {
 #endif
 };
 
+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && defined(CONFIG_ARM64)
+struct tlbflush_unmap_batch_arm64 {
+	/*
+	 * The arch code makes the following promise: generic code can modify a
+	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
+	 * all needed barriers), then call arch_tlbbatch_flush(), and the entries
+	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
+	 * returns.
+	 */
+	struct arch_tlbflush_unmap_batch arch;
+
+	/* True if a flush is needed. */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
+};
+#endif
+
 #endif /* _LINUX_MM_TYPES_TASK_H */
include/linux/sched.h  +3 −0
@@ -685,6 +685,9 @@ struct task_struct_resvd {
 #ifdef CONFIG_MMU
 	struct timer_list	oom_reaper_timer;
 #endif
+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && defined(CONFIG_ARM64)
+	struct tlbflush_unmap_batch_arm64       tlb_ubc;
+#endif
 };
 
 struct task_struct {
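
task_struct_resvd is openEuler's separately allocated extension struct,
reached via the task_struct->_resvd pointer, so growing it leaves the
size and layout of task_struct itself untouched. On arm64 the batch
state is therefore reached as (sketch, mirroring the DEFINE_TLB_UBC
macro added below):

	struct tlbflush_unmap_batch_arm64 *tlb_ubc = &current->_resvd->tlb_ubc;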
mm/rmap.c  +11 −3
@@ -596,6 +596,14 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+
+#ifdef CONFIG_ARM64
+#define DEFINE_TLB_UBC(name) struct tlbflush_unmap_batch_arm64 *name = \
+				&current->_resvd->tlb_ubc
+#else
+#define DEFINE_TLB_UBC(name) struct tlbflush_unmap_batch *name = &current->tlb_ubc
+#endif
+
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -604,7 +612,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
  */
 void try_to_unmap_flush(void)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	DEFINE_TLB_UBC(tlb_ubc);
 
 	if (!tlb_ubc->flush_required)
 		return;
@@ -617,7 +625,7 @@ void try_to_unmap_flush(void)
 /* Flush iff there are potentially writable TLB entries that can race with IO */
 void try_to_unmap_flush_dirty(void)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	DEFINE_TLB_UBC(tlb_ubc);
 
 	if (tlb_ubc->writable)
 		try_to_unmap_flush();
@@ -626,7 +634,7 @@ void try_to_unmap_flush_dirty(void)
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
 						unsigned long uaddr)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	DEFINE_TLB_UBC(tlb_ubc);
 
 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
 	tlb_ubc->flush_required = true;
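
For reference, DEFINE_TLB_UBC(tlb_ubc) expands to one of the two
declarations below, which is what lets the function bodies above stay
identical on both configurations:

	/* CONFIG_ARM64: the batch now lives in the reserved extension struct */
	struct tlbflush_unmap_batch_arm64 *tlb_ubc = &current->_resvd->tlb_ubc;

	/* other arches: the batch stays embedded in task_struct, as before */
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;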