Commit c436500d authored by Will Deacon's avatar Will Deacon
Browse files

Merge branch 'for-next/mte' into for-next/core

* for-next/mte:
  arm64: kasan: Revert "arm64: mte: reset the page tag in page->flags"
  mm: kasan: Skip page unpoisoning only if __GFP_SKIP_KASAN_UNPOISON
  mm: kasan: Skip unpoisoning of user pages
  mm: kasan: Ensure the tags are visible before the tag in page->flags
parents 03939cf0 20794545
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -300,11 +300,6 @@ static void swsusp_mte_restore_tags(void)
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

		/*
		 * It is not required to invoke page_kasan_tag_reset(page)
		 * at this point since the tags stored in page->flags are
		 * already restored.
		 */
		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
+0 −9
Original line number Diff line number Diff line
@@ -48,15 +48,6 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
	if (!pte_is_tagged)
		return;

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() in between setting the flags and clearing the
	 * tags because if another thread reads page->flags and builds a
	 * tagged address out of it, there is an actual dependency to the
	 * memory access, but on the current thread we do not guarantee that
	 * the new page->flags are visible before the tags were updated.
	 */
	smp_wmb();
	mte_clear_page_tags(page_address(page));
}

+0 −9
Original line number Diff line number Diff line
@@ -23,15 +23,6 @@ void copy_highpage(struct page *to, struct page *from)

	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
		set_bit(PG_mte_tagged, &to->flags);
		page_kasan_tag_reset(to);
		/*
		 * We need smp_wmb() in between setting the flags and clearing the
		 * tags because if another thread reads page->flags and builds a
		 * tagged address out of it, there is an actual dependency to the
		 * memory access, but on the current thread we do not guarantee that
		 * the new page->flags are visible before the tags were updated.
		 */
		smp_wmb();
		mte_copy_page_tags(kto, kfrom);
	}
}
+0 −1
Original line number Diff line number Diff line
@@ -927,6 +927,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
/*
 * Zero the page's contents and clear its MTE allocation tags in one pass,
 * then flag the page as MTE-tagged.
 *
 * NOTE(review): this body is rendered inside a diff hunk marked "+0 -1";
 * the page_kasan_tag_reset() call appears to be the single line being
 * removed by the revert in this series ("arm64: kasan: Revert 'arm64: mte:
 * reset the page tag in page->flags'") — confirm against the applied tree
 * before relying on this exact sequence.
 */
void tag_clear_highpage(struct page *page)
{
	/* Zeroes data and tags together (DC GZVA-style) per the helper's name. */
	mte_zero_clear_page_tags(page_address(page));
	page_kasan_tag_reset(page);
	/*
	 * Presumably PG_mte_tagged must only become visible after the tags
	 * are initialised, hence set_bit() last — TODO confirm the ordering
	 * contract with readers of page->flags.
	 */
	set_bit(PG_mte_tagged, &page->flags);
}
+0 −9
Original line number Diff line number Diff line
@@ -53,15 +53,6 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
	if (!tags)
		return false;

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() in between setting the flags and clearing the
	 * tags because if another thread reads page->flags and builds a
	 * tagged address out of it, there is an actual dependency to the
	 * memory access, but on the current thread we do not guarantee that
	 * the new page->flags are visible before the tags were updated.
	 */
	smp_wmb();
	mte_restore_page_tags(page_address(page), tags);

	return true;
Loading