Commit 61f4a896 authored by David Hildenbrand, committed by Andrew Morton

hexagon/mm: support __HAVE_ARCH_PTE_SWP_EXCLUSIVE

Let's support __HAVE_ARCH_PTE_SWP_EXCLUSIVE by stealing one bit from the
offset.  This reduces the maximum swap space per file to 16 GiB (was 32
GiB).

While at it, mask the type in __swp_entry().

Link: https://lkml.kernel.org/r/20230113171026.582290-7-david@redhat.com


Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Brian Cain <bcain@quicinc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 41e0d491
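The 32 GiB -> 16 GiB figure in the commit message follows from the bit layout in the diff below: the swap offset loses bit 6 to the exclusive marker, shrinking from 23 bits (bits 6-9 plus 13-31) to 22 bits (bits 7-9 plus 13-31). A quick back-of-the-envelope check of that arithmetic, as a user-space sketch that assumes the usual 4 KiB base page size (not kernel code):

/*
 * Back-of-the-envelope check of the 32 GiB -> 16 GiB claim
 * (user-space sketch, not kernel code; assumes 4 KiB base pages).
 */
#include <stdio.h>

int main(void)
{
	/* Old layout: offset held in bits 6-9 and 13-31 -> 4 + 19 = 23 bits. */
	unsigned int old_offset_bits = 4 + 19;
	/* New layout: bit 6 is the exclusive marker, offset held in
	 * bits 7-9 and 13-31 -> 3 + 19 = 22 bits. */
	unsigned int new_offset_bits = 3 + 19;
	unsigned long long page_size = 4096;	/* assumed 4 KiB pages */

	printf("old max swap: %llu GiB\n",
	       ((1ULL << old_offset_bits) * page_size) >> 30);	/* 32 */
	printf("new max swap: %llu GiB\n",
	       ((1ULL << new_offset_bits) * page_size) >> 30);	/* 16 */
	return 0;
}

With 2^23 pages of 4 KiB each, the old layout addresses 32 GiB of swap per swap file; dropping one offset bit halves that to 16 GiB.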
@@ -61,6 +61,9 @@ extern unsigned long empty_zero_page;
  * So we'll put up with a bit of inefficiency for now...
  */
 
+/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	(1<<6)
+
 /*
  * Top "FOURTH" level (pgd), which for the Hexagon VM is really
  * only the second from the bottom, pgd and pud both being collapsed.
@@ -359,9 +362,12 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
 
 /*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
  * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
  * interpreted as swap information.  The remaining free bits are interpreted as
- * swap type/offset tuple.  Rather than have the TLB fill handler test
+ * listed below.  Rather than have the TLB fill handler test
  * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
  * all zeros for swap entries, which speeds up the miss handler at the cost of
  * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
@@ -371,9 +377,10 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * Format of swap PTE:
  *	bit	0:	Present (zero)
  *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
- *	bits	6-9:	bits 3:0 of offset
+ *	bit	6:	exclusive marker
+ *	bits	7-9:	bits 2:0 of offset
  *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
- *	bits	13-31:  bits 22:4 of swap offset
+ *	bits	13-31:  bits 21:3 of swap offset
  *
  * The split offset makes some of the following macros a little gnarly,
  * but there's plenty of precedent for this sort of thing.
@@ -383,11 +390,29 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define __swp_type(swp_pte)		(((swp_pte).val >> 1) & 0x1f)
 
 #define __swp_offset(swp_pte) \
-	((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))
+	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x3ffff8))
 
 #define __swp_entry(type, offset) \
 	((swp_entry_t)	{ \
-		((type << 1) | \
-		 ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
+		(((type & 0x1f) << 1) | \
+		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
 
+#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
+static inline int pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
 #endif
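
To experiment with the new encoding outside the kernel, the following is a minimal user-space model of the macros added above. It mirrors the new __swp_type()/__swp_offset()/__swp_entry() masks and the _PAGE_SWP_EXCLUSIVE bit, but stands in plain unsigned long for the kernel's pte_t/swp_entry_t, so treat it as a sketch of the bit arithmetic rather than the real interface:

/*
 * User-space model of the swap PTE layout above (sketch only: pte_t and
 * swp_entry_t are replaced by plain unsigned long, pte_val() is dropped).
 */
#include <assert.h>
#include <stdio.h>

#define _PAGE_SWP_EXCLUSIVE	(1 << 6)

/* Mirrors of the new __swp_type()/__swp_offset()/__swp_entry() masks. */
#define model_swp_type(val)	(((val) >> 1) & 0x1f)
#define model_swp_offset(val)	((((val) >> 7) & 0x7) | (((val) >> 10) & 0x3ffff8))
#define model_swp_entry(type, offset) \
	((((type) & 0x1f) << 1) | \
	 (((offset) & 0x3ffff8) << 10) | (((offset) & 0x7) << 7))

int main(void)
{
	unsigned long type = 0x1f;		/* maximum 5-bit swap type */
	unsigned long offset = 0x3fffff;	/* maximum 22-bit swap offset */
	unsigned long pte = model_swp_entry(type, offset);

	/* Encode/decode must round-trip. */
	assert(model_swp_type(pte) == type);
	assert(model_swp_offset(pte) == offset);

	/* Setting the exclusive marker must not disturb type or offset. */
	pte |= _PAGE_SWP_EXCLUSIVE;
	assert(model_swp_type(pte) == type);
	assert(model_swp_offset(pte) == offset);
	assert(pte & _PAGE_SWP_EXCLUSIVE);

	/* Bits 10-12 (the reserved permission bits) stay all zero. */
	assert((pte & 0x1c00) == 0);

	printf("round-trip OK: pte=0x%lx\n", pte);
	return 0;
}

The final assertion reflects the comment in the diff: the permission bits (10-12) are kept all zero in swap PTEs, which is what lets the TLB miss handler skip the _PAGE_PRESENT test at the cost of a few offset bits.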