Unverified commit 97f98c39, authored by openeuler-ci-bot and committed via Gitee
Browse files

!14826 v2 mm: hugetlb: independent PMD page table shared count

parents 130de291 04ac3249
Loading
Loading
Loading
Loading
+4 −3
Original line number Diff line number Diff line
@@ -138,7 +138,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
/*
 * Unshare a hugetlb PMD page table entry.  Takes the VMA (not just the mm)
 * so the implementation can check the huge page size: only PMD_SIZE pages
 * participate in PMD sharing.
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -171,8 +172,8 @@ static inline unsigned long hugetlb_total_pages(void)
	return 0;
}

/*
 * Stub for !CONFIG_HUGETLB_PAGE builds: there are no hugetlb mappings,
 * so there is never a shared PMD page table to unshare.
 */
static inline int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}
+1 −0
Original line number Diff line number Diff line
@@ -2104,6 +2104,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	page_pmd_pts_init(page);
	return ptlock_init(page);
}

+29 −0
Original line number Diff line number Diff line
@@ -145,6 +145,9 @@ struct page {
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
#if defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && !defined(__GENKSYMS__)
				atomic_t pt_share_count;
#endif
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
@@ -209,6 +212,32 @@ struct page {
#endif
} _struct_page_alignment;

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
/*
 * pt_share_count tracks how many address spaces share a hugetlb PMD page
 * table page.  It is kept separate from the struct page refcount so that
 * PMD-sharing bookkeeping cannot be confused by unrelated get_page()
 * users (e.g. GUP) taking references on the page table page.
 */

/* Initialize the share count when the page is set up as a PMD table. */
static inline void page_pmd_pts_init(struct page *page)
{
	atomic_set(&page->pt_share_count, 0);
}

/* Account one more mm sharing this PMD page table page. */
static inline void page_pmd_pts_inc(struct page *page)
{
	atomic_inc(&page->pt_share_count);
}

/* Drop one sharer; caller holds the page table lock. */
static inline void page_pmd_pts_dec(struct page *page)
{
	atomic_dec(&page->pt_share_count);
}

/* Number of mms currently sharing this PMD page table page (0 = unshared). */
static inline int page_pmd_pts_count(struct page *page)
{
	return atomic_read(&page->pt_share_count);
}
#else
/* Without PMD sharing the field does not exist; init is a no-op. */
static inline void page_pmd_pts_init(struct page *page)
{
}
#endif

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

+13 −13
Original line number Diff line number Diff line
@@ -5054,7 +5054,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			continue;

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
			spin_unlock(ptl);
			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
			force_flush = true;
@@ -6126,7 +6126,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
@@ -6444,7 +6444,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
			spte = huge_pte_offset(svma->vm_mm, saddr,
					       vma_mmu_pagesize(svma));
			if (spte) {
				get_page(virt_to_page(spte));
				page_pmd_pts_inc(virt_to_page(spte));
				break;
			}
		}
@@ -6459,7 +6459,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
		page_pmd_pts_dec(virt_to_page(spte));
	}
	spin_unlock(ptl);
out:
@@ -6471,27 +6471,26 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
/*
 * unmap huge page backed by shared pte.
 *
 * Called with page table lock held.
 *
 * Sharing of the PMD page table page is tracked by pt_share_count
 * (independent of the page refcount).  A non-zero count means the pud
 * entry points at a table shared with other mms: unmap by clearing the
 * pud and decrementing the share count.  A zero count means this mm is
 * the only user and the pte must be torn down by the normal path.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long *addr, pte_t *ptep)
{
	unsigned long sz = huge_page_size(hstate_vma(vma));
	pgd_t *pgd = pgd_offset(mm, *addr);
	p4d_t *p4d = p4d_offset(pgd, *addr);
	pud_t *pud = pud_offset(p4d, *addr);

	/* Only PMD_SIZE pages ever share PMD page table pages. */
	if (sz != PMD_SIZE)
		return 0;
	if (!page_pmd_pts_count(virt_to_page(ptep)))
		return 0;

	pud_clear(pud);
	page_pmd_pts_dec(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	/*
	 * Rewind addr so the caller's loop re-examines the whole range
	 * covered by the now-detached PMD table.
	 */
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
@@ -6503,7 +6502,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
	return NULL;
}

/*
 * Stub for !CONFIG_ARCH_WANT_HUGE_PMD_SHARE: this architecture never
 * shares PMD page table pages, so there is nothing to unshare.
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep)
{
	return 0;
}
+1 −1
Original line number Diff line number Diff line
@@ -1449,7 +1449,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		address = pvmw.address;

		if (PageHuge(page)) {
			if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly