mm/huge_memory.c +7 −10

@@ -2443,13 +2443,13 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	ClearPageCompound(head);
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
-		/* Additional pin to radix tree of swap cache */
+		/* Additional pin to swap cache */
 		if (PageSwapCache(head))
 			page_ref_add(head, 2);
 		else
 			page_ref_inc(head);
 	} else {
-		/* Additional pin to radix tree */
+		/* Additional pin to page cache */
 		page_ref_add(head, 2);
 		xa_unlock(&head->mapping->i_pages);
 	}
@@ -2561,7 +2561,7 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
 {
 	int extra_pins;
 
-	/* Additional pins from radix tree */
+	/* Additional pins from page cache */
 	if (PageAnon(page))
 		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
 	else
@@ -2657,17 +2657,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
 
 	if (mapping) {
-		void **pslot;
+		XA_STATE(xas, &mapping->i_pages, page_index(head));
 
-		xa_lock(&mapping->i_pages);
-		pslot = radix_tree_lookup_slot(&mapping->i_pages,
-				page_index(head));
 		/*
-		 * Check if the head page is present in radix tree.
+		 * Check if the head page is present in page cache.
 		 * We assume all tail are present too, if head is there.
 		 */
-		if (radix_tree_deref_slot_protected(pslot,
-					&mapping->i_pages.xa_lock) != head)
+		xa_lock(&mapping->i_pages);
+		if (xas_load(&xas) != head)
 			goto fail;
 	}
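The last hunk replaces the old radix-tree slot lookup with the XArray cursor API: an on-stack XA_STATE positioned at the head page's index, with xas_load() reading the entry while i_pages is locked. Below is a minimal, self-contained sketch of that lookup pattern; the example XArray and helper name are illustrative, only the XArray calls themselves (XA_STATE, xas_lock, xas_load, xas_unlock) come from the kernel API.

#include <linux/xarray.h>

/* Hypothetical XArray standing in for mapping->i_pages. */
static DEFINE_XARRAY(example_pages);

/* Look up the entry at @index while holding the array's xa_lock. */
static void *example_lookup_locked(unsigned long index)
{
	XA_STATE(xas, &example_pages, index);	/* cursor positioned at @index */
	void *entry;

	xas_lock(&xas);			/* same lock as xa_lock(&example_pages) */
	entry = xas_load(&xas);		/* read the entry at the cursor's index */
	xas_unlock(&xas);

	return entry;
}

xas_load() under the lock plays the role that radix_tree_lookup_slot() followed by radix_tree_deref_slot_protected() played before: it returns the current entry at that index without taking a reference, which is all the head-page check in split_huge_page_to_list() needs.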