fs/dax.c +21 −43

@@ -722,11 +722,10 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
  */
 struct page *dax_layout_busy_page(struct address_space *mapping)
 {
-	pgoff_t	indices[PAGEVEC_SIZE];
+	XA_STATE(xas, &mapping->i_pages, 0);
+	void *entry;
+	unsigned int scanned = 0;
 	struct page *page = NULL;
-	struct pagevec pvec;
-	pgoff_t	index, end;
-	unsigned i;
 
 	/*
 	 * In the 'limited' case get_user_pages() for dax is disabled.
@@ -737,13 +736,9 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 		return NULL;
 
-	pagevec_init(&pvec);
-	index = 0;
-	end = -1;
-
 	/*
 	 * If we race get_user_pages_fast() here either we'll see the
-	 * elevated page count in the pagevec_lookup and wait, or
+	 * elevated page count in the iteration and wait, or
 	 * get_user_pages_fast() will see that the page it took a reference
 	 * against is no longer mapped in the page tables and bail to the
 	 * get_user_pages() slow path.  The slow path is protected by
@@ -755,43 +750,26 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 */
 	unmap_mapping_range(mapping, 0, 0, 1);
 
-	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *pvec_ent = pvec.pages[i];
-			void *entry;
-
-			index = indices[i];
-			if (index >= end)
-				break;
-
-			if (WARN_ON_ONCE(!xa_is_value(pvec_ent)))
-				continue;
-
-			xa_lock_irq(&mapping->i_pages);
-			entry = get_unlocked_mapping_entry(mapping, index, NULL);
-			if (entry)
-				page = dax_busy_page(entry);
-			put_unlocked_mapping_entry(mapping, index, entry);
-			xa_unlock_irq(&mapping->i_pages);
-			if (page)
-				break;
-		}
-
-		/*
-		 * We don't expect normal struct page entries to exist in our
-		 * tree, but we keep these pagevec calls so that this code is
-		 * consistent with the common pattern for handling pagevecs
-		 * throughout the kernel.
-		 */
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		index++;
-
+	xas_lock_irq(&xas);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		if (WARN_ON_ONCE(!xa_is_value(entry)))
+			continue;
+		if (unlikely(dax_is_locked(entry)))
+			entry = get_unlocked_entry(&xas);
+		if (entry)
+			page = dax_busy_page(entry);
+		put_unlocked_entry(&xas, entry);
 		if (page)
 			break;
+		if (++scanned % XA_CHECK_SCHED)
+			continue;
+
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
+		cond_resched();
+		xas_lock_irq(&xas);
 	}
+	xas_unlock_irq(&xas);
 	return page;
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
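The conversion replaces the pagevec batching loop with the standard XArray walking pattern: take xa_lock_irq() once, walk the entries with xas_for_each(), and every XA_CHECK_SCHED iterations pause the cursor, drop the lock, and cond_resched() so the walk never holds a spinlock with interrupts disabled for an unbounded time. Below is a minimal sketch of that pattern with the DAX specifics factored out; scan_for_busy_entry() and the is_busy() predicate are illustrative names, not kernel API, standing in for dax_busy_page() and the get_unlocked_entry()/put_unlocked_entry() handshake.

#include <linux/xarray.h>
#include <linux/sched.h>

/*
 * Illustrative sketch only: find the first entry in @xa that a
 * caller-supplied predicate flags as busy, mirroring the locking and
 * rescheduling shape of the new dax_layout_busy_page().
 */
static void *scan_for_busy_entry(struct xarray *xa, bool (*is_busy)(void *))
{
	XA_STATE(xas, xa, 0);		/* iteration cursor, starting at index 0 */
	unsigned int scanned = 0;
	void *entry, *found = NULL;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (is_busy(entry)) {
			found = entry;
			break;
		}
		/*
		 * Every XA_CHECK_SCHED entries, give lock waiters and the
		 * scheduler a chance: xas_pause() records the cursor's
		 * position by index so the walk can be safely resumed
		 * after the lock is dropped, even if the tree node the
		 * cursor pointed at is freed in the meantime.
		 */
		if (++scanned % XA_CHECK_SCHED)
			continue;
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return found;
}

The xas_pause()/relock dance is what the old code got implicitly from pagevec batching, where the lock was only held for one batch at a time; a single continuous walk has to reschedule explicitly, which is why the diff introduces the scanned counter.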