mm/truncate.c +6 −9
@@ -33,15 +33,12 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 				pgoff_t index, void *entry)
 {
-	struct radix_tree_node *node;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, index);
 
-	if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
-		return;
-	if (*slot != entry)
+	xas_set_update(&xas, workingset_update_node);
+	if (xas_load(&xas) != entry)
 		return;
-	__radix_tree_replace(&mapping->i_pages, node, slot, NULL,
-			     workingset_update_node);
+	xas_store(&xas, NULL);
 	mapping->nrexceptional--;
 }
@@ -738,10 +735,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		index++;
 	}
 	/*
-	 * For DAX we invalidate page tables after invalidating radix tree. We
+	 * For DAX we invalidate page tables after invalidating page cache. We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
-	 * work as we have no cheap way to find whether radix tree entry didn't
+	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
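For readers less familiar with the XArray API this patch converts to: XA_STATE() declares an on-stack cursor over &mapping->i_pages at the given index, xas_set_update() registers workingset_update_node so shadow-node accounting still runs when the store touches a node (the old code passed that callback to __radix_tree_replace() explicitly), and a single xas_load() replaces the __radix_tree_lookup() walk plus the manual *slot comparison. Below is a minimal sketch of that compare-and-replace pattern; replace_if_matches() is a hypothetical name invented here, not part of the patch, and like __clear_shadow_entry() it assumes the caller already holds the xa_lock (the i_pages lock, in truncate.c's case).

/* Hypothetical sketch of the XArray compare-and-replace pattern above;
 * caller must hold the xa_lock. Not from the patch itself. */
#include <linux/xarray.h>

static void replace_if_matches(struct xarray *xa, unsigned long index,
			       void *expected, void *new_entry)
{
	XA_STATE(xas, xa, index);	/* on-stack cursor: array, index, cached node */

	/* A user needing node accounting would call xas_set_update() here,
	 * as the patch does with workingset_update_node. */
	if (xas_load(&xas) != expected)	/* one walk; no node/slot out-parameters */
		return;
	xas_store(&xas, new_entry);	/* reuses the position cached by xas_load() */
}

The gain over the radix tree version is visible in the diff's line count: the walk state lives in the xa_state, so the load and the store share one traversal instead of threading separate node and slot pointers through the calls.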