Patch: introduce invalidate_inode_page() to safely drop a single clean, unused
page from its pagecache mapping, and use it in invalidate_mapping_pages().

include/linux/mm.h | +2 −0
mm/truncate.c      | +20 −6

--- include/linux/mm.h
@@ -796,6 +796,8 @@
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 int truncate_inode_page(struct address_space *mapping, struct page *page);
+int invalidate_inode_page(struct page *page);
+
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags);

--- mm/truncate.c
@@ -146,6 +146,24 @@
 int truncate_inode_page(struct address_space *mapping, struct page *page)
 	return truncate_complete_page(mapping, page);
 }
 
+/*
+ * Safely invalidate one page from its pagecache mapping.
+ * It only drops clean, unused pages. The page must be locked.
+ *
+ * Returns 1 if the page is successfully invalidated, otherwise 0.
+ */
+int invalidate_inode_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	if (!mapping)
+		return 0;
+	if (PageDirty(page) || PageWriteback(page))
+		return 0;
+	if (page_mapped(page))
+		return 0;
+	return invalidate_complete_page(mapping, page);
+}
+
 /**
  * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate

@@ -312,12 +330,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 		if (lock_failed)
 			continue;
 
-		if (PageDirty(page) || PageWriteback(page))
-			goto unlock;
-		if (page_mapped(page))
-			goto unlock;
-		ret += invalidate_complete_page(mapping, page);
-unlock:
+		ret += invalidate_inode_page(page);
+
 		unlock_page(page);
 		if (next > end)
 			break;