Commit 5d8edfb9 authored by Matthew Wilcox (Oracle)
Browse files

iomap: Copy larger chunks from userspace



If we have a large folio, we can copy in larger chunks than PAGE_SIZE.
Start at the maximum page cache size and shrink by half every time we
hit the "we are short on memory" problem.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent d6bb59a9
Loading
Loading
Loading
Loading
+17 −15
Original line number Diff line number Diff line
@@ -769,6 +769,7 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
@@ -777,15 +778,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, iov_iter_count(i));
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
@@ -815,12 +813,14 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);
		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
@@ -836,11 +836,13 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
			if (chunk > PAGE_SIZE)
				chunk /= 2;
		} else {
			pos += status;
			written += status;
			length -= status;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {