Commit 21b56c84 authored by Al Viro

iov_iter: get rid of separate bvec and xarray callbacks



After the previous commit we have
	* xarray and bvec callbacks identical in all cases
	* both equivalent to the kvec callback wrapped into a
kmap_local_page()/kunmap_local() pair.

So we can pass only two (iovec and kvec) callbacks to
iterate_and_advance() and let iterate_{bvec,xarray} wrap
the kvec one in a kmap_local_page()/kunmap_local() pair.
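
For illustration, the caller-visible effect, taken verbatim from the
_copy_to_iter() hunk below: the bvec and xarray step expressions drop
out, and the kvec one serves every kernel-addressed case. Before:

	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len)
	)

After:

	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)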

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 1b4fb5ff
1 file changed, 30 insertions(+), 82 deletions(-)

--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -42,18 +42,20 @@
 	while (n) {						\
 		unsigned offset = p->bv_offset + skip;		\
 		unsigned left;					\
-		__v.bv_offset = offset % PAGE_SIZE;		\
-		__v.bv_page = p->bv_page + offset / PAGE_SIZE;	\
-		__v.bv_len = min(min(n, p->bv_len - skip),	\
+		void *kaddr = kmap_local_page(p->bv_page +	\
+					offset / PAGE_SIZE);	\
+		__v.iov_base = kaddr + offset % PAGE_SIZE;	\
+		__v.iov_len = min(min(n, p->bv_len - skip),	\
 		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
 		left = (STEP);					\
-		__v.bv_len -= left;				\
-		skip += __v.bv_len;				\
+		kunmap_local(kaddr);				\
+		__v.iov_len -= left;				\
+		skip += __v.iov_len;				\
 		if (skip == p->bv_len) {			\
 			skip = 0;				\
 			p++;					\
 		}						\
-		n -= __v.bv_len;				\
+		n -= __v.iov_len;				\
 		if (left)					\
 			break;					\
 	}							\
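
The new iterate_bvec body above follows one pattern: map the current
page, present the mapped range as a kvec-style {base, len} pair, run
the kvec step, unmap, then account for whatever the step left over.
A minimal userspace sketch of that pattern (struct seg, step() and
iterate_segs() are illustrative stand-ins, not kernel APIs; a plain
pointer stands in for kmap_local_page()):

	#include <stddef.h>
	#include <string.h>

	/* stand-in for struct bio_vec: one already-mapped page-sized chunk */
	struct seg {
		char *page;
		size_t offset;
		size_t len;
	};

	/* stand-in for the kvec STEP: copy out, return bytes left unprocessed */
	static size_t step(void *base, size_t len, char *out)
	{
		memcpy(out, base, len);
		return 0;	/* memcpy cannot fail, so nothing is left over */
	}

	/* the iterate_bvec shape: map, build {base,len}, run step, unmap, account */
	static size_t iterate_segs(const struct seg *p, size_t nsegs, char *out)
	{
		size_t done = 0;

		for (size_t i = 0; i < nsegs; i++) {
			char *kaddr = p[i].page;	/* kernel: kmap_local_page() */
			size_t left = step(kaddr + p[i].offset, p[i].len, out + done);
			/* kernel: kunmap_local(kaddr) */
			done += p[i].len - left;
			if (left)
				break;	/* a short step terminates the walk */
		}
		return done;
	}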
@@ -81,15 +83,16 @@
 			break;						\
 		for (j = (head->index < index) ? index - head->index : 0; \
 		     j < thp_nr_pages(head); j++) {			\
-			__v.bv_page = head + j;				\
-			offset = (i->xarray_start + skip) & ~PAGE_MASK;	\
+			void *kaddr = kmap_local_page(head + j);	\
+			offset = (i->xarray_start + skip) % PAGE_SIZE;	\
+			__v.iov_base = kaddr + offset;			\
 			seg = PAGE_SIZE - offset;			\
-			__v.bv_offset = offset;				\
-			__v.bv_len = min(n, seg);			\
+			__v.iov_len = min(n, seg);			\
 			left = (STEP);					\
-			__v.bv_len -= left;				\
-			n -= __v.bv_len;				\
-			skip += __v.bv_len;				\
+			kunmap_local(kaddr);				\
+			__v.iov_len -= left;				\
+			n -= __v.iov_len;				\
+			skip += __v.iov_len;				\
 			if (left || n == 0)				\
 				goto __out;				\
 		}							\
@@ -99,7 +102,7 @@ __out: \
 	n = wanted - n;						\
 }
 
-#define __iterate_and_advance(i, n, v, I, B, K, X) {		\
+#define __iterate_and_advance(i, n, v, I, K) {			\
 	if (unlikely(i->count < n))				\
 		n = i->count;					\
 	if (likely(n)) {					\
@@ -112,8 +115,8 @@ __out: \
 			i->iov = iov;				\
 		} else if (iov_iter_is_bvec(i)) {		\
 			const struct bio_vec *bvec = i->bvec;	\
-			struct bio_vec v;			\
-			iterate_bvec(i, n, v, bvec, skip, (B))	\
+			struct kvec v;				\
+			iterate_bvec(i, n, v, bvec, skip, (K))	\
 			i->nr_segs -= bvec - i->bvec;		\
 			i->bvec = bvec;				\
 		} else if (iov_iter_is_kvec(i)) {		\
@@ -123,16 +126,15 @@ __out: \
 			i->nr_segs -= kvec - i->kvec;		\
 			i->kvec = kvec;				\
 		} else if (iov_iter_is_xarray(i)) {		\
-			struct bio_vec v;			\
-			iterate_xarray(i, n, v, skip, (X))	\
+			struct kvec v;				\
+			iterate_xarray(i, n, v, skip, (K))	\
 		}						\
 		i->count -= n;					\
 		i->iov_offset = skip;				\
 	}							\
 }
-#define iterate_and_advance(i, n, v, I, B, K, X) \
-	__iterate_and_advance(i, n, v, I, ((void)(B),0),	\
-				((void)(K),0), ((void)(X),0))
+#define iterate_and_advance(i, n, v, I, K) \
+	__iterate_and_advance(i, n, v, I, ((void)(K),0))
 
 static int copyout(void __user *to, const void *from, size_t n)
 {
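
One detail worth noting in the hunk above: __iterate_and_advance()
expects each step expression to evaluate to the number of bytes it
failed to process, while iterate_and_advance() is for steps that
cannot fail. The ((void)(K),0) wrapper bridges the two with the comma
operator: K is evaluated for its side effects and the whole expression
yields 0. A standalone illustration (RUN_STEP and the buffer are
hypothetical names for this sketch, not kernel code):

	#include <stdio.h>

	/* evaluate STEP for its side effects; the expression itself is 0,
	 * i.e. "no bytes left over" */
	#define RUN_STEP(STEP)	((void)(STEP), 0)

	int main(void)
	{
		char buf[8];
		int left = RUN_STEP(snprintf(buf, sizeof(buf), "hi"));

		printf("%s, left = %d\n", buf, left);	/* hi, left = 0 */
		return 0;
	}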
@@ -612,11 +614,7 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 		might_fault();
 	iterate_and_advance(i, bytes, v,
 		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
-		memcpy_to_page(v.bv_page, v.bv_offset,
-			       (from += v.bv_len) - v.bv_len, v.bv_len),
-		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
-		memcpy_to_page(v.bv_page, v.bv_offset,
-			       (from += v.bv_len) - v.bv_len, v.bv_len)
+		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 	)
 
 	return bytes;
@@ -714,12 +712,8 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 	__iterate_and_advance(i, bytes, v,
 		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
 			   v.iov_len),
-		copy_mc_to_page(v.bv_page, v.bv_offset,
-				      (from += v.bv_len) - v.bv_len, v.bv_len),
 		copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
-					- v.iov_len, v.iov_len),
-		copy_mc_to_page(v.bv_page, v.bv_offset,
-				      (from += v.bv_len) - v.bv_len, v.bv_len)
+					- v.iov_len, v.iov_len)
 	)
 
 	return bytes;
@@ -738,11 +732,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 		might_fault();
 	iterate_and_advance(i, bytes, v,
 		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -759,11 +749,7 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -795,12 +781,8 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
-		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
 		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
-			v.iov_len),
-		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+			v.iov_len)
 	)
 
 	return bytes;
@@ -931,9 +913,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 		return pipe_zero(bytes, i);
 	iterate_and_advance(i, bytes, v,
 		clear_user(v.iov_base, v.iov_len),
-		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
-		memset(v.iov_base, 0, v.iov_len),
-		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+		memset(v.iov_base, 0, v.iov_len)
 	)
 
 	return bytes;
@@ -955,11 +935,7 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt
 	}
 	iterate_and_advance(i, bytes, v,
 		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 	kunmap_atomic(kaddr);
 	return bytes;
@@ -1697,25 +1673,11 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 			off += v.iov_len;
 		}
 		next ? 0 : v.iov_len;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
-				      p + v.bv_offset, v.bv_len,
-				      sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
 	}), ({
 		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
 				      v.iov_base, v.iov_len,
 				      sum, off);
 		off += v.iov_len;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
-				      p + v.bv_offset, v.bv_len,
-				      sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
 	})
 	)
 	*csum = sum;
@@ -1749,25 +1711,11 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 			off += v.iov_len;
 		}
 		next ? 0 : v.iov_len;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy(p + v.bv_offset,
-				      (from += v.bv_len) - v.bv_len,
-				      v.bv_len, sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
 	}), ({
 		sum = csum_and_memcpy(v.iov_base,
 				     (from += v.iov_len) - v.iov_len,
 				     v.iov_len, sum, off);
 		off += v.iov_len;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy(p + v.bv_offset,
-				      (from += v.bv_len) - v.bv_len,
-				      v.bv_len, sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
 	})
 	)
 	csstate->csum = csum_shift(sum, csstate->off);