Commit 98fe2c3c authored by Dave Chinner, committed by Darrick J. Wong
Browse files

xfs: remove kmem_alloc_io()



Since commit 59bb4798 ("mm, sl[aou]b: guarantee natural alignment
for kmalloc(power-of-two)"), the core slab code guarantees slab
alignment in all situations sufficient for IO purposes (i.e. a minimum
of 512 byte alignment for >= 512 byte sized heap allocations), so we
no longer need the workaround in the XFS code to provide this
guarantee.

Replace the use of kmem_alloc_io() with kmem_alloc() or
kmem_alloc_large() appropriately, and remove the kmem_alloc_io()
interface altogether.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parent de2860f4
Loading
Loading
Loading
Loading
+0 −25
Original line number Diff line number Diff line
@@ -56,31 +56,6 @@ __kmem_vmalloc(size_t size, xfs_km_flags_t flags)
	return ptr;
}

/*
 * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
 * to the @align_mask. We only guarantee alignment up to page size, we'll clamp
 * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
 * aligned region.
 */
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_io(size, flags, _RET_IP_);

	/* Alignment beyond a page cannot be honoured; clamp to page size. */
	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
		align_mask = PAGE_SIZE - 1;

	/*
	 * Fast path: a plain heap allocation may happen to satisfy the
	 * alignment already. KM_MAYFAIL lets us fall back instead of looping.
	 */
	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr) {
		if (!((uintptr_t)ptr & align_mask))
			return ptr;
		/* Misaligned: discard and fall back to page-aligned vmalloc. */
		kfree(ptr);
	}
	return __kmem_vmalloc(size, flags);
}

void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
+0 −1
Original line number Diff line number Diff line
@@ -57,7 +57,6 @@ kmem_flags_convert(xfs_km_flags_t flags)
}

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
static inline void  kmem_free(const void *ptr)
{
+1 −2
Original line number Diff line number Diff line
@@ -315,7 +315,6 @@ xfs_buf_alloc_kmem(
	struct xfs_buf	*bp,
	xfs_buf_flags_t	flags)
{
	int		align_mask = xfs_buftarg_dma_alignment(bp->b_target);
	xfs_km_flags_t	kmflag_mask = KM_NOFS;
	size_t		size = BBTOB(bp->b_length);

@@ -323,7 +322,7 @@ xfs_buf_alloc_kmem(
	if (!(flags & XBF_READ))
		kmflag_mask |= KM_ZERO;

	bp->b_addr = kmem_alloc_io(size, align_mask, kmflag_mask);
	bp->b_addr = kmem_alloc(size, kmflag_mask);
	if (!bp->b_addr)
		return -ENOMEM;

+0 −6
Original line number Diff line number Diff line
@@ -355,12 +355,6 @@ extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);
#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

/*
 * Return the DMA alignment of the buftarg's underlying request queue.
 * Callers use the value as an address mask (align_mask) when allocating
 * IO buffers for this target.
 */
static inline int
xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
{
	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);
+1 −2
Original line number Diff line number Diff line
@@ -1476,7 +1476,6 @@ xlog_alloc_log(
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp);
		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
				sizeof(struct bio_vec);

@@ -1488,7 +1487,7 @@ xlog_alloc_log(
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
		iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
						KM_MAYFAIL | KM_ZERO);
		if (!iclog->ic_data)
			goto out_free_iclog;
Loading