Commit ce288e05 authored by Christoph Hellwig and committed by Jens Axboe

block: remove BLK_BOUNCE_ISA support



Remove the BLK_BOUNCE_ISA support now that all users are gone.
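Background: BLK_BOUNCE_ISA existed so that queues restricted to the 24-bit ISA DMA window could bounce I/O pages through a dedicated GFP_DMA mempool, the isa_page_pool removed below. A mempool backs a fallible allocator with a pre-filled reserve so I/O can still make forward progress under memory pressure. The following is a minimal userspace sketch of that reserve-fallback idea; all names here are made up for illustration and are not kernel API:

#include <stdlib.h>

#define POOL_SIZE 16			/* mirrors ISA_POOL_SIZE in bounce.c */

/* Toy mempool: a pre-filled reserve backing a fallible allocator. */
struct toy_pool {
	void *reserve[POOL_SIZE];
	int nr_free;
	size_t elem_size;
};

static int toy_pool_init(struct toy_pool *p, size_t elem_size)
{
	p->elem_size = elem_size;
	for (p->nr_free = 0; p->nr_free < POOL_SIZE; p->nr_free++) {
		p->reserve[p->nr_free] = malloc(elem_size);
		if (!p->reserve[p->nr_free])
			return -1;	/* could not fill the reserve */
	}
	return 0;
}

/* Try the normal allocator first, fall back to the reserve. */
static void *toy_pool_alloc(struct toy_pool *p)
{
	void *obj = malloc(p->elem_size);

	if (!obj && p->nr_free > 0)
		obj = p->reserve[--p->nr_free];
	return obj;
}

/* Freeing refills the reserve before returning memory to the system. */
static void toy_pool_free(struct toy_pool *p, void *obj)
{
	if (p->nr_free < POOL_SIZE)
		p->reserve[p->nr_free++] = obj;
	else
		free(obj);
}

With the last BLK_BOUNCE_ISA users gone, only the highmem page_pool and plain GFP_NOIO allocations remain in the diffs below.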

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20210331073001.46776-7-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent aaff5eba
block/bio-integrity.c +1 −2
@@ -204,7 +204,6 @@ bool bio_integrity_prep(struct bio *bio)
 {
 	struct bio_integrity_payload *bip;
 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	void *buf;
 	unsigned long start, end;
 	unsigned int len, nr_pages;
@@ -238,7 +237,7 @@ bool bio_integrity_prep(struct bio *bio)
 
 	/* Allocate kernel buffer for protection data */
 	len = intervals * bi->tuple_size;
-	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
+	buf = kmalloc(len, GFP_NOIO);
 	status = BLK_STS_RESOURCE;
 	if (unlikely(buf == NULL)) {
 		printk(KERN_ERR "could not allocate integrity buffer\n");
block/blk-map.c +2 −2
@@ -181,7 +181,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 
 			i++;
 		} else {
-			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
+			page = alloc_page(GFP_NOIO | gfp_mask);
 			if (!page) {
 				ret = -ENOMEM;
 				goto cleanup;
@@ -486,7 +486,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		page = alloc_page(q->bounce_gfp | gfp_mask);
+		page = alloc_page(GFP_NOIO | gfp_mask);
 		if (!page)
 			goto cleanup;
 
block/blk-settings.c +0 −11
@@ -103,28 +103,17 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 {
 	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
-	int dma = 0;
 
-	q->bounce_gfp = GFP_NOIO;
 #if BITS_PER_LONG == 64
 	/*
 	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
 	 * some IOMMUs can handle everything, but I don't know of a
 	 * way to test this here.
 	 */
-	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
-		dma = 1;
 	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
 #else
-	if (b_pfn < blk_max_low_pfn)
-		dma = 1;
 	q->limits.bounce_pfn = b_pfn;
 #endif
-	if (dma) {
-		init_emergency_isa_pool();
-		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->limits.bounce_pfn = b_pfn;
-	}
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
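For reference, blk_queue_bounce_limit() as it reads after this hunk, pieced together from the unchanged context lines above: the DMA bookkeeping is gone and only the bounce pfn limit is recorded.

void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;

#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	q->limits.bounce_pfn = b_pfn;
#endif
}
EXPORT_SYMBOL(blk_queue_bounce_limit);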
block/blk.h +0 −5
@@ -312,13 +312,8 @@ static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
 #ifdef CONFIG_BOUNCE
-extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
-static inline int init_emergency_isa_pool(void)
-{
-	return 0;
-}
 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
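With the init_emergency_isa_pool() stubs gone, the CONFIG_BOUNCE block in block/blk.h reduces to a single declaration pair; the closing #endif below is assumed from the surrounding conditional:

#ifdef CONFIG_BOUNCE
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */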
block/bounce.c +26 −98
@@ -29,7 +29,7 @@
 #define ISA_POOL_SIZE	16
 
 static struct bio_set bounce_bio_set, bounce_bio_split;
-static mempool_t page_pool, isa_page_pool;
+static mempool_t page_pool;
 
 static void init_bounce_bioset(void)
 {
@@ -89,41 +89,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 
 #endif /* CONFIG_HIGHMEM */
 
-/*
- * allocate pages in the DMA region for the ISA pool
- */
-static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
-{
-	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
-}
-
-static DEFINE_MUTEX(isa_mutex);
-
-/*
- * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
- * as the max address, so check if the pool has already been created.
- */
-int init_emergency_isa_pool(void)
-{
-	int ret;
-
-	mutex_lock(&isa_mutex);
-
-	if (mempool_initialized(&isa_page_pool)) {
-		mutex_unlock(&isa_mutex);
-		return 0;
-	}
-
-	ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
-			   mempool_free_pages, (void *) 0);
-	BUG_ON(ret);
-
-	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
-	init_bounce_bioset();
-	mutex_unlock(&isa_mutex);
-	return 0;
-}
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -159,7 +124,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	}
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool)
+static void bounce_end_io(struct bio *bio)
 {
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, orig_vec;
@@ -173,7 +138,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
 		if (bvec->bv_page != orig_vec.bv_page) {
 			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
-			mempool_free(bvec->bv_page, pool);
+			mempool_free(bvec->bv_page, &page_pool);
 		}
 		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
 	}
@@ -185,33 +150,17 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 
 static void bounce_end_io_write(struct bio *bio)
 {
-	bounce_end_io(bio, &page_pool);
+	bounce_end_io(bio);
 }
 
-static void bounce_end_io_write_isa(struct bio *bio)
-{
-
-	bounce_end_io(bio, &isa_page_pool);
-}
-
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
+static void bounce_end_io_read(struct bio *bio)
 {
 	struct bio *bio_orig = bio->bi_private;
 
 	if (!bio->bi_status)
 		copy_to_high_bio_irq(bio_orig, bio);
 
-	bounce_end_io(bio, pool);
-}
-
-static void bounce_end_io_read(struct bio *bio)
-{
-	__bounce_end_io_read(bio, &page_pool);
-}
-
-static void bounce_end_io_read_isa(struct bio *bio)
-{
-	__bounce_end_io_read(bio, &isa_page_pool);
+	bounce_end_io(bio);
 }
 
 static struct bio *bounce_clone_bio(struct bio *bio_src)
@@ -287,8 +236,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	return NULL;
 }
 
-static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -298,6 +247,20 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	bool bounce = false;
 	int sectors = 0;
 
+	/*
+	 * Data-less bio, nothing to bounce
+	 */
+	if (!bio_has_data(*bio_orig))
+		return;
+
+	/*
+	 * Just check if the bounce pfn is equal to or bigger than the highest
+	 * pfn in the system -- in that case, don't waste time iterating over
+	 * bio segments
+	 */
+	if (q->limits.bounce_pfn >= blk_max_pfn)
+		return;
+
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			sectors += from.bv_len >> 9;
@@ -327,7 +290,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		if (page_to_pfn(page) <= q->limits.bounce_pfn)
 			continue;
 
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
 		if (rw == WRITE) {
@@ -346,46 +309,11 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 
 	bio->bi_flags |= (1 << BIO_BOUNCED);
 
-	if (pool == &page_pool) {
-		bio->bi_end_io = bounce_end_io_write;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read;
-	} else {
-		bio->bi_end_io = bounce_end_io_write_isa;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read_isa;
-	}
+	if (rw == READ)
+		bio->bi_end_io = bounce_end_io_read;
+	else
+		bio->bi_end_io = bounce_end_io_write;
 
 	bio->bi_private = *bio_orig;
 	*bio_orig = bio;
 }
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
-{
-	mempool_t *pool;
-
-	/*
-	 * Data-less bio, nothing to bounce
-	 */
-	if (!bio_has_data(*bio_orig))
-		return;
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (q->limits.bounce_pfn >= blk_max_pfn)
-			return;
-		pool = &page_pool;
-	} else {
-		BUG_ON(!mempool_initialized(&isa_page_pool));
-		pool = &isa_page_pool;
-	}
-
-	/*
-	 * slow path
-	 */
-	__blk_queue_bounce(q, bio_orig, pool);
-}
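Net effect in block/bounce.c: the __blk_queue_bounce()/blk_queue_bounce() split disappears, the early-out checks move into the single remaining function, and the completion handler is chosen by I/O direction alone rather than by backing pool. A condensed view assembled from the added and context lines above, with the unchanged middle of the function elided:

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	int rw = bio_data_dir(*bio_orig);

	/* Data-less bio, nothing to bounce */
	if (!bio_has_data(*bio_orig))
		return;

	/* Every pfn already reachable: nothing can need bouncing */
	if (q->limits.bounce_pfn >= blk_max_pfn)
		return;

	/* ... walk segments, clone the bio, bounce high pages via &page_pool ... */

	if (rw == READ)
		bio->bi_end_io = bounce_end_io_read;
	else
		bio->bi_end_io = bounce_end_io_write;

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}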