Commit ef1d88ce authored by Mike Snitzer

Merge remote-tracking branch 'jens/for-4.7/core' into dm-4.7

Needed in order to update the DM thinp code to use the new async
__blkdev_issue_discard() interface.
parents 04974df8 0ef5a50c
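
For orientation before the per-file diffs: the branch being merged splits discard issue into an async builder plus a sync wrapper. __blkdev_issue_discard() chains and submits all but the last discard bio and hands the tail back through *biop, so a caller can attach its own completion instead of blocking. The sketch below shows how an async caller such as DM thinp might drive it; it is inferred only from the signature and the blkdev_issue_discard() wrapper in the diff below, and issue_discard_async() and my_discard_endio() are invented names with error handling abbreviated.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

static void my_discard_endio(struct bio *bio)
{
	/* Runs once the whole chain has completed; bi_error check omitted. */
	complete((struct completion *)bio->bi_private);
	bio_put(bio);
}

static int issue_discard_async(struct block_device *bdev, sector_t sector,
			       sector_t nr_sects, struct completion *done)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	/* Chains and submits all but the last bio; the tail comes back. */
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
				     type, &bio);
	if (!ret && bio) {
		bio->bi_private = done;
		bio->bi_end_io = my_discard_endio;
		submit_bio(type, bio);	/* caller waits on @done later */
	}
	blk_finish_plug(&plug);
	return ret;
}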
Documentation/block/queue-sysfs.txt +9 −0
@@ -141,6 +141,15 @@ control of this block device to that new IO scheduler. Note that writing
 an IO scheduler name to this file will attempt to load that IO scheduler
 module, if it isn't already present in the system.

+write_cache (RW)
+----------------
+When read, this file will display whether the device has write back
+caching enabled or not. It will return "write back" for the former
+case, and "write through" for the latter. Writing to this file can
+change the kernel's view of the device, but it doesn't alter the
+device state. This means that it might not be safe to toggle the
+setting from "write back" to "write through", since that will also
+eliminate cache flushes issued by the kernel.


 Jens Axboe <jens.axboe@oracle.com>, February 2009
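
As a usage note (not part of the patch), the new attribute reads like any other queue sysfs file. A minimal user-space sketch; the device name sda is an assumption:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/block/sda/queue/write_cache", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror("write_cache");
		return 1;
	}
	printf("%s", buf);	/* "write back" or "write through" */
	fclose(f);
	return 0;
}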
block/bio.c +0 −11
@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
 	bio_endio(__bio_chain_endio(bio));
 }

-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-	bio_set_flag(bio, BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
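
Judging from the commit message above, this helper is being relocated rather than retired: DM thinp still needs it, so the removal from block/bio.c pairs with making it reachable from the bio header again (that hunk is in one of the file diffs not expanded here). Its contract is that a bio whose remaining count has been raised completes only after every chained completion has dropped the count back to zero. The following self-contained user-space model sketches that pattern; it is an illustration, not the kernel code:

#include <stdatomic.h>
#include <stdio.h>

struct mock_bio {
	atomic_int remaining;	/* models bio->__bi_remaining */
};

static void mock_inc_remaining(struct mock_bio *bio)
{
	atomic_fetch_add(&bio->remaining, 1);	/* one more completion due */
}

static void mock_endio(struct mock_bio *bio)
{
	/* Only the decrement that reaches zero finishes the bio. */
	if (atomic_fetch_sub(&bio->remaining, 1) == 1)
		printf("parent bio completes\n");
	else
		printf("still waiting on a chained bio\n");
}

int main(void)
{
	struct mock_bio parent = { .remaining = 1 };

	mock_inc_remaining(&parent);	/* a child bio is chained to it */
	mock_endio(&parent);		/* parent's own completion arrives */
	mock_endio(&parent);		/* child's completion finishes it */
	return 0;
}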
block/blk-core.c +3 −2
@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_add_request_payload - add a payload to a request
  * @rq: request to update
  * @page: page backing the payload
+ * @offset: offset in page
  * @len: length of the payload.
  *
  * This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
  * discard requests should ever use it.
  */
 void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len)
+		int offset, unsigned int len)
 {
 	struct bio *bio = rq->bio;

 	bio->bi_io_vec->bv_page = page;
-	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_offset = offset;
 	bio->bi_io_vec->bv_len = len;

 	bio->bi_iter.bi_size = len;
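
The new @offset argument lets a driver point the request's single payload bvec at data that does not start at the beginning of the page. A hedged sketch of a caller; the driver context and the offset/length values (8 and 24) are illustrative, not from this commit:

#include <linux/blkdev.h>

static void prep_discard_payload(struct request *req, struct page *page)
{
	/* Use a 24-byte descriptor that lives 8 bytes into the page,
	 * instead of having to copy it to offset 0 first.
	 */
	blk_add_request_payload(req, page, 8, 24);
}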
block/blk-lib.c +64 −114
@@ -9,82 +9,46 @@

 #include "blk.h"

-struct bio_batch {
-	atomic_t		done;
-	int			error;
-	struct completion	*wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+		gfp_t gfp)
 {
-	struct bio_batch *bb = bio->bi_private;
+	struct bio *new = bio_alloc(gfp, nr_pages);

-	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-		bb->error = bio->bi_error;
-	if (atomic_dec_and_test(&bb->done))
-		complete(bb->wait);
-	bio_put(bio);
-}
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(rw, bio);
+	}

-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+	return new;
+}
+
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
-	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = *biop;
 	unsigned int granularity;
 	int alignment;
-	struct bio_batch bb;
-	struct bio *bio;
 	int ret = 0;
-	struct blk_plug plug;

 	if (!q)
 		return -ENXIO;

 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
+	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+		return -EOPNOTSUPP;

 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
-		type |= REQ_SECURE;
-	}
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
-	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
 		sector_t end_sect, tmp;

-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
 		/* Make sure bi_size doesn't overflow */
 		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

-		/*
+		/**
 		 * If splitting a request, and the next starting sector would be
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
@@ -98,18 +62,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}

+		bio = next_bio(bio, type, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
-
 		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;

-		atomic_inc(&bb.done);
-		submit_bio(type, bio);
-
 		/*
 		 * We can loop for a long time in here, if someone does
 		 * full device discards (like mkfs). Be nice and allow
@@ -118,14 +78,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 */
 		cond_resched();
 	}
-	blk_finish_plug(&plug);

-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
+	*biop = bio;
+	return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:	blockdev to issue discard for
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to discard
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret;
+
+	if (flags & BLKDEV_DISCARD_SECURE)
+		type |= REQ_SECURE;
+
+	blk_start_plug(&plug);
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+			&bio);
+	if (!ret && bio) {
+		ret = submit_bio_wait(type, bio);
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
+	}
+	blk_finish_plug(&plug);

-	if (bb.error)
-		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			    sector_t nr_sects, gfp_t gfp_mask,
 			    struct page *page)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;

 	if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;

-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	while (nr_sects) {
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
-
-		atomic_inc(&bb.done);
-		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
 	}

-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);

@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 				  sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
-	struct bio *bio;
-	struct bio_batch bb;
+	struct bio *bio = NULL;
 	unsigned int sz;
-	DECLARE_COMPLETION_ONSTACK(wait);

-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	ret = 0;
 	while (nr_sects != 0) {
-		bio = bio_alloc(gfp_mask,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, WRITE,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev   = bdev;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_private = &bb;

 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			if (ret < (sz << 9))
 				break;
 		}
 		ret = 0;
-		atomic_inc(&bb.done);
-		submit_bio(WRITE, bio);
 	}

-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		return submit_bio_wait(WRITE, bio);
+	return 0;
 }

 /**
block/blk-mq-tag.c +12 −0
@@ -474,6 +474,18 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);

+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv)
+{
+	int i;
+
+	for (i = 0; i < tagset->nr_hw_queues; i++) {
+		if (tagset->tags && tagset->tags[i])
+			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+	}
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
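
The new iterator visits busy requests on every hardware queue of a tag set, where blk_mq_all_tag_busy_iter() above covers a single tags array. A sketch of a driver-side caller; the callback signature (request, opaque data, reserved flag) follows the busy_tag_iter_fn typedef this API expects, while count_busy() itself is invented for the example:

#include <linux/blk-mq.h>

static void count_busy(struct request *rq, void *data, bool reserved)
{
	unsigned int *busy = data;

	(*busy)++;	/* rq is a busy request on one of the hw queues */
}

static unsigned int count_busy_requests(struct blk_mq_tag_set *set)
{
	unsigned int busy = 0;

	blk_mq_tagset_busy_iter(set, count_busy, &busy);
	return busy;
}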