Commit defd94b7 authored by Mike Christie, committed by James Bottomley

[SCSI] separate max_sectors from max_hw_sectors



- export __blk_put_request and blk_execute_rq_nowait,
  needed for async REQ_BLOCK_PC requests
- separate max_hw_sectors and max_sectors for the block/scsi_ioctl.c and
  SG_IO bio.c helpers, per Jens's last comments. Since block/scsi_ioctl.c
  SG_IO was already testing against max_sectors, and SCSI-ml was setting
  max_sectors and max_hw_sectors to the same value, this does not change
  any SCSI SG_IO behavior. It only prepares ll_rw_blk.c, scsi_ioctl.c and
  bio.c for when SCSI-ml begins to set a valid max_hw_sectors for all
  LLDs. Today, if an LLD does not set max_hw_sectors, SCSI-ml sets it to
  a safe default, and some LLDs set it to an artificially low value to
  work around memory and feedback issues.
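
As a reference for the hunks below, the limit-selection rule they implement
can be sketched as follows (illustrative only; the helper req_max_sectors is
not part of the patch):

static unsigned short req_max_sectors(request_queue_t *q,
				      struct request *req)
{
	/* REQ_BLOCK_PC requests (SG_IO, scsi_execute_async) may use the
	 * full hardware limit; fs requests are held to the soft cap. */
	if (unlikely(blk_pc_request(req)))
		return q->max_hw_sectors;
	return q->max_sectors;
}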

Note: since max_sectors is now capped at BLK_DEF_MAX_SECTORS (1024),
drivers that used to call blk_queue_max_sectors with a larger value will
now see fs requests capped to BLK_DEF_MAX_SECTORS.
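
For example (the 2048 below is an assumed value, for illustration), with the
new blk_queue_max_sectors():

	blk_queue_max_sectors(q, 2048);
	/* q->max_hw_sectors == 2048: full limit, kept for REQ_BLOCK_PC */
	/* q->max_sectors == BLK_DEF_MAX_SECTORS (1024): cap for fs requests */

	blk_queue_max_sectors(q, 512);
	/* 512 < BLK_DEF_MAX_SECTORS, so both fields are set to 512 */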

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 8b05b773
block/ll_rw_blk.c +26 −8
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, MAX_SECTORS);
+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = q->max_hw_sectors = max_sectors;
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = q->max_sectors = max_sectors;
+	else {
+		q->max_sectors = BLK_DEF_MAX_SECTORS;
+		q->max_hw_sectors = max_sectors;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = t->max_hw_sectors =
-		min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 			    struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	struct bio *bio;
 	int reading;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
 	struct bio *bio;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
block/scsi_ioctl.c +1 −1
@@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	if (hdr->dxfer_len > (q->max_sectors << 9))
+	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
drivers/md/dm-table.c +1 −1
@@ -638,7 +638,7 @@ int dm_split_args(int *argc, char ***argvp, char *input)
 static void check_for_valid_limits(struct io_restrictions *rs)
 {
 	if (!rs->max_sectors)
-		rs->max_sectors = MAX_SECTORS;
+		rs->max_sectors = SAFE_MAX_SECTORS;
 	if (!rs->max_phys_segments)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
drivers/scsi/scsi_lib.c +1 −1
@@ -462,6 +462,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	req = blk_get_request(sdev->request_queue, write, gfp);
 	if (!req)
 		goto free_sense;
+	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
 
 	if (use_sg)
 		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
@@ -477,7 +478,6 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	req->sense_len = 0;
 	req->timeout = timeout;
 	req->retries = retries;
-	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
 	req->end_io_data = sioc;
 
 	sioc->data = privdata;
fs/bio.c +11 −9
@@ -313,7 +313,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset)
+			  *page, unsigned int len, unsigned int offset,
+			  unsigned short max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -327,7 +328,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+	if (((bio->bi_size + len) >> 9) > max_sectors)
 		return 0;
 
 	/*
@@ -401,7 +402,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset);
+	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
 }
 
 /**
@@ -420,8 +421,8 @@ int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
-	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-			      len, offset);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
 struct bio_map_data {
@@ -533,7 +534,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
 			ret = -EINVAL;
 			break;
 		}
@@ -647,7 +648,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 			/*
 			 * sorry...
 			 */
-			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+					    bytes)
 				break;
 
 			len -= bytes;
@@ -820,7 +822,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
 				    offset) < bytes)
 			break;
 