Commit aaff5eba authored by Christoph Hellwig, committed by Jens Axboe
Browse files

scsi: remove the unchecked_isa_dma flag



Remove the unchecked_isa_dma flag now that all users are gone.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20210331073001.46776-6-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9b4c8eaa
Loading
Loading
Loading
Loading
+0 −4
Original line number Diff line number Diff line
@@ -1095,10 +1095,6 @@ of interest:
		 - maximum number of commands that can be queued on devices
                   controlled by the host. Overridden by LLD calls to
                   scsi_change_queue_depth().
    unchecked_isa_dma
		 - 1=>only use bottom 16 MB of ram (ISA DMA addressing
                   restriction), 0=>can use full 32 bit (or better) DMA
                   address space
    no_async_abort
		 - 1=>Asynchronous aborts are not supported
		 - 0=>Timed-out commands will be aborted asynchronously
+0 −1
Original line number Diff line number Diff line
@@ -249,7 +249,6 @@ static struct scsi_host_template driver_template = {
	.cmd_per_lun			=
		ESAS2R_DEFAULT_CMD_PER_LUN,
	.present			= 0,
	.unchecked_isa_dma		= 0,
	.emulated			= 0,
	.proc_name			= ESAS2R_DRVR_NAME,
	.change_queue_depth		= scsi_change_queue_depth,
+1 −6
Original line number Diff line number Diff line
@@ -371,13 +371,9 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
	struct Scsi_Host *shost;
	gfp_t gfp_mask = GFP_KERNEL;
	int index;

	if (sht->unchecked_isa_dma && privsize)
		gfp_mask |= __GFP_DMA;

	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
	if (!shost)
		return NULL;

@@ -419,7 +415,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
	shost->sg_tablesize = sht->sg_tablesize;
	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
	shost->cmd_per_lun = sht->cmd_per_lun;
	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
	shost->no_write_same = sht->no_write_same;
	shost->host_tagset = sht->host_tagset;

+0 −1
Original line number Diff line number Diff line
@@ -8,7 +8,6 @@
#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
static const char *const scsi_cmd_flags[] = {
	SCSI_CMD_FLAG_NAME(TAGGED),
	SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
	SCSI_CMD_FLAG_NAME(INITIALIZED),
};
#undef SCSI_CMD_FLAG_NAME
+5 −47
Original line number Diff line number Diff line
@@ -53,49 +53,16 @@
#endif

static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(bool unchecked_isa_dma,
				   unsigned char *sense_buffer)
{
	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
			sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
	gfp_t gfp_mask, int numa_node)
{
	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
				     gfp_mask, numa_node);
}

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
	if (cache)
		goto exit;

	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
				SCSI_SENSE_BUFFERSIZE, 0,
				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
	if (!scsi_sense_cache) {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
@@ -103,7 +70,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
 exit:
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}
@@ -1748,15 +1714,12 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct Scsi_Host *shost = set->driver_data;
	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scatterlist *sg;
	int ret = 0;

	if (unchecked_isa_dma)
		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
						    GFP_KERNEL, numa_node);
	cmd->sense_buffer =
		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	cmd->req.sense = cmd->sense_buffer;
@@ -1770,8 +1733,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
	if (shost->hostt->init_cmd_priv) {
		ret = shost->hostt->init_cmd_priv(shost, cmd);
		if (ret < 0)
			scsi_free_sense_buffer(unchecked_isa_dma,
					       cmd->sense_buffer);
			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
	}

	return ret;
@@ -1785,8 +1747,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,

	if (shost->hostt->exit_cmd_priv)
		shost->hostt->exit_cmd_priv(shost, cmd);
	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
			       cmd->sense_buffer);
	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
}

static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -1821,8 +1782,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
	}
	blk_queue_max_hw_sectors(q, shost->max_sectors);
	if (shost->unchecked_isa_dma)
		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

@@ -1988,7 +1947,6 @@ EXPORT_SYMBOL(scsi_unblock_requests);
void scsi_exit_queue(void)
{
	kmem_cache_destroy(scsi_sense_cache);
	kmem_cache_destroy(scsi_sense_isadma_cache);
}

/**
Loading