Unverified Commit 516fa3da authored by openeuler-ci-bot, committed by Gitee

!2552 scsi: mpt3sas: Driver patch set for openEuler-22.03-LTS

Merge Pull Request from: @ci-robot 
 
PR sync from: Hao Zhang <Hao.Zhang@windriver.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/CDMG5XBTBDBEFMJ5A2GEQQVK3EIHBQBA/ 
This patch set is taken from the upstream 5.10 LTS stable branch.

Ranjan Kumar (1):
  scsi: mpt3sas: Perform additional retries if doorbell read returns 0

Sreekanth Reddy (4):
  scsi: mpt3sas: Fix use-after-free warning
  scsi: mpt3sas: Don't change DMA mask while reallocating pools
  scsi: mpt3sas: re-do lost mpt3sas DMA mask fix
  scsi: mpt3sas: Remove usage of dma_get_required_mask() API

Suganath Prabu S (1):
  scsi: mpt3sas: Force PCIe scatterlist allocations to be within same 4
    GB region

Tomas Henzl (1):
  scsi: mpt3sas: Fix a memory leak

Wenchao Hao (1):
  scsi: mpt3sas: Fix NULL pointer access in mpt3sas_transport_port_add()

Yang Yingliang (1):
  scsi: mpt3sas: Fix possible resource leaks in
    mpt3sas_transport_port_add()


-- 
2.33.0
 
https://gitee.com/openeuler/kernel/issues/I8AGBT 
 
Link: https://gitee.com/openeuler/kernel/pulls/2552

 

Reviewed-by: sanglipeng <sanglipeng1@jd.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents 8c1e3b54 11c1ecac
drivers/scsi/mpt3sas/mpt3sas_base.c  +158 −64
@@ -131,6 +131,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

static u32
_base_readl_ext_retry(const volatile void __iomem *addr);

/**
 * mpt3sas_base_check_cmd_timeout - Function
 *		to check timeout and command termination due
@@ -206,6 +209,20 @@ _base_readl_aero(const volatile void __iomem *addr)
	return ret_val;
}

static u32
_base_readl_ext_retry(const volatile void __iomem *addr)
{
	u32 i, ret_val;

	for (i = 0 ; i < 30 ; i++) {
		ret_val = readl(addr);
		if (ret_val != 0)
			break;
	}

	return ret_val;
}
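
As a side note, the retry idiom can be exercised outside the kernel. The following standalone C sketch mirrors the loop above, with read_reg() as a simulated stand-in for readl(); the transient-zero behaviour is fabricated for the demo and is not driver code:

#include <stdint.h>
#include <stdio.h>

static int reads;	/* counts simulated register accesses */

/* Stand-in for readl(): returns 0 for the first two reads, then a value. */
static uint32_t read_reg(void)
{
	return (++reads < 3) ? 0 : 0x20000000;
}

static uint32_t read_reg_ext_retry(void)
{
	uint32_t i, ret_val = 0;

	for (i = 0; i < 30; i++) {
		ret_val = read_reg();
		if (ret_val != 0)	/* plausible value, stop retrying */
			break;
	}
	return ret_val;	/* still 0 only if all 30 reads returned 0 */
}

int main(void)
{
	uint32_t val = read_reg_ext_retry();

	printf("doorbell = 0x%08x after %d read(s)\n", val, reads);
	return 0;
}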

static inline u32
_base_readl(const volatile void __iomem *addr)
{
@@ -861,7 +878,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)

	dump_stack();

	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
@@ -2822,23 +2839,28 @@ static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	int dma_mask;
	u64 coherent_dma_mask, dma_mask;

	if (ioc->is_mcpu_endpoint ||
	    sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
	    dma_get_required_mask(&pdev->dev) <= 32)
		dma_mask = 32;
	if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
		ioc->dma_mask = 32;
		coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
	else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
		dma_mask = 63;
	else
		dma_mask = 64;
	} else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
		ioc->dma_mask = 63;
		coherent_dma_mask = dma_mask = DMA_BIT_MASK(63);
	} else {
		ioc->dma_mask = 64;
		coherent_dma_mask = dma_mask = DMA_BIT_MASK(64);
	}

	if (ioc->use_32bit_dma)
		coherent_dma_mask = DMA_BIT_MASK(32);

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
	if (dma_set_mask(&pdev->dev, dma_mask) ||
	    dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
		return -ENODEV;

	if (dma_mask > 32) {
	if (ioc->dma_mask > 32) {
		ioc->base_add_sg_single = &_base_add_sg_single_64;
		ioc->sge_size = sizeof(Mpi2SGESimple64_t);
	} else {
@@ -2848,7 +2870,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)

	si_meminfo(&s);
	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		dma_mask, convert_to_kb(s.totalram));
		ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}
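
The reworked _base_config_dma_addressing() records the selected width in the new ioc->dma_mask field and narrows only the coherent mask when retrying with 32-bit DMA. A userspace sketch of that selection logic follows; DMA_BIT_MASK is re-derived locally, the MPI2_VERSION value is a placeholder, and sizeof(void *) stands in for sizeof(dma_addr_t):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define MPI2_VERSION	0x0200	/* placeholder for the MPI2 base version */

struct adapter {
	bool is_mcpu_endpoint;
	bool use_32bit_dma;
	int hba_mpi_version_belonged;
	uint32_t dma_mask;	/* mirrors the new ioc->dma_mask field */
};

static void pick_masks(struct adapter *ioc, uint64_t *streaming,
		       uint64_t *coherent)
{
	if (ioc->is_mcpu_endpoint || sizeof(void *) == 4) {
		ioc->dma_mask = 32;
	} else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
		ioc->dma_mask = 63;	/* SAS3/SAS3.5: 63-bit addressing */
	} else {
		ioc->dma_mask = 64;
	}
	*streaming = *coherent = DMA_BIT_MASK(ioc->dma_mask);

	/* Only the coherent mask is narrowed on the 32-bit retry path. */
	if (ioc->use_32bit_dma)
		*coherent = DMA_BIT_MASK(32);
}

int main(void)
{
	struct adapter ioc = { .hba_mpi_version_belonged = MPI2_VERSION + 1,
			       .use_32bit_dma = true };
	uint64_t s, c;

	pick_masks(&ioc, &s, &c);
	printf("dma_mask=%u streaming=0x%llx coherent=0x%llx\n",
	       ioc.dma_mask, (unsigned long long)s, (unsigned long long)c);
	return 0;
}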
@@ -4902,9 +4924,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
			dma_pool_free(ioc->pcie_sgl_dma_pool,
					ioc->pcie_sg_lookup[i].pcie_sgl,
					ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
		}
		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
	}
	kfree(ioc->pcie_sg_lookup);
	ioc->pcie_sg_lookup = NULL;

	if (ioc->config_page) {
		dexitprintk(ioc,
@@ -4960,6 +4985,89 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
		return 0;
}

/**
 * _base_reduce_hba_queue_depth- Retry with reduced queue depth
 * @ioc: Adapter object
 *
 * Return: 0 for success, non-zero for failure.
 **/
static inline int
_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
{
	int reduce_sz = 64;

	if ((ioc->hba_queue_depth - reduce_sz) >
	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
		ioc->hba_queue_depth -= reduce_sz;
		return 0;
	} else
		return -ENOMEM;
}

/**
 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
 *			for pcie sgl pools.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 * Return: 0 for success, non-zero for failure.
 */

static int
_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ct;

	ioc->pcie_sgl_dma_pool =
	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
	    ioc->page_size, 0);
	if (!ioc->pcie_sgl_dma_pool) {
		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
		return -ENOMEM;
	}

	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
	ioc->chains_per_prp_buffer =
	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->pcie_sg_lookup[i].pcie_sgl =
		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
			return -EAGAIN;
		}

		if (!mpt3sas_check_same_4gb_region(
		    (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    (unsigned long long)
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->use_32bit_dma = true;
			return -EAGAIN;
		}

		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
			ct = &ioc->chain_lookup[i].chains_per_smid[j];
			ct->chain_buffer =
			    ioc->pcie_sg_lookup[i].pcie_sgl +
			    (j * ioc->chain_segment_sz);
			ct->chain_buffer_dma =
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
			    (j * ioc->chain_segment_sz);
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "Number of chains can fit in a PRP page(%d)\n",
	    ioc->chains_per_prp_buffer));
	return 0;
}
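
_base_allocate_pcie_sgl_pool() rejects any element whose first and last bytes differ in their upper 32 address bits, because an NVMe PRP chain must not cross a 4 GiB boundary. Reduced to a standalone check, this is a hedged rendering of what mpt3sas_check_same_4gb_region() tests, using the buffer's last byte as the end:

#include <stdint.h>
#include <stdio.h>

/* True if [start, start + sz) lies within one naturally aligned 4 GiB region. */
static int same_4gb_region(uint64_t start, uint32_t sz)
{
	return (start >> 32) == ((start + sz - 1) >> 32);
}

int main(void)
{
	/* 0xFFFFF000 + 0x2000 crosses the first 4 GiB boundary */
	printf("%d\n", same_4gb_region(0xFFFFF000ULL, 0x2000));	/* 0 */
	printf("%d\n", same_4gb_region(0x100000000ULL, 0x2000));	/* 1 */
	return 0;
}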

/**
 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
 *                     for reply queues.
@@ -5058,7 +5166,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
	unsigned short sg_tablesize;
	u16 sge_size;
	int i, j;
	int ret = 0;
	int ret = 0, rc = 0;
	struct chain_tracker *ct;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5357,6 +5465,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
	 * be required for NVMe PRP's, only each set of NVMe blocks will be
	 * contiguous, so a new set is allocated for each possible I/O.
	 */

	ioc->chains_per_prp_buffer = 0;
	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		nvme_blocks_needed =
@@ -5371,43 +5480,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
			goto out;
		}
		sz = nvme_blocks_needed * ioc->page_size;
		ioc->pcie_sgl_dma_pool =
			dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
		if (!ioc->pcie_sgl_dma_pool) {
			ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
			goto out;
		}

		ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
		ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
						ioc->chains_needed_per_io);

		for (i = 0; i < ioc->scsiio_depth; i++) {
			ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
				ioc->pcie_sgl_dma_pool, GFP_KERNEL,
				&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
				ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
				goto out;
			}
			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				ct->chain_buffer =
				    ioc->pcie_sg_lookup[i].pcie_sgl +
				    (j * ioc->chain_segment_sz);
				ct->chain_buffer_dma =
				    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
				    (j * ioc->chain_segment_sz);
			}
		}

		dinitprintk(ioc,
			    ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
				     ioc->scsiio_depth, sz,
				     (sz * ioc->scsiio_depth) / 1024));
		dinitprintk(ioc,
			    ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
				     ioc->chains_per_prp_buffer));
		rc = _base_allocate_pcie_sgl_pool(ioc, sz);
		if (rc == -ENOMEM)
			return -ENOMEM;
		else if (rc == -EAGAIN)
			goto try_32bit_dma;
		total_sz += sz * ioc->scsiio_depth;
	}

@@ -5577,6 +5654,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
		 ioc->shost->sg_tablesize);
	return 0;

try_32bit_dma:
	_base_release_memory_pools(ioc);
	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
		/* Change dma coherent mask to 32 bit and reallocate */
		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
			    pci_name(ioc->pdev));
			return -ENODEV;
		}
	} else if (_base_reduce_hba_queue_depth(ioc) != 0)
		return -ENOMEM;
	goto retry_allocation;

 out:
	return -ENOMEM;
}
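
Taken together, the -EAGAIN return from _base_allocate_pcie_sgl_pool() and the new try_32bit_dma label form a release-and-retry loop: free all pools, narrow the coherent DMA mask (or, failing that, shrink the queue depth), then reallocate. Below is a skeleton of that control flow; alloc_pools(), release_pools(), and the other helpers are stubs standing in for the driver functions, not the real API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct adapter { bool use_32bit_dma; int dma_mask; int depth; };

static int attempts;	/* counts allocation passes */

/* Stub: pretend the pools only fit once the DMA mask is 32-bit. */
static int alloc_pools(struct adapter *ioc)
{
	attempts++;
	return (ioc->dma_mask > 32) ? -EAGAIN : 0;
}

static void release_pools(struct adapter *ioc) { (void)ioc; }

static int set_32bit_coherent_mask(struct adapter *ioc)
{
	ioc->dma_mask = 32;	/* _base_config_dma_addressing() in the driver */
	return 0;
}

static int reduce_queue_depth(struct adapter *ioc)
{
	return (ioc->depth -= 64) > 0 ? 0 : -ENOMEM;
}

static int alloc_with_fallback(struct adapter *ioc)
{
	for (;;) {
		int rc = alloc_pools(ioc);

		if (rc != -EAGAIN)
			return rc;	/* success or hard failure */

		release_pools(ioc);
		if (ioc->use_32bit_dma && ioc->dma_mask > 32) {
			if (set_32bit_coherent_mask(ioc))
				return -ENODEV;
		} else if (reduce_queue_depth(ioc)) {
			return -ENOMEM;
		}
		/* loop: retry the allocations under the new constraints */
	}
}

int main(void)
{
	struct adapter ioc = { .use_32bit_dma = true, .dma_mask = 64,
			       .depth = 1024 };
	int rc = alloc_with_fallback(&ioc);

	printf("rc=%d after %d attempt(s)\n", rc, attempts);
	return 0;
}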
@@ -5594,7 +5684,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
	u32 s, sc;

	s = ioc->base_readl(&ioc->chip->Doorbell);
	s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
	sc = s & MPI2_IOC_STATE_MASK;
	return cooked ? sc : s;
}
@@ -5739,7 +5829,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
					   __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
			doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc, doorbell);
@@ -5779,7 +5869,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
	count = 0;
	cntdn = 1000 * timeout;
	do {
		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
		doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
			dhsprintk(ioc,
				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -5916,7 +6006,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	__le32 *mfp;

	/* make sure doorbell is not in use */
	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
	if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
		return -EFAULT;
	}
@@ -5965,7 +6055,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	}

	/* read the first two 16-bits, it gives the total length of the reply */
	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
	reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5973,7 +6063,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
			__LINE__);
		return -EFAULT;
	}
	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
	reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);

@@ -5984,10 +6074,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
			return -EFAULT;
		}
		if (i >=  reply_bytes/2) /* overflow case */
			ioc->base_readl(&ioc->chip->Doorbell);
			ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
		else
			reply[i] = le16_to_cpu(
			    ioc->base_readl(&ioc->chip->Doorbell)
			    ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
			    & MPI2_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}
@@ -6833,7 +6923,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
			goto out;
		}

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
		drsprintk(ioc,
			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
				   count, host_diagnostic));
@@ -6853,7 +6943,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
	for (count = 0; count < (300000000 /
		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);

		if (host_diagnostic == 0xFFFFFFFF) {
			ioc_info(ioc,
@@ -7239,10 +7329,14 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)

	ioc->rdpq_array_enable_assigned = 0;
	ioc->use_32bit_dma = false;
	if (ioc->is_aero_ioc)
	ioc->dma_mask = 64;
	if (ioc->is_aero_ioc) {
		ioc->base_readl = &_base_readl_aero;
	else
		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
	} else {
		ioc->base_readl = &_base_readl;
		ioc->base_readl_ext_retry = &_base_readl;
	}
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;
drivers/scsi/mpt3sas/mpt3sas_base.h  +2 −0
@@ -1257,6 +1257,7 @@ struct MPT3SAS_ADAPTER {
	u16		thresh_hold;
	u8		high_iops_queues;
	u32		drv_support_bitmap;
	u32             dma_mask;
	bool		enable_sdev_max_qd;
	bool		use_32bit_dma;

@@ -1468,6 +1469,7 @@ struct MPT3SAS_ADAPTER {
	u8		diag_trigger_active;
	u8		atomic_desc_capable;
	BASE_READ_REG	base_readl;
	BASE_READ_REG	base_readl_ext_retry;
	struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
	struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
	struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
drivers/scsi/mpt3sas/mpt3sas_scsih.c  +1 −1
@@ -3501,6 +3501,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
		fw_event = list_first_entry(&ioc->fw_event_list,
				struct fw_event_work, list);
		list_del_init(&fw_event->list);
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

@@ -3559,7 +3560,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

		fw_event_work_put(fw_event);
	}
	ioc->fw_events_cleanup = 0;
}
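
These two hunks are one fix: the reference held by the event list is now dropped exactly once, inside dequeue_next_fw_event() where the element leaves the list, and the second put in _scsih_fw_event_cleanup_queue() that caused the use-after-free is gone. A minimal sketch of that ownership rule, using a plain counter where the driver uses a kref:

#include <stdio.h>
#include <stdlib.h>

struct fw_event {
	int refcount;	/* a kref in the driver */
	int on_list;
};

static void event_get(struct fw_event *e) { e->refcount++; }

static void event_put(struct fw_event *e)
{
	if (--e->refcount == 0) {
		printf("event freed\n");
		free(e);
	}
}

/* The element leaves the list here, so the list's reference is dropped
 * here, exactly once. Callers must already hold their own reference. */
static void dequeue(struct fw_event *e)
{
	if (!e->on_list)
		return;
	e->on_list = 0;
	event_put(e);
}

int main(void)
{
	struct fw_event *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->refcount = 1;	/* the list's reference, taken at enqueue */
	e->on_list = 1;

	event_get(e);		/* caller pins the event before using it */
	dequeue(e);		/* list reference dropped: refcount 2 -> 1 */
	event_put(e);		/* caller's reference dropped: freed here */
	return 0;
}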
drivers/scsi/mpt3sas/mpt3sas_transport.c  +14 −2
@@ -670,7 +670,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		goto out_fail;
	}
	port = sas_port_alloc_num(sas_node->parent_dev);
	if ((sas_port_add(port))) {
	if (!port || (sas_port_add(port))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out_fail;
@@ -695,6 +695,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		rphy = sas_expander_alloc(port,
		    mpt3sas_port->remote_identify.device_type);

	if (!rphy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out_delete_port;
	}

	rphy->identify = mpt3sas_port->remote_identify;

	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -712,6 +718,9 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	if ((sas_rphy_add(rphy))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		sas_rphy_free(rphy);
		rphy = NULL;
		goto out_delete_port;
	}

	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -738,6 +747,9 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		    rphy_to_expander_device(rphy));
	return mpt3sas_port;

out_delete_port:
	sas_port_delete(port);

out_fail:
	list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
	    port_siblings)
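
The transport changes extend the function's goto-based unwinding: failures of sas_port_alloc_num() and sas_expander_alloc() are now caught, and the new out_delete_port label guarantees that an already-added port is deleted on any later failure instead of leaking. The idiom in isolation (all names below are illustrative stubs, not the SAS transport API):

#include <stdio.h>
#include <stdlib.h>

struct port { int id; };
struct rphy { int id; };

static struct port *port_alloc(void) { return calloc(1, sizeof(struct port)); }
static struct rphy *rphy_alloc(void) { return NULL; /* simulated failure */ }
static void port_delete(struct port *p) { printf("port deleted\n"); free(p); }

static int port_add(void)
{
	struct port *port;
	struct rphy *rphy;

	port = port_alloc();
	if (!port)			/* the added NULL check */
		goto out_fail;

	rphy = rphy_alloc();
	if (!rphy)			/* port must not leak from here on */
		goto out_delete_port;

	/* ... later sas_rphy_add()-style failures unwind the same way ... */
	free(rphy);
	free(port);
	return 0;

out_delete_port:
	port_delete(port);		/* the new unwind step */
out_fail:
	return -1;
}

int main(void)
{
	printf("port_add() = %d\n", port_add());
	return 0;
}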