Commit 369af20a authored by Linus Torvalds
Browse files
Pull more SCSI updates from James Bottomley:
 "This series is all the stragglers that didn't quite make the first
  merge window pull. It's mostly minor updates and bug fixes of merge
  window code"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: nsp_cs: Check of ioremap return value
  scsi: ufs: ufs-mediatek: Fix error checking in ufs_mtk_init_va09_pwr_ctrl()
  scsi: ufs: Modify Tactive time setting conditions
  scsi: efct: Remove useless DMA-32 fallback configuration
  scsi: message: fusion: mptctl: Use dma_alloc_coherent()
  scsi: message: fusion: mptsas: Use dma_alloc_coherent()
  scsi: message: fusion: Use dma_alloc_coherent() in mptsas_exp_repmanufacture_info()
  scsi: message: fusion: mptbase: Use dma_alloc_coherent()
  scsi: message: fusion: Use dma_alloc_coherent() in mpt_alloc_fw_memory()
  scsi: message: fusion: Remove usage of the deprecated "pci-dma-compat.h" API
  scsi: megaraid: Avoid mismatched storage type sizes
  scsi: hisi_sas: Remove unused variable and check in hisi_sas_send_ata_reset_each_phy()
  scsi: aic79xx: Remove redundant error variable
  scsi: pm80xx: Port reset timeout error handling correction
  scsi: mpi3mr: Fix formatting problems in some kernel-doc comments
  scsi: mpi3mr: Fix some spelling mistakes
  scsi: mpt3sas: Update persistent trigger pages from sysfs interface
  scsi: core: Fix scsi_mode_select() interface
  scsi: aacraid: Fix spelling of "its"
  scsi: qedf: Fix potential dereference of NULL pointer
parents b087788c 2576e153
Loading
Loading
Loading
Loading
+84 −65
Original line number Diff line number Diff line
@@ -300,8 +300,8 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
	if (!hdr.ExtPageLength)
		goto out;

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
	    &dma_handle);
	buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
				    &dma_handle, GFP_KERNEL);
	if (!buffer)
		goto out;

@@ -316,8 +316,8 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
		rc = 1;

 out_free_consistent:
	pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
	    buffer, dma_handle);
	dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
			  dma_handle);
 out:
	return rc;
}
@@ -1661,16 +1661,14 @@ mpt_mapresources(MPT_ADAPTER *ioc)
		const uint64_t required_mask = dma_get_required_mask
		    (&pdev->dev);
		if (required_mask > DMA_BIT_MASK(32)
			&& !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
			&& !pci_set_consistent_dma_mask(pdev,
						 DMA_BIT_MASK(64))) {
			&& !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))
			&& !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			ioc->dma_mask = DMA_BIT_MASK(64);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
				ioc->name));
		} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
			&& !pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32))) {
		} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
			   && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			ioc->dma_mask = DMA_BIT_MASK(32);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
@@ -1681,9 +1679,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
			goto out_pci_release_region;
		}
	} else {
		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
			&& !pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32))) {
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
			&& !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			ioc->dma_mask = DMA_BIT_MASK(32);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
@@ -2769,7 +2766,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)

	if (ioc->spi_data.pIocPg4 != NULL) {
		sz = ioc->spi_data.IocPg4Sz;
		pci_free_consistent(ioc->pcidev, sz,
		dma_free_coherent(&ioc->pcidev->dev, sz,
				  ioc->spi_data.pIocPg4,
				  ioc->spi_data.IocPg4_dma);
		ioc->spi_data.pIocPg4 = NULL;
@@ -3515,7 +3512,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
		rc = 0;
		goto out;
	}
	ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
	ioc->cached_fw = dma_alloc_coherent(&ioc->pcidev->dev, size,
					    &ioc->cached_fw_dma, GFP_ATOMIC);
	if (!ioc->cached_fw) {
		printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
		    ioc->name);
@@ -3548,7 +3546,8 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc)
	sz = ioc->facts.FWImageSize;
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image  @ %p[%p], sz=%d[%x] bytes\n",
		 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
	pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
	dma_free_coherent(&ioc->pcidev->dev, sz, ioc->cached_fw,
			  ioc->cached_fw_dma);
	ioc->alloc_total -= sz;
	ioc->cached_fw = NULL;
}
@@ -4447,9 +4446,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
		 */
		if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
		    ioc->dma_mask > DMA_BIT_MASK(35)) {
			if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
			    && !pci_set_consistent_dma_mask(ioc->pcidev,
			    DMA_BIT_MASK(32))) {
			if (!dma_set_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))
			    && !dma_set_coherent_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))) {
				dma_mask = DMA_BIT_MASK(35);
				d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				    "setting 35 bit addressing for "
@@ -4457,9 +4455,9 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
				    ioc->name));
			} else {
				/* Resetting DMA mask to 64 bit */
				pci_set_dma_mask(ioc->pcidev,
				dma_set_mask(&ioc->pcidev->dev,
					     DMA_BIT_MASK(64));
				pci_set_consistent_dma_mask(ioc->pcidev,
				dma_set_coherent_mask(&ioc->pcidev->dev,
						      DMA_BIT_MASK(64));

				printk(MYIOC_s_ERR_FMT
@@ -4595,8 +4593,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
		alloc_dma += ioc->reply_sz;
	}

	if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
	    ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
	if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
	    ioc->dma_mask) && !dma_set_coherent_mask(&ioc->pcidev->dev,
	    ioc->dma_mask))
		d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "restoring 64 bit addressing\n", ioc->name));
@@ -4620,8 +4618,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
		ioc->sense_buf_pool = NULL;
	}

	if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
	    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
	if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
	    DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&ioc->pcidev->dev,
	    DMA_BIT_MASK(64)))
		d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "restoring 64 bit addressing\n", ioc->name));
@@ -4968,7 +4966,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)

	if (hdr.PageLength > 0) {
		data_sz = hdr.PageLength * 4;
		ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
		ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
						  &page0_dma, GFP_KERNEL);
		rc = -ENOMEM;
		if (ppage0_alloc) {
			memset((u8 *)ppage0_alloc, 0, data_sz);
@@ -4982,7 +4981,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)

			}

			pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
			dma_free_coherent(&ioc->pcidev->dev, data_sz,
					  (u8 *)ppage0_alloc, page0_dma);

			/* FIXME!
			 *	Normalize endianness of structure data,
@@ -5014,7 +5014,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)

	data_sz = hdr.PageLength * 4;
	rc = -ENOMEM;
	ppage1_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
	ppage1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
					  &page1_dma, GFP_KERNEL);
	if (ppage1_alloc) {
		memset((u8 *)ppage1_alloc, 0, data_sz);
		cfg.physAddr = page1_dma;
@@ -5026,7 +5027,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
			memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
		}

		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);
		dma_free_coherent(&ioc->pcidev->dev, data_sz,
				  (u8 *)ppage1_alloc, page1_dma);

		/* FIXME!
		 *	Normalize endianness of structure data,
@@ -5315,7 +5317,8 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
	/* Read the config page */
	data_sz = hdr.PageLength * 4;
	rc = -ENOMEM;
	ppage_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
	ppage_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
					 &page_dma, GFP_KERNEL);
	if (ppage_alloc) {
		memset((u8 *)ppage_alloc, 0, data_sz);
		cfg.physAddr = page_dma;
@@ -5325,7 +5328,8 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
		if ((rc = mpt_config(ioc, &cfg)) == 0)
			ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);

		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma);
		dma_free_coherent(&ioc->pcidev->dev, data_sz,
				  (u8 *)ppage_alloc, page_dma);
	}

	return rc;
@@ -5400,7 +5404,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
		 return -EFAULT;

	if (header.PageLength > 0) {
		pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
		pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
					  header.PageLength * 4, &buf_dma,
					  GFP_KERNEL);
		if (pbuf) {
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
			cfg.physAddr = buf_dma;
@@ -5456,7 +5462,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
				}
			}
			if (pbuf) {
				pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
				dma_free_coherent(&ioc->pcidev->dev,
						  header.PageLength * 4, pbuf,
						  buf_dma);
			}
		}
	}
@@ -5478,7 +5486,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
	if (header.PageLength > 0) {
		/* Allocate memory and read SCSI Port Page 2
		 */
		pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
		pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
					  header.PageLength * 4, &buf_dma,
					  GFP_KERNEL);
		if (pbuf) {
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
			cfg.physAddr = buf_dma;
@@ -5543,7 +5553,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
				}
			}

			pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
			dma_free_coherent(&ioc->pcidev->dev,
					  header.PageLength * 4, pbuf,
					  buf_dma);
		}
	}

@@ -5659,8 +5671,8 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
	if (!hdr.PageLength)
		goto out;

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);
	buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				    &dma_handle, GFP_KERNEL);

	if (!buffer)
		goto out;
@@ -5707,8 +5719,8 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)

 out:
	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);
		dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				  buffer, dma_handle);
}

/**
@@ -5752,8 +5764,8 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
		goto out;
	}

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);
	buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				    &dma_handle, GFP_KERNEL);

	if (!buffer) {
		rc = -ENOMEM;
@@ -5776,8 +5788,8 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
 out:

	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);
		dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				  buffer, dma_handle);

	return rc;
}
@@ -5819,8 +5831,8 @@ mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
		goto out;
	}

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);
	buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				    &dma_handle, GFP_KERNEL);

	if (!buffer) {
		rc = 0;
@@ -5840,8 +5852,8 @@ mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
 out:

	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);
		dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				  buffer, dma_handle);

	return rc;
}
@@ -5891,8 +5903,8 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
		goto out;
	}

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);
	buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				    &dma_handle, GFP_KERNEL);

	if (!buffer) {
		rc = -ENOMEM;
@@ -5929,8 +5941,8 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
 out:

	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);
		dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				  buffer, dma_handle);

	return rc;
}
@@ -5986,7 +5998,8 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
		return -EFAULT;

	iocpage2sz = header.PageLength * 4;
	pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
	pIoc2 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage2sz, &ioc2_dma,
				   GFP_KERNEL);
	if (!pIoc2)
		return -ENOMEM;

@@ -6011,7 +6024,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
		    pIoc2->RaidVolume[i].VolumeID);

 out:
	pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);
	dma_free_coherent(&ioc->pcidev->dev, iocpage2sz, pIoc2, ioc2_dma);

	return rc;
}
@@ -6053,7 +6066,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
	/* Read Header good, alloc memory
	 */
	iocpage3sz = header.PageLength * 4;
	pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma);
	pIoc3 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage3sz, &ioc3_dma,
				   GFP_KERNEL);
	if (!pIoc3)
		return 0;

@@ -6070,7 +6084,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
		}
	}

	pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma);
	dma_free_coherent(&ioc->pcidev->dev, iocpage3sz, pIoc3, ioc3_dma);

	return 0;
}
@@ -6104,7 +6118,8 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)

	if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
		iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
		pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
		pIoc4 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage4sz,
					   &ioc4_dma, GFP_KERNEL);
		if (!pIoc4)
			return;
		ioc->alloc_total += iocpage4sz;
@@ -6122,7 +6137,8 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
		ioc->spi_data.IocPg4_dma = ioc4_dma;
		ioc->spi_data.IocPg4Sz = iocpage4sz;
	} else {
		pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
		dma_free_coherent(&ioc->pcidev->dev, iocpage4sz, pIoc4,
				  ioc4_dma);
		ioc->spi_data.pIocPg4 = NULL;
		ioc->alloc_total -= iocpage4sz;
	}
@@ -6159,7 +6175,8 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
	/* Read Header good, alloc memory
	 */
	iocpage1sz = header.PageLength * 4;
	pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
	pIoc1 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage1sz, &ioc1_dma,
				   GFP_KERNEL);
	if (!pIoc1)
		return;

@@ -6210,7 +6227,7 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
		}
	}

	pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);
	dma_free_coherent(&ioc->pcidev->dev, iocpage1sz, pIoc1, ioc1_dma);

	return;
}
@@ -6239,7 +6256,8 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
		goto out;

	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
	pbuf = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
				  &buf_dma, GFP_KERNEL);
	if (!pbuf)
		goto out;

@@ -6255,7 +6273,8 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
out:

	if (pbuf)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
		dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, pbuf,
				  buf_dma);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+49 −33
Original line number Diff line number Diff line
@@ -1041,14 +1041,15 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
	 * copying the data in this array into the correct place in the
	 * request and chain buffers.
	 */
	sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma);
	sglbuf = dma_alloc_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES,
				    sglbuf_dma, GFP_KERNEL);
	if (sglbuf == NULL)
		goto free_and_fail;

	if (sgdir & 0x04000000)
		dir = PCI_DMA_TODEVICE;
		dir = DMA_TO_DEVICE;
	else
		dir = PCI_DMA_FROMDEVICE;
		dir = DMA_FROM_DEVICE;

	/* At start:
	 *	sgl = sglbuf = point to beginning of sg buffer
@@ -1062,9 +1063,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
	while (bytes_allocd < bytes) {
		this_alloc = min(alloc_sz, bytes-bytes_allocd);
		buflist[buflist_ent].len = this_alloc;
		buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev,
		buflist[buflist_ent].kptr = dma_alloc_coherent(&ioc->pcidev->dev,
							       this_alloc,
								 &pa);
							       &pa, GFP_KERNEL);
		if (buflist[buflist_ent].kptr == NULL) {
			alloc_sz = alloc_sz / 2;
			if (alloc_sz == 0) {
@@ -1080,8 +1081,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,

			bytes_allocd += this_alloc;
			sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
			dma_addr = pci_map_single(ioc->pcidev,
				buflist[buflist_ent].kptr, this_alloc, dir);
			dma_addr = dma_map_single(&ioc->pcidev->dev,
						  buflist[buflist_ent].kptr,
						  this_alloc, dir);
			sgl->Address = dma_addr;

			fragcnt++;
@@ -1140,9 +1142,11 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
			kptr = buflist[i].kptr;
			len = buflist[i].len;

			pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
			dma_free_coherent(&ioc->pcidev->dev, len, kptr,
					  dma_addr);
		}
		pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma);
		dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sglbuf,
				  *sglbuf_dma);
	}
	kfree(buflist);
	return NULL;
@@ -1162,9 +1166,9 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
	int		 n = 0;

	if (sg->FlagsLength & 0x04000000)
		dir = PCI_DMA_TODEVICE;
		dir = DMA_TO_DEVICE;
	else
		dir = PCI_DMA_FROMDEVICE;
		dir = DMA_FROM_DEVICE;

	nib = (sg->FlagsLength & 0xF0000000) >> 28;
	while (! (nib & 0x4)) { /* eob */
@@ -1179,8 +1183,10 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
			dma_addr = sg->Address;
			kptr = bl->kptr;
			len = bl->len;
			pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
			pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
			dma_unmap_single(&ioc->pcidev->dev, dma_addr, len,
					 dir);
			dma_free_coherent(&ioc->pcidev->dev, len, kptr,
					  dma_addr);
			n++;
		}
		sg++;
@@ -1197,12 +1203,12 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
		dma_addr = sg->Address;
		kptr = bl->kptr;
		len = bl->len;
		pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
		pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
		dma_unmap_single(&ioc->pcidev->dev, dma_addr, len, dir);
		dma_free_coherent(&ioc->pcidev->dev, len, kptr, dma_addr);
		n++;
	}

	pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma);
	dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sgl, sgl_dma);
	kfree(buflist);
	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n",
	    ioc->name, n));
@@ -2100,8 +2106,9 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
			}
			flagsLength |= karg.dataOutSize;
			bufOut.len = karg.dataOutSize;
			bufOut.kptr = pci_alloc_consistent(
					ioc->pcidev, bufOut.len, &dma_addr_out);
			bufOut.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
							 bufOut.len,
							 &dma_addr_out, GFP_KERNEL);

			if (bufOut.kptr == NULL) {
				rc = -ENOMEM;
@@ -2134,8 +2141,9 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
			flagsLength |= karg.dataInSize;

			bufIn.len = karg.dataInSize;
			bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
					bufIn.len, &dma_addr_in);
			bufIn.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
							bufIn.len,
							&dma_addr_in, GFP_KERNEL);

			if (bufIn.kptr == NULL) {
				rc = -ENOMEM;
@@ -2283,13 +2291,13 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
	/* Free the allocated memory.
	 */
	if (bufOut.kptr != NULL) {
		pci_free_consistent(ioc->pcidev,
			bufOut.len, (void *) bufOut.kptr, dma_addr_out);
		dma_free_coherent(&ioc->pcidev->dev, bufOut.len,
				  (void *)bufOut.kptr, dma_addr_out);
	}

	if (bufIn.kptr != NULL) {
		pci_free_consistent(ioc->pcidev,
			bufIn.len, (void *) bufIn.kptr, dma_addr_in);
		dma_free_coherent(&ioc->pcidev->dev, bufIn.len,
				  (void *)bufIn.kptr, dma_addr_in);
	}

	/* mf is null if command issued successfully
@@ -2395,7 +2403,9 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
			/* Issue the second config page request */
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

			pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
			pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
						  hdr.PageLength * 4,
						  &buf_dma, GFP_KERNEL);
			if (pbuf) {
				cfg.physAddr = buf_dma;
				if (mpt_config(ioc, &cfg) == 0) {
@@ -2405,7 +2415,9 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
							pdata->BoardTracerNumber, 24);
					}
				}
				pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
				dma_free_coherent(&ioc->pcidev->dev,
						  hdr.PageLength * 4, pbuf,
						  buf_dma);
				pbuf = NULL;
			}
		}
@@ -2470,7 +2482,7 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
	else
		IstwiRWRequest->DeviceAddr = 0xB0;

	pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
	pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 4, &buf_dma, GFP_KERNEL);
	if (!pbuf)
		goto out;
	ioc->add_sge((char *)&IstwiRWRequest->SGL,
@@ -2519,7 +2531,7 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);

	if (pbuf)
		pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
		dma_free_coherent(&ioc->pcidev->dev, 4, pbuf, buf_dma);

	/* Copy the data from kernel memory to user memory
	 */
@@ -2585,7 +2597,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
       /* Get the data transfer speeds
        */
	data_sz = ioc->spi_data.sdp0length * 4;
	pg0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
	pg0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, &page_dma,
				       GFP_KERNEL);
	if (pg0_alloc) {
		hdr.PageVersion = ioc->spi_data.sdp0version;
		hdr.PageLength = data_sz;
@@ -2623,7 +2636,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
				karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
		}

		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
		dma_free_coherent(&ioc->pcidev->dev, data_sz, (u8 *)pg0_alloc,
				  page_dma);
	}

	/* Set defaults
@@ -2649,7 +2663,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
		/* Issue the second config page request */
		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
		data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
		pg3_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
		pg3_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
					       &page_dma, GFP_KERNEL);
		if (pg3_alloc) {
			cfg.physAddr = page_dma;
			cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
@@ -2658,7 +2673,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
				karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
				karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
			}
			pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
			dma_free_coherent(&ioc->pcidev->dev, data_sz,
					  (u8 *)pg3_alloc, page_dma);
		}
	}
	hd = shost_priv(ioc->sh);
+48 −42
Original line number Diff line number Diff line
@@ -516,9 +516,9 @@ mpt_lan_close(struct net_device *dev)
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len, DMA_FROM_DEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}
@@ -528,9 +528,9 @@ mpt_lan_close(struct net_device *dev)

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->SendCtl[i].dma,
					 priv->SendCtl[i].len, DMA_TO_DEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}
@@ -582,8 +582,8 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
@@ -648,8 +648,9 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dma_unmap_single(&mpt_dev->pcidev->dev,
				 priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
@@ -720,8 +721,8 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

        dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
@@ -868,13 +869,17 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					   DMA_FROM_DEVICE);
		goto out;
	}

@@ -882,8 +887,8 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
	dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
@@ -927,8 +932,8 @@ mpt_lan_receive_post_free(struct net_device *dev,
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1028,16 +1033,16 @@ mpt_lan_receive_post_reply(struct net_device *dev,
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
			dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
						priv->RcvCtl[ctx].dma,
						priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
						DMA_FROM_DEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
			dma_sync_single_for_device(&mpt_dev->pcidev->dev,
						   priv->RcvCtl[ctx].dma,
						   priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);
						   DMA_FROM_DEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
@@ -1056,17 +1061,17 @@ mpt_lan_receive_post_reply(struct net_device *dev,
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);
					   DMA_FROM_DEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1077,8 +1082,8 @@ mpt_lan_receive_post_reply(struct net_device *dev,

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1199,10 +1204,10 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
				dma_unmap_single(&mpt_dev->pcidev->dev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
						 DMA_FROM_DEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}
@@ -1218,8 +1223,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);
				dma = dma_map_single(&mpt_dev->pcidev->dev,
						     skb->data, len,
						     DMA_FROM_DEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
+48 −46

File changed.

Preview size limit exceeded, changes collapsed.

+1 −1
Original line number Diff line number Diff line
@@ -271,7 +271,7 @@ MODULE_PARM_DESC(msi, "IRQ handling."
	" 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
	" adapter to have it's kernel up and\n"
	" adapter to have its kernel up and\n"
	"running. This is typically adjusted for large systems that do not"
	" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
Loading