Commit 029aa462 authored by Giovanni Cabiddu, committed by Herbert Xu
Browse files

crypto: qat - remove dma_free_coherent() for DH



The function qat_dh_compute_value() allocates memory with
dma_alloc_coherent() if the source or the destination buffers are made
of multiple flat buffers or of a size that is not compatible with the
hardware.
This memory is then freed with dma_free_coherent() in the context of a
tasklet invoked to handle the response for the corresponding request.

According to Documentation/core-api/dma-api-howto.rst, the function
dma_free_coherent() cannot be called in an interrupt context.

Replace allocations with dma_alloc_coherent() in the function
qat_dh_compute_value() with kmalloc() + dma_map_single().

Cc: stable@vger.kernel.org
Fixes: c9839143 ("crypto: qat - Add DH support")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 3dfaf007
Loading
Loading
Loading
Loading
+34 −49
Original line number Diff line number Diff line
@@ -164,25 +164,20 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
		dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
				 DMA_TO_DEVICE);
		kfree_sensitive(req->src_align);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);
		kfree_sensitive(req->dst_align);
	}

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
			 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
@@ -231,6 +226,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret;
	int n_input_params = 0;
	u8 *vaddr;

	if (unlikely(!ctx->xa))
		return -EINVAL;
@@ -287,27 +283,24 @@ static int qat_dh_compute_value(struct kpp_request *req)
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

			vaddr = sg_virt(req->src);
		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_alloc_coherent(dev,
								ctx->p_size,
								&qat_req->in.dh.in.b,
								GFP_KERNEL);
			qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);

			vaddr = qat_req->src_align;
		}

		qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
						     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
			goto unmap_src;
	}
	/*
	 * dst can be of any size in valid range, but HW expects it to be the
@@ -318,20 +311,18 @@ static int qat_dh_compute_value(struct kpp_request *req)
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

		vaddr = sg_virt(req->dst);
	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
							&qat_req->out.dh.r,
							GFP_KERNEL);
		qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

		vaddr = qat_req->dst_align;
	}
	qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
					   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
		goto unmap_dst;

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
@@ -371,23 +362,17 @@ static int qat_dh_compute_value(struct kpp_request *req)
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
	if (!dma_mapping_error(dev, qat_req->out.dh.r))
		dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
				 DMA_FROM_DEVICE);
	kfree_sensitive(qat_req->dst_align);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
		if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
			dma_unmap_single(dev, qat_req->in.dh.in.b,
					 ctx->p_size,
					 DMA_TO_DEVICE);
		kfree_sensitive(qat_req->src_align);
	}
	return ret;
}