Commit 38682383 authored by Giovanni Cabiddu, committed by Herbert Xu

crypto: qat - add backlog mechanism



The implementations of the crypto algorithms (aead, skcipher, etc.) in
the QAT driver do not properly support requests with the
CRYPTO_TFM_REQ_MAY_BACKLOG flag set. If the HW queue is full, the driver
returns -EBUSY but does not enqueue the request. This can result in
applications like dm-crypt waiting indefinitely for the completion of a
request that was never submitted to the hardware.
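
For context, a caller that sets CRYPTO_TFM_REQ_MAY_BACKLOG treats -EBUSY
as "request queued, a completion callback will follow" and sleeps until
that callback fires. A minimal sketch of this caller-side convention,
using the generic crypto_wait_req() helpers rather than code from this
patch:

	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/* crypto_wait_req() converts -EBUSY/-EINPROGRESS into a sleep
	 * until the request completes; if the driver returned -EBUSY
	 * without actually queuing the request, the wait never ends.
	 */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);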

Fix this by adding a software backlog queue: if the ring buffer is more
than eighty percent full, the request is enqueued on a backlog list and
-EBUSY is returned to the caller. Requests on the backlog list are
resubmitted at a later time, in the context of the callback of a
previously submitted request. A request for which -EBUSY was returned
is then completed with -EINPROGRESS once it has been submitted to the
HW queues.
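
As a worked example of the threshold arithmetic added below (assuming a
ring sized for 256 in-flight messages):

	ring->threshold = ADF_PERCENT(256, ADF_MAX_RING_THRESHOLD)
			= (256 * 80) / 100
			= 204

so new submissions start being backlogged once more than 204 messages
are in flight.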

The submission loop inside the function qat_alg_send_message() has been
modified to choose a submission policy based on the request flags: if
the request does not have the CRYPTO_TFM_REQ_MAY_BACKLOG flag set, the
previous retry behaviour is preserved.
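
The resulting return-value contract, recapped from the diff below:

	/*
	 * qat_alg_send_message() after this patch:
	 *  -EINPROGRESS  request placed on the HW ring; the callback
	 *                completes it
	 *  -EBUSY        MAY_BACKLOG request placed on the backlog list;
	 *                completed with -EINPROGRESS once it reaches the
	 *                HW ring
	 *  -ENOSPC       request dropped after the pre-existing retry
	 *                loop gives up; callers free their buffers and
	 *                propagate the error
	 */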

Based on a patch by
Vishnu Das Ramachandran <vishnu.dasx.ramachandran@intel.com>

Cc: stable@vger.kernel.org
Fixes: d370cec3 ("crypto: qat - Intel(R) QAT crypto interface")
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Reported-by: Kyle Sanderson <kyle.leet@gmail.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent af88d3c1
+11 −0
@@ -8,6 +8,9 @@
 #include "adf_cfg.h"
 #include "adf_common_drv.h"
 
+#define ADF_MAX_RING_THRESHOLD		80
+#define ADF_PERCENT(tot, percent)	(((tot) * (percent)) / 100)
+
 static inline u32 adf_modulo(u32 data, u32 shift)
 {
 	u32 div = data >> shift;
@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
 				      bank->irq_mask);
 }
 
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+	return atomic_read(ring->inflights) > ring->threshold;
+}
+
 int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
 {
 	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
 	struct adf_etr_bank_data *bank;
 	struct adf_etr_ring_data *ring;
 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	int max_inflights;
 	u32 ring_num;
 	int ret;
 
@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
 	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
 	ring->head = 0;
 	ring->tail = 0;
+	max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+	ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
 	atomic_set(ring->inflights, 0);
 	ret = adf_init_ring(ring);
 	if (ret)
+1 −0
@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
 		    const char *ring_name, adf_callback_fn callback,
 		    int poll_mode, struct adf_etr_ring_data **ring_ptr);
 
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
 int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
 void adf_remove_ring(struct adf_etr_ring_data *ring);
 #endif
+1 −0
@@ -22,6 +22,7 @@ struct adf_etr_ring_data {
 	spinlock_t lock;	/* protects ring data struct */
 	u16 head;
 	u16 tail;
+	u32 threshold;
 	u8 ring_number;
 	u8 ring_size;
 	u8 msg_size;
+15 −9
@@ -935,19 +935,25 @@ void qat_alg_callback(void *resp)
 	struct icp_qat_fw_la_resp *qat_resp = resp;
 	struct qat_crypto_request *qat_req =
 				(void *)(__force long)qat_resp->opaque_data;
+	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
 
 	qat_req->cb(qat_resp, qat_req);
+
+	qat_alg_send_backlog(backlog);
 }
 
 static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
-				    struct qat_crypto_instance *inst)
+				    struct qat_crypto_instance *inst,
+				    struct crypto_async_request *base)
 {
-	struct qat_alg_req req;
+	struct qat_alg_req *alg_req = &qat_req->alg_req;
 
-	req.fw_req = (u32 *)&qat_req->req;
-	req.tx_ring = inst->sym_tx;
+	alg_req->fw_req = (u32 *)&qat_req->req;
+	alg_req->tx_ring = inst->sym_tx;
+	alg_req->base = base;
+	alg_req->backlog = &inst->backlog;
 
-	return qat_alg_send_message(&req);
+	return qat_alg_send_message(alg_req);
 }
 
 static int qat_alg_aead_dec(struct aead_request *areq)
@@ -987,7 +993,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
 
@@ -1031,7 +1037,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + areq->cryptlen;
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
 
@@ -1212,7 +1218,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 
 	qat_alg_set_req_iv(qat_req);
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
 
@@ -1278,7 +1284,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	qat_alg_set_req_iv(qat_req);
 	qat_alg_update_iv(qat_req);
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
 
+66 −1
@@ -6,7 +6,7 @@
 
 #define ADF_MAX_RETRIES		20
 
-int qat_alg_send_message(struct qat_alg_req *req)
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
 {
 	int ret = 0, ctr = 0;
 
@@ -19,3 +19,68 @@ int qat_alg_send_message(struct qat_alg_req *req)
 
 	return -EINPROGRESS;
 }
+
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+{
+	struct qat_alg_req *req, *tmp;
+
+	spin_lock_bh(&backlog->lock);
+	list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+		if (adf_send_message(req->tx_ring, req->fw_req)) {
+			/* The HW ring is full. Do nothing.
+			 * qat_alg_send_backlog() will be invoked again by
+			 * another callback.
+			 */
+			break;
+		}
+		list_del(&req->list);
+		req->base->complete(req->base, -EINPROGRESS);
+	}
+	spin_unlock_bh(&backlog->lock);
+}
+
+static void qat_alg_backlog_req(struct qat_alg_req *req,
+				struct qat_instance_backlog *backlog)
+{
+	INIT_LIST_HEAD(&req->list);
+
+	spin_lock_bh(&backlog->lock);
+	list_add_tail(&req->list, &backlog->list);
+	spin_unlock_bh(&backlog->lock);
+}
+
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+	struct qat_instance_backlog *backlog = req->backlog;
+	struct adf_etr_ring_data *tx_ring = req->tx_ring;
+	u32 *fw_req = req->fw_req;
+
+	/* If any request is already backlogged, then add to backlog list */
+	if (!list_empty(&backlog->list))
+		goto enqueue;
+
+	/* If ring is nearly full, then add to backlog list */
+	if (adf_ring_nearly_full(tx_ring))
+		goto enqueue;
+
+	/* If adding request to HW ring fails, then add to backlog list */
+	if (adf_send_message(tx_ring, fw_req))
+		goto enqueue;
+
+	return -EINPROGRESS;
+
+enqueue:
+	qat_alg_backlog_req(req, backlog);
+
+	return -EBUSY;
+}
+
+int qat_alg_send_message(struct qat_alg_req *req)
+{
+	u32 flags = req->base->flags;
+
+	if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		return qat_alg_send_message_maybacklog(req);
+	else
+		return qat_alg_send_message_retry(req);
+}