Torsten Duwe 0ca449
From e0831e7af4e03f2715de102e18e9179ec0a81562 Mon Sep 17 00:00:00 2001
Torsten Duwe 0ca449
From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Torsten Duwe 0ca449
Date: Mon, 9 May 2022 14:34:08 +0100
Torsten Duwe 0ca449
Subject: [PATCH] crypto: qat - use pre-allocated buffers in datapath
Torsten Duwe 0ca449
Git-commit: e0831e7af4e03f2715de102e18e9179ec0a81562
Torsten Duwe 0ca449
Patch-mainline: v5.19-rc1
Torsten Duwe 0ca449
References: jsc#PED-1073
Torsten Duwe 0ca449
Torsten Duwe 0ca449
In order to do DMAs, the QAT device requires that the scatterlist
Torsten Duwe 0ca449
structures are mapped and translated into a format that the firmware can
Torsten Duwe 0ca449
understand. This is defined as the composition of a scatter gather list
Torsten Duwe 0ca449
(SGL) descriptor header, the struct qat_alg_buf_list, plus a variable
Torsten Duwe 0ca449
number of flat buffer descriptors, the struct qat_alg_buf.
Torsten Duwe 0ca449
Torsten Duwe 0ca449
The allocation and mapping of these data structures is done each time a
Torsten Duwe 0ca449
request is received from the skcipher and aead APIs.
Torsten Duwe 0ca449
In an OOM situation, this behaviour might lead to a dead-lock if an
Torsten Duwe 0ca449
allocation fails.
Torsten Duwe 0ca449
Torsten Duwe 0ca449
Based on the conversation in [1], increase the size of the aead and
Torsten Duwe 0ca449
skcipher request contexts to include an SGL descriptor that can handle
Torsten Duwe 0ca449
a maximum of 4 flat buffers.
Torsten Duwe 0ca449
In case requests exceed 4 entries, memory is allocated dynamically.
Torsten Duwe 0ca449
Torsten Duwe 0ca449
[1] https://lore.kernel.org/linux-crypto/20200722072932.GA27544@gondor.apana.org.au/
Torsten Duwe 0ca449
Torsten Duwe 0ca449
Cc: stable@vger.kernel.org
Torsten Duwe 0ca449
Fixes: d370cec32194 ("crypto: qat - Intel(R) QAT crypto interface")
Torsten Duwe 0ca449
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Torsten Duwe 0ca449
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Torsten Duwe 0ca449
Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
Torsten Duwe 0ca449
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Torsten Duwe 0ca449
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Torsten Duwe 0ca449
Signed-off-by: Torsten Duwe <duwe@suse.de>
Torsten Duwe 0ca449
Torsten Duwe 0ca449
---
Torsten Duwe 0ca449
 drivers/crypto/qat/qat_common/qat_algs.c   | 64 +++++++++++++---------
Torsten Duwe 0ca449
 drivers/crypto/qat/qat_common/qat_crypto.h | 24 ++++++++
Torsten Duwe 0ca449
 2 files changed, 61 insertions(+), 27 deletions(-)
Torsten Duwe 0ca449
Torsten Duwe 0ca449
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
Torsten Duwe 0ca449
index f998ed58457c2..ec635fe44c1f4 100644
Torsten Duwe 0ca449
--- a/drivers/crypto/qat/qat_common/qat_algs.c
Torsten Duwe 0ca449
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
Torsten Duwe 0ca449
@@ -46,19 +46,6 @@
Torsten Duwe 0ca449
 static DEFINE_MUTEX(algs_lock);
Torsten Duwe 0ca449
 static unsigned int active_devs;
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
-struct qat_alg_buf {
Torsten Duwe 0ca449
-	u32 len;
Torsten Duwe 0ca449
-	u32 resrvd;
Torsten Duwe 0ca449
-	u64 addr;
Torsten Duwe 0ca449
-} __packed;
Torsten Duwe 0ca449
-
Torsten Duwe 0ca449
-struct qat_alg_buf_list {
Torsten Duwe 0ca449
-	u64 resrvd;
Torsten Duwe 0ca449
-	u32 num_bufs;
Torsten Duwe 0ca449
-	u32 num_mapped_bufs;
Torsten Duwe 0ca449
-	struct qat_alg_buf bufers[];
Torsten Duwe 0ca449
-} __packed __aligned(64);
Torsten Duwe 0ca449
-
Torsten Duwe 0ca449
 /* Common content descriptor */
Torsten Duwe 0ca449
 struct qat_alg_cd {
Torsten Duwe 0ca449
 	union {
Torsten Duwe 0ca449
@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
Torsten Duwe 0ca449
 				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
Torsten Duwe 0ca449
-	kfree(bl);
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+	if (!qat_req->buf.sgl_src_valid)
Torsten Duwe 0ca449
+		kfree(bl);
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
 	if (blp != blpout) {
Torsten Duwe 0ca449
 		/* If out of place operation dma unmap only data */
Torsten Duwe 0ca449
 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
Torsten Duwe 0ca449
@@ -704,7 +694,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
Torsten Duwe 0ca449
 					 DMA_BIDIRECTIONAL);
Torsten Duwe 0ca449
 		}
Torsten Duwe 0ca449
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
Torsten Duwe 0ca449
-		kfree(blout);
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+		if (!qat_req->buf.sgl_dst_valid)
Torsten Duwe 0ca449
+			kfree(blout);
Torsten Duwe 0ca449
 	}
Torsten Duwe 0ca449
 }
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
Torsten Duwe 0ca449
 	dma_addr_t blp = DMA_MAPPING_ERROR;
Torsten Duwe 0ca449
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
Torsten Duwe 0ca449
 	struct scatterlist *sg;
Torsten Duwe 0ca449
-	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
Torsten Duwe 0ca449
+	size_t sz_out, sz = struct_size(bufl, bufers, n);
Torsten Duwe 0ca449
+	int node = dev_to_node(&GET_DEV(inst->accel_dev));
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 	if (unlikely(!n))
Torsten Duwe 0ca449
 		return -EINVAL;
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
-	bufl = kzalloc_node(sz, GFP_ATOMIC,
Torsten Duwe 0ca449
-			    dev_to_node(&GET_DEV(inst->accel_dev)));
Torsten Duwe 0ca449
-	if (unlikely(!bufl))
Torsten Duwe 0ca449
-		return -ENOMEM;
Torsten Duwe 0ca449
+	qat_req->buf.sgl_src_valid = false;
Torsten Duwe 0ca449
+	qat_req->buf.sgl_dst_valid = false;
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+	if (n > QAT_MAX_BUFF_DESC) {
Torsten Duwe 0ca449
+		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
Torsten Duwe 0ca449
+		if (unlikely(!bufl))
Torsten Duwe 0ca449
+			return -ENOMEM;
Torsten Duwe 0ca449
+	} else {
Torsten Duwe 0ca449
+		bufl = &qat_req->buf.sgl_src.sgl_hdr;
Torsten Duwe 0ca449
+		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
Torsten Duwe 0ca449
+		qat_req->buf.sgl_src_valid = true;
Torsten Duwe 0ca449
+	}
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 	for_each_sg(sgl, sg, n, i)
Torsten Duwe 0ca449
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
Torsten Duwe 0ca449
@@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
Torsten Duwe 0ca449
 		struct qat_alg_buf *bufers;
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 		n = sg_nents(sglout);
Torsten Duwe 0ca449
-		sz_out = struct_size(buflout, bufers, n + 1);
Torsten Duwe 0ca449
+		sz_out = struct_size(buflout, bufers, n);
Torsten Duwe 0ca449
 		sg_nctr = 0;
Torsten Duwe 0ca449
-		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
Torsten Duwe 0ca449
-				       dev_to_node(&GET_DEV(inst->accel_dev)));
Torsten Duwe 0ca449
-		if (unlikely(!buflout))
Torsten Duwe 0ca449
-			goto err_in;
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+		if (n > QAT_MAX_BUFF_DESC) {
Torsten Duwe 0ca449
+			buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
Torsten Duwe 0ca449
+			if (unlikely(!buflout))
Torsten Duwe 0ca449
+				goto err_in;
Torsten Duwe 0ca449
+		} else {
Torsten Duwe 0ca449
+			buflout = &qat_req->buf.sgl_dst.sgl_hdr;
Torsten Duwe 0ca449
+			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
Torsten Duwe 0ca449
+			qat_req->buf.sgl_dst_valid = true;
Torsten Duwe 0ca449
+		}
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 		bufers = buflout->bufers;
Torsten Duwe 0ca449
 		for_each_sg(sglout, sg, n, i)
Torsten Duwe 0ca449
@@ -810,7 +817,9 @@ err_out:
Torsten Duwe 0ca449
 			dma_unmap_single(dev, buflout->bufers[i].addr,
Torsten Duwe 0ca449
 					 buflout->bufers[i].len,
Torsten Duwe 0ca449
 					 DMA_BIDIRECTIONAL);
Torsten Duwe 0ca449
-	kfree(buflout);
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+	if (!qat_req->buf.sgl_dst_valid)
Torsten Duwe 0ca449
+		kfree(buflout);
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 err_in:
Torsten Duwe 0ca449
 	if (!dma_mapping_error(dev, blp))
Torsten Duwe 0ca449
@@ -823,7 +832,8 @@ err_in:
Torsten Duwe 0ca449
 					 bufl->bufers[i].len,
Torsten Duwe 0ca449
 					 DMA_BIDIRECTIONAL);
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
-	kfree(bufl);
Torsten Duwe 0ca449
+	if (!qat_req->buf.sgl_src_valid)
Torsten Duwe 0ca449
+		kfree(bufl);
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 	dev_err(dev, "Failed to map buf for dma\n");
Torsten Duwe 0ca449
 	return -ENOMEM;
Torsten Duwe 0ca449
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
Torsten Duwe 0ca449
index b6a4c95ae003f..0928f159ea993 100644
Torsten Duwe 0ca449
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
Torsten Duwe 0ca449
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
Torsten Duwe 0ca449
@@ -21,6 +21,26 @@ struct qat_crypto_instance {
Torsten Duwe 0ca449
 	atomic_t refctr;
Torsten Duwe 0ca449
 };
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
+#define QAT_MAX_BUFF_DESC	4
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+struct qat_alg_buf {
Torsten Duwe 0ca449
+	u32 len;
Torsten Duwe 0ca449
+	u32 resrvd;
Torsten Duwe 0ca449
+	u64 addr;
Torsten Duwe 0ca449
+} __packed;
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+struct qat_alg_buf_list {
Torsten Duwe 0ca449
+	u64 resrvd;
Torsten Duwe 0ca449
+	u32 num_bufs;
Torsten Duwe 0ca449
+	u32 num_mapped_bufs;
Torsten Duwe 0ca449
+	struct qat_alg_buf bufers[];
Torsten Duwe 0ca449
+} __packed;
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
+struct qat_alg_fixed_buf_list {
Torsten Duwe 0ca449
+	struct qat_alg_buf_list sgl_hdr;
Torsten Duwe 0ca449
+	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
Torsten Duwe 0ca449
+} __packed __aligned(64);
Torsten Duwe 0ca449
+
Torsten Duwe 0ca449
 struct qat_crypto_request_buffs {
Torsten Duwe 0ca449
 	struct qat_alg_buf_list *bl;
Torsten Duwe 0ca449
 	dma_addr_t blp;
Torsten Duwe 0ca449
@@ -28,6 +48,10 @@ struct qat_crypto_request_buffs {
Torsten Duwe 0ca449
 	dma_addr_t bloutp;
Torsten Duwe 0ca449
 	size_t sz;
Torsten Duwe 0ca449
 	size_t sz_out;
Torsten Duwe 0ca449
+	bool sgl_src_valid;
Torsten Duwe 0ca449
+	bool sgl_dst_valid;
Torsten Duwe 0ca449
+	struct qat_alg_fixed_buf_list sgl_src;
Torsten Duwe 0ca449
+	struct qat_alg_fixed_buf_list sgl_dst;
Torsten Duwe 0ca449
 };
Torsten Duwe 0ca449
 
Torsten Duwe 0ca449
 struct qat_crypto_request;
Torsten Duwe 0ca449
-- 
Torsten Duwe 0ca449
2.35.3
Torsten Duwe 0ca449