diff --git a/patches.kernel.org/6.2.2-002-crypto-arm64-sm4-gcm-Fix-possible-crash-in-GCM-.patch b/patches.kernel.org/6.2.2-002-crypto-arm64-sm4-gcm-Fix-possible-crash-in-GCM-.patch
new file mode 100644
index 0000000..86d8d7d
--- /dev/null
+++ b/patches.kernel.org/6.2.2-002-crypto-arm64-sm4-gcm-Fix-possible-crash-in-GCM-.patch
@@ -0,0 +1,160 @@
+From: Herbert Xu
+Date: Thu, 2 Feb 2023 16:33:47 +0800
+Subject: [PATCH] crypto: arm64/sm4-gcm - Fix possible crash in GCM cryption
+References: bsc#1012628
+Patch-mainline: 6.2.2
+Git-commit: 4e4a08868f15897ca236528771c3733fded42c62
+
+commit 4e4a08868f15897ca236528771c3733fded42c62 upstream.
+
+An often overlooked aspect of the skcipher walker API is that an
+error is not just indicated by a non-zero return value, but by the
+fact that walk->nbytes is zero.
+
+Thus it is an error to call skcipher_walk_done after getting back
+walk->nbytes == 0 from the previous interaction with the walker.
+
+This is because when walk->nbytes is zero the walker is left in
+an undefined state and any further calls to it may try to free
+uninitialised stack memory.
+
+The sm4 arm64 ccm code gets this wrong and ends up calling
+skcipher_walk_done even when walk->nbytes is zero.
+
+This patch rewrites the loop in a form that resembles other callers.
+
+Reported-by: Tianjia Zhang
+Fixes: ae1b83c7d572 ("crypto: arm64/sm4 - add CE implementation for GCM mode")
+Signed-off-by: Herbert Xu
+Tested-by: Tianjia Zhang
+Signed-off-by: Herbert Xu
+Cc: Nathan Chancellor
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Jiri Slaby
+---
+ arch/arm64/crypto/sm4-ce-gcm-glue.c | 51 ++++++++++++++---------------
+ 1 file changed, 25 insertions(+), 26 deletions(-)
+
+diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
+index c450a202..73bfb697 100644
+--- a/arch/arm64/crypto/sm4-ce-gcm-glue.c
++++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
+@@ -135,22 +135,23 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
+ }
+ 
+ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
+-		     struct sm4_gcm_ctx *ctx, u8 ghash[],
++		     u8 ghash[], int err,
+ 		     void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
+ 						    u8 *dst, const u8 *src, u8 *iv,
+ 						    unsigned int nbytes, u8 *ghash,
+ 						    const u8 *ghash_table, const u8 *lengths))
+ {
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ 	u8 __aligned(8) iv[SM4_BLOCK_SIZE];
+ 	be128 __aligned(8) lengths;
+-	int err;
+ 
+ 	memset(ghash, 0, SM4_BLOCK_SIZE);
+ 
+ 	lengths.a = cpu_to_be64(req->assoclen * 8);
+ 	lengths.b = cpu_to_be64(walk->total * 8);
+ 
+-	memcpy(iv, walk->iv, GCM_IV_SIZE);
++	memcpy(iv, req->iv, GCM_IV_SIZE);
+ 	put_unaligned_be32(2, iv + GCM_IV_SIZE);
+ 
+ 	kernel_neon_begin();
+@@ -158,49 +159,51 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
+ 	if (req->assoclen)
+ 		gcm_calculate_auth_mac(req, ghash);
+ 
+-	do {
++	while (walk->nbytes) {
+ 		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
+ 		const u8 *src = walk->src.virt.addr;
+ 		u8 *dst = walk->dst.virt.addr;
+ 
+ 		if (walk->nbytes == walk->total) {
+-			tail = 0;
+-
+ 			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+ 					       walk->nbytes, ghash,
+ 					       ctx->ghash_table,
+ 					       (const u8 *)&lengths);
+-		} else if (walk->nbytes - tail) {
+-			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+-					       walk->nbytes - tail, ghash,
+-					       ctx->ghash_table, NULL);
++
++			kernel_neon_end();
++
++			return skcipher_walk_done(walk, 0);
+ 		}
+ 
++		sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
++				       walk->nbytes - tail, ghash,
++				       ctx->ghash_table, NULL);
++
+ 		kernel_neon_end();
+ 
+ 		err = skcipher_walk_done(walk, tail);
+-		if (err)
+-			return err;
+-		if (walk->nbytes)
+-			kernel_neon_begin();
+-	} while (walk->nbytes > 0);
+ 
+-	return 0;
++		kernel_neon_begin();
++	}
++
++	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
++			       walk->nbytes, ghash, ctx->ghash_table,
++			       (const u8 *)&lengths);
++
++	kernel_neon_end();
++
++	return err;
+ }
+ 
+ static int gcm_encrypt(struct aead_request *req)
+ {
+ 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
+ 	struct skcipher_walk walk;
+ 	int err;
+ 
+ 	err = skcipher_walk_aead_encrypt(&walk, req, false);
+-	if (err)
+-		return err;
+-
+-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc);
++	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_enc);
+ 	if (err)
+ 		return err;
+ 
+@@ -215,17 +218,13 @@ static int gcm_decrypt(struct aead_request *req)
+ {
+ 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ 	unsigned int authsize = crypto_aead_authsize(aead);
+-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
+ 	u8 authtag[SM4_BLOCK_SIZE];
+ 	struct skcipher_walk walk;
+ 	int err;
+ 
+ 	err = skcipher_walk_aead_decrypt(&walk, req, false);
+-	if (err)
+-		return err;
+-
+-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec);
++	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_dec);
+ 	if (err)
+ 		return err;
+ 
+--
+2.35.3
+
diff --git a/series.conf b/series.conf
index 8947bac..b6d526c 100644
--- a/series.conf
+++ b/series.conf
@@ -41,6 +41,7 @@
 	patches.kernel.org/6.2.1-012-bpf-add-missing-header-file-include.patch
 	patches.kernel.org/6.2.1-013-Linux-6.2.1.patch
 	patches.kernel.org/6.2.2-001-ALSA-hda-cs35l41-Correct-error-condition-handli.patch
+	patches.kernel.org/6.2.2-002-crypto-arm64-sm4-gcm-Fix-possible-crash-in-GCM-.patch
 
 	########################################################
 	# Build fixes that apply to the vanilla kernel too.
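
The commit message above describes the skcipher walker contract this fix relies on: when a walk ends (with or without an error), walk->nbytes is left at zero, and skcipher_walk_done() may only be called while walk->nbytes is still non-zero. The rewritten gcm_crypt() loop follows the convention used by other walker callers in arch crypto glue code. The sketch below is editorial illustration only, not part of the patch; my_skcipher_crypt() and my_cipher_process() are hypothetical placeholders for a real driver's entry point and its low-level block-processing routine:

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* hypothetical stand-in for a real per-chunk cipher routine (e.g. NEON asm) */
static void my_cipher_process(u8 *dst, const u8 *src, unsigned int nbytes)
{
	/* placeholder: a real driver would encrypt/decrypt nbytes bytes here */
	memcpy(dst, src, nbytes);
}

static int my_skcipher_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	/* on failure this returns an error AND leaves walk.nbytes at zero */
	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* only the final chunk may be shorter than the walk stride */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		my_cipher_process(walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes);

		/*
		 * Report how many bytes were left unprocessed; never call
		 * this once walk.nbytes has already reached zero.
		 */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

The fixed gcm_crypt() has the same shape: the loop is driven by walk->nbytes, the final full-length chunk returns through skcipher_walk_done(walk, 0), and the error from the walk setup is simply carried through when there is nothing to process.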