Blob Blame History Raw
From 17c25d89dedfd8488baa3b0249a01439530678da Mon Sep 17 00:00:00 2001
From: Nicolai Stange <nstange@suse.de>
Date: Thu, 23 Sep 2021 11:17:52 +0200
Subject: [PATCH 1/2] char/random: wire up userspace interface to SP800-90B
 compliant drbg
References: jsc#SLE-21132,bsc#1191259
Patch-mainline: Never, downstream band-aid to get SP800-90B for userspace

FIPS 140-3 requires that all entropy sources conform to SP800-90B. The
in-kernel users in crypto/ are fine as per their "stdrng" usage, which
maps to the SP800-90B conforming drbg_nopr_sha512 implementation.

However, drivers/char/random.c does not qualify as a suitable entropy
source and thus, something needs to be done about userspace libraries
such as Openssl, libgcrypt, GnuTLS or Mozilla-NSS, which currently
obtain their randomness exclusively through /dev/{u,}random and
getrandom(2) respectively.

There are two possible options:
1.) Make the kernel's drivers/char/random.c userspace interface conform
    to SP800-90B or
2.) touch all affected userspace libraries individually to make them
    mix in randomness obtained from a conforming source such as the
    userspace jitterentropy implementation.

This patch implements 1.) by wiring up the drivers/char/random.c userspace
interface to crypto/drbg.c.

More specifically, if in FIPS mode as indicated by fips_enabled, make
extract_crng_user() forward the request to the new
extract_fips_drbg_user(), which then extracts the generated output
directly from a "drbg_nopr_sha512" drbg instance.

For better performance on NUMA systems, maintain one drbg_nopr_sha512
instance per node. Instantiate them in a lazy fashion upon first use. The
drbg instances require quite some amount of memory and even though I don't
expect their footprint to exceed 1MB in total (1024 nodes max * ~1kB
estimated per drbg instance), be conservative and prepare for allocation
failures by always keeping at least one "emergency" instance around to
fall back to. Use the instance for node 0 as that emergency instance and let
the lazy allocation code ensure that this one gets instantiated before
anything else.

Finally, as this is downstream-only code and we always have
CONFIG_CRYPTO_DRBG_HASH=y, CONFIG_CRYPTO_SHA512=y as well as
CRYPTO_JITTERENTROPY=y for our kernels, don't introduce proper Kconfig
dependencies in order to not pointlessly clutter the diff context for
potential future backports.

Signed-off-by: Nicolai Stange <nstange@suse.de>
---
 drivers/char/random.c |  189 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 189 insertions(+)

--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -337,6 +337,9 @@
 #include <linux/uuid.h>
 #include <crypto/chacha.h>
 #include <crypto/sha1.h>
+#include <crypto/rng.h>
+#include <crypto/sha2.h>
+#include <crypto/algapi.h>
 
 #include <asm/processor.h>
 #include <linux/uaccess.h>
@@ -743,6 +746,189 @@ static int credit_entropy_bits_safe(stru
 
 /*********************************************************************
  *
+ * 800-90B compliant DRBG wired up to userspace interface only.
+ *
+ *********************************************************************/
+static DEFINE_MUTEX(fips_init_mtx);
+
+static struct crypto_rng **fips_drbg_tfms;
+
+static void __fips_drbg_tfm_free(struct crypto_rng *tfm)
+{
+	crypto_free_rng(tfm);
+}
+
+static struct crypto_rng *__fips_drbg_tfm_alloc(void)
+{
+	struct crypto_rng *tfm;
+	int r;
+
+	tfm = crypto_alloc_rng("drbg_nopr_sha512", 0, 0);
+	if (IS_ERR(tfm))
+		return tfm;
+
+	r = crypto_rng_reset(tfm, NULL, crypto_rng_seedsize(tfm));
+	if (r) {
+		__fips_drbg_tfm_free(tfm);
+		return ERR_PTR(r);
+	}
+
+	return tfm;
+}
+
+static int fips_drbgs_init(void)
+{
+	struct crypto_rng **drbg_tfms;
+	struct crypto_rng *drbg_tfm;
+
+	mutex_lock(&fips_init_mtx);
+	if (fips_drbg_tfms) {
+		mutex_unlock(&fips_init_mtx);
+		return 0;
+	}
+
+	drbg_tfms = kcalloc(nr_node_ids, sizeof(*fips_drbg_tfms),
+			    GFP_KERNEL);
+	if (!drbg_tfms) {
+		pr_err_ratelimited("random: FIPS drbg: drbg array allocation failure\n");
+		mutex_unlock(&fips_init_mtx);
+		return -ENOMEM;
+	}
+
+	drbg_tfm = __fips_drbg_tfm_alloc();
+	if (IS_ERR(drbg_tfm)) {
+		mutex_unlock(&fips_init_mtx);
+		kfree(drbg_tfms);
+		pr_err_ratelimited("random: FIPS drbg: drbg init failed, %ld\n",
+				   PTR_ERR(drbg_tfm));
+		return PTR_ERR(drbg_tfm);
+	}
+
+	WRITE_ONCE(drbg_tfms[0], drbg_tfm);
+
+	pr_debug("random: FIPS drbg: init complete\n");
+	smp_store_release(&fips_drbg_tfms, drbg_tfms);
+
+	mutex_unlock(&fips_init_mtx);
+
+	return 0;
+}
+
+static struct crypto_rng *fips_drbg_tfm_get(int node)
+{
+	struct crypto_rng **drbg_tfms;
+	struct crypto_rng *drbg_tfm;
+
+	drbg_tfms = smp_load_acquire(&fips_drbg_tfms);
+	if (unlikely(!drbg_tfms)) {
+		int r;
+
+		r = fips_drbgs_init();
+		if (r)
+			return ERR_PTR(r);
+		drbg_tfms = smp_load_acquire(&fips_drbg_tfms);
+	}
+
+again:
+	drbg_tfm = smp_load_acquire(&drbg_tfms[node]);
+	if (likely(drbg_tfm))
+		return drbg_tfm;
+
+	if (WARN_ON_ONCE(!node))
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&fips_init_mtx);
+	if (drbg_tfms[node]) {
+		mutex_unlock(&fips_init_mtx);
+		goto again;
+	}
+
+	drbg_tfm = __fips_drbg_tfm_alloc();
+	if (IS_ERR(drbg_tfm)) {
+		mutex_unlock(&fips_init_mtx);
+		pr_warn_ratelimited("random: FIPS drbg: per-node drbg init failed (%ld), performance degraded.\n",
+				    PTR_ERR(drbg_tfm));
+		node = 0;
+		goto again;
+	}
+
+	smp_store_release(&drbg_tfms[node], drbg_tfm);
+	mutex_unlock(&fips_init_mtx);
+
+	return drbg_tfm;
+}
+
+static ssize_t extract_fips_drbg_user(void __user *buf, size_t nbytes,
+				      __u8 __tmp[SHA512_DIGEST_SIZE])
+{
+	struct crypto_rng *drbg_tfm;
+	unsigned int block_size;
+	__u8 *tmp;
+	ssize_t ret = 0, i;
+	int large_request = (nbytes > 256);
+
+	if (!nbytes)
+		return 0;
+
+	drbg_tfm = fips_drbg_tfm_get(numa_node_id());
+	if (IS_ERR(drbg_tfm))
+		return PTR_ERR(drbg_tfm);
+
+	/*
+	 * Each drbg_generate() invocation performs backtracking
+	 * protection on top of the actual output generation. Amortize
+	 * that fixed cost over several SHA512_DIGEST_SIZE sized DRBG
+	 * output blocks per chunk copied out to userspace.
+	 */
+	block_size = min_t(size_t, nbytes, 4 * SHA512_DIGEST_SIZE);
+	tmp = kmalloc(block_size, GFP_KERNEL);
+	if (!tmp) {
+		/* Fall back to the caller's on-stack buffer, one digest at a time. */
+		tmp = __tmp;
+		block_size = SHA512_DIGEST_SIZE;
+	}
+
+	while (nbytes) {
+		int r;
+
+		if (large_request && need_resched()) {
+			if (signal_pending(current)) {
+				if (ret == 0)
+					ret = -ERESTARTSYS;
+				break;
+			}
+			schedule();
+		}
+
+		i = min_t(size_t, nbytes, block_size);
+		r = crypto_rng_get_bytes(drbg_tfm, tmp, (unsigned int)i);
+		if (r < 0) {
+			ret = r;
+			break;
+		}
+
+		if (copy_to_user(buf, tmp, i)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		nbytes -= i;
+		buf += i;
+		ret += i;
+	}
+
+	/* Wipe the random bytes; memzero_explicit() cannot be optimized away. */
+	memzero_explicit(tmp, block_size);
+	if (likely(tmp != __tmp))
+		kfree(tmp);
+
+	return ret;
+}
+
+
+
+/*********************************************************************
+ *
  * CRNG using CHACHA20
  *
  *********************************************************************/
@@ -1059,6 +1245,9 @@ static ssize_t extract_crng_user(void __
 	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
 	int large_request = (nbytes > 256);
 
+	if (fips_enabled)
+		return extract_fips_drbg_user(buf, nbytes, tmp);
+
 	while (nbytes) {
 		if (large_request && need_resched()) {
 			if (signal_pending(current)) {