Blob Blame History Raw
From 1f9c24273e5d5344dea413fe450452b50dbe8a1a Mon Sep 17 00:00:00 2001
From: "Lee, Chun-Yi" <jlee@suse.com>
Date: Mon, 11 Dec 2017 18:28:43 +0800
Subject: [PATCH 01/11] security: create hidden area to keep sensitive data
Patch-mainline: No, will be submitted to upstream
References: fate#316350

There is sensitive data (symmetric keys, passwords, ...) in kernel space.
When the kernel is locked down, that sensitive data should be filtered out
when the /dev/mem, /dev/kmem, hibernation, kdump... interfaces are used.

Signed-off-by: Lee, Chun-Yi <jlee@suse.com>
---
 include/linux/security.h |  28 ++++++
 init/main.c              |   1 +
 security/Kconfig         |   6 ++
 security/Makefile        |   2 +
 security/hidden_area.c   | 226 +++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 263 insertions(+)
 create mode 100644 security/hidden_area.c

diff --git a/include/linux/security.h b/include/linux/security.h
index 03c91f5..d0d8580 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1771,5 +1771,33 @@ static inline void lock_kernel_down(void)
 }
 #endif
 
+#ifdef CONFIG_HIDDEN_AREA
+extern void __init hidden_area_init(void);
+extern void * memcpy_to_hidden_area(const void *source, unsigned long size);
+extern bool page_is_hidden(struct page *page);
+extern void clean_hidden_area(void);
+extern int encrypt_backup_hidden_area(void *key, unsigned long key_len);
+extern int decrypt_restore_hidden_area(void *key, unsigned long key_len);
+#else	/* !CONFIG_HIDDEN_AREA: no-op stubs so callers need no ifdefs */
+static inline void __init hidden_area_init(void) {}
+static inline void * memcpy_to_hidden_area(const void *source, unsigned long size)
+{
+	return NULL;	/* no hidden area to copy into */
+}
+static inline bool page_is_hidden(struct page *page)
+{
+	return false;	/* no page is ever hidden */
+}
+static inline void clean_hidden_area(void) {}
+static inline int encrypt_backup_hidden_area(void *key, unsigned long key_len)
+{
+	return 0;	/* report success when the feature is compiled out */
+}
+static inline int decrypt_restore_hidden_area(void *key, unsigned long key_len)
+{
+	return 0;	/* report success when the feature is compiled out */
+}
+#endif	/* CONFIG_HIDDEN_AREA */
+
 #endif /* ! __LINUX_SECURITY_H */
 
diff --git a/init/main.c b/init/main.c
index fa0bcc7..87d52ec 100644
--- a/init/main.c
+++ b/init/main.c
@@ -663,6 +663,7 @@ asmlinkage __visible void __init start_kernel(void)
 	fork_init();
 	proc_caches_init();
 	buffer_init();
+	hidden_area_init();
 	key_init();
 	security_init();
 	dbg_late_init();
diff --git a/security/Kconfig b/security/Kconfig
index b99a550..2e891d1 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -213,6 +213,12 @@ config ALLOW_LOCKDOWN_LIFT
 	  ability of userspace to access the kernel image (eg. by SysRq+x under
 	  x86).
 
+config HIDDEN_AREA
+	def_bool n
+	help
+	  The data in hidden area will be filtered out when user space uses
+	  /dev/mem, /dev/kmem, hibernation, kdump...
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
diff --git a/security/Makefile b/security/Makefile
index 8c4a43e..620ec2f 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -32,3 +32,5 @@ obj-$(CONFIG_INTEGRITY)			+= integrity/
 
 # Allow the kernel to be locked down
 obj-$(CONFIG_LOCK_DOWN_KERNEL)		+= lock_down.o
+
+obj-$(CONFIG_HIDDEN_AREA)		+= hidden_area.o
diff --git a/security/hidden_area.c b/security/hidden_area.c
new file mode 100644
index 0000000..d8dc186
--- /dev/null
+++ b/security/hidden_area.c
@@ -0,0 +1,226 @@
+/* Hidden area
+ *
+ * Copyright (C) 2017 Lee, Chun-Yi <jlee@suse.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
+
+#include <asm/page.h>
+
+void *hidden_area_va;		/* virtual address of the hidden page */
+unsigned long hidden_area_size;	/* usable size; PAGE_SIZE once allocated */
+unsigned long hidden_area_pa;	/* physical address of the hidden page */
+unsigned long hidden_area_pfn;	/* pfn compared by page_is_hidden() */
+
+static void *cursor;		/* next free byte inside the hidden area */
+static u8 iv[AES_BLOCK_SIZE];	/* IV generated by the last encrypt/backup */
+static void *encrypted_area_va;	/* one-page backup buffer for the ciphertext */
+
+DEFINE_SPINLOCK(area_lock);	/* guards cursor and the area contents */
+
+void * memcpy_to_hidden_area(const void *source, unsigned long size)
+{
+	void * start_addr = NULL;
+
+	if (!source || !hidden_area_va || !size)
+		return NULL;
+
+	spin_lock(&area_lock);
+	/* Bounds-check under the lock (the old unlocked pfn test raced with
+	 * other callers and rejected writes ending exactly at the page end) */
+	if (cursor + size <= hidden_area_va + hidden_area_size) {
+		start_addr = memcpy(cursor, source, size);
+		cursor += size;
+	}
+	spin_unlock(&area_lock);
+
+	return start_addr;
+}
+EXPORT_SYMBOL(memcpy_to_hidden_area);
+
+/* Return true when @page is the single page backing the hidden area. */
+bool page_is_hidden(struct page *page)
+{
+	if (!page || !hidden_area_va)
+		return false;
+	return page_to_pfn(page) == hidden_area_pfn;
+}
+EXPORT_SYMBOL(page_is_hidden);
+
+void clean_hidden_area(void)
+{
+	spin_lock(&area_lock);
+	memzero_explicit(hidden_area_va, hidden_area_size); /* wipe secrets */
+	spin_unlock(&area_lock);
+}
+EXPORT_SYMBOL(clean_hidden_area);
+
+/**
+  * encrypt_backup_hidden_area - Encrypt and back up the hidden area.
+  * @key: The key that is used to encrypt.
+  * @key_len: The size (bytes) of key. It must be at least 32 bytes.
+  *
+  * This routine will encrypt the plain text in hidden area. The cipher
+  * text will be put to a buffer as backup. The cipher text can be restored
+  * to the hidden area later.
+  */
+int encrypt_backup_hidden_area(void *key, unsigned long key_len)
+{
+	struct scatterlist src[1], dst[1];
+	struct crypto_skcipher *tfm;
+	struct skcipher_request *req;
+	u8 iv_tmp[AES_BLOCK_SIZE];
+	u8 *tmp;
+	int ret;
+
+	if (!key || key_len < 32)
+		return -EINVAL;
+
+	if (!encrypted_area_va) {
+		encrypted_area_va = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!encrypted_area_va)
+			return -ENOMEM;
+	}
+
+	tmp = kmemdup(hidden_area_va, hidden_area_size, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		pr_err("failed to allocate skcipher (%d)\n", ret);
+		goto tfm_fail;
+	}
+
+	ret = crypto_skcipher_setkey(tfm, key, 32);
+	if (ret) {
+		pr_err("failed to setkey (%d)\n", ret);
+		goto set_fail;
+	}
+
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		pr_err("failed to allocate request\n");
+		ret = -ENOMEM;
+		goto set_fail;
+	}
+
+	spin_lock(&area_lock);
+	/* get initial vector from random pool */
+	get_random_bytes(iv, AES_BLOCK_SIZE);
+	memcpy(iv_tmp, iv, sizeof(iv));
+
+	memset(encrypted_area_va, 0, hidden_area_size);
+	sg_init_one(src, tmp, hidden_area_size);
+	sg_init_one(dst, encrypted_area_va, hidden_area_size);
+	skcipher_request_set_crypt(req, src, dst, hidden_area_size, iv_tmp);
+	ret = crypto_skcipher_encrypt(req);
+	spin_unlock(&area_lock);
+
+	skcipher_request_free(req);
+set_fail:
+	crypto_free_skcipher(tfm);
+tfm_fail:
+	memzero_explicit(tmp, hidden_area_size); /* plain memset may be elided */
+	kfree(tmp);
+
+	return ret;
+}
+EXPORT_SYMBOL(encrypt_backup_hidden_area);
+
+int decrypt_restore_hidden_area(void *key, unsigned long key_len)
+{
+	struct scatterlist src[1], dst[1];
+	struct crypto_skcipher *tfm;
+	struct skcipher_request *req;
+	u8 iv_tmp[AES_BLOCK_SIZE];
+	void *tmp;
+	int ret;
+
+	if (!key || key_len < 32)
+		return -EINVAL;
+
+	if (!encrypted_area_va) {
+		pr_err("hidden area is not encrypted yet\n");
+		return -EINVAL;
+	}
+
+	/* allocate tmp buffer for decrypted data */
+	tmp = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		pr_err("failed to allocate skcipher (%d)\n", ret);
+		goto tfm_fail;
+	}
+
+	ret = crypto_skcipher_setkey(tfm, key, 32);
+	if (ret) {
+		pr_err("failed to setkey (%d)\n", ret);
+		goto set_fail;
+	}
+
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		pr_err("failed to allocate request\n");
+		ret = -ENOMEM;
+		goto set_fail;
+	}
+
+	spin_lock(&area_lock);
+	/* decrypt data to tmp buffer */
+	sg_init_one(src, encrypted_area_va, hidden_area_size);
+	sg_init_one(dst, tmp, hidden_area_size);
+	memcpy(iv_tmp, iv, sizeof(iv));
+	skcipher_request_set_crypt(req, src, dst, hidden_area_size, iv_tmp);
+	ret = crypto_skcipher_decrypt(req);
+
+	/* restore hidden area from tmp buffer */
+	if (!ret) {
+		memset(hidden_area_va, 0, hidden_area_size);
+		memcpy(hidden_area_va, tmp, hidden_area_size);
+		memzero_explicit(encrypted_area_va, hidden_area_size);
+		free_pages((unsigned long)encrypted_area_va, 0);
+		encrypted_area_va = NULL;
+	}
+	spin_unlock(&area_lock);
+
+	skcipher_request_free(req);
+set_fail:
+	crypto_free_skcipher(tfm);
+tfm_fail:
+	memzero_explicit(tmp, hidden_area_size); /* tmp held the plaintext */
+	free_pages((unsigned long)tmp, 0);
+	return ret;
+}
+EXPORT_SYMBOL(decrypt_restore_hidden_area);
+
+void __init hidden_area_init(void)
+{
+	cursor = NULL;
+	hidden_area_va = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!hidden_area_va) {
+		pr_err("Hidden Area: allocate page failed\n");
+		return;
+	}
+	hidden_area_pa = virt_to_phys(hidden_area_va);
+	hidden_area_pfn = page_to_pfn(virt_to_page(hidden_area_va));
+	hidden_area_size = PAGE_SIZE;
+	cursor = hidden_area_va;
+}
-- 
2.10.2