From: jbeulich@novell.com
Subject: eliminate scalability issues from initrd handling
Patch-mainline: n/a

The initrd getting mapped into the initial mapping imposed size restrictions
that native kernels don't have. The kernel doesn't really need the initrd to
be mapped, so use the new infrastructure available in Xen 4.1 and newer to
avoid the mapping and hence the restriction.
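
For illustration only, and not part of the diffs below, a minimal sketch of the
boot-time decision the 64-bit hunk implements: when the kernel advertises
XEN_ELFNOTE_MOD_START_PFN and the hypervisor acknowledges it by setting
SIF_MOD_START_PFN in the start info flags, mod_start holds the initrd's page
frame number and the initrd is left unmapped; otherwise mod_start is, as
before, a virtual address inside the initial mapping:

	if (xen_start_info->flags & SIF_MOD_START_PFN)
		/* New Xen 4.1+ convention: mod_start is a page frame number. */
		xen_initrd_start = PFN_PHYS(xen_start_info->mod_start);
	else if (xen_start_info->mod_start)
		/* Legacy convention: mod_start is a virtual address in the initial mapping. */
		xen_initrd_start = __pa(xen_start_info->mod_start);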

--- head.orig/arch/x86/include/mach-xen/asm/setup.h	2011-07-04 12:32:43.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/setup.h	2013-03-27 11:07:40.000000000 +0100
@@ -3,6 +3,8 @@
 void xen_start_kernel(void);
 void xen_arch_setup(void);
 
+extern unsigned long xen_initrd_start;
+
 #ifdef CONFIG_EFI
 void efi_probe(void);
 #else
--- head.orig/arch/x86/kernel/head-xen.c	2013-04-05 09:27:47.000000000 +0200
+++ head/arch/x86/kernel/head-xen.c	2013-05-13 14:14:55.000000000 +0200
@@ -92,6 +92,8 @@ extern void nmi(void);
 #define CALLBACK_ADDR(fn) { __KERNEL_CS, (unsigned long)(fn) }
 #endif
 
+unsigned long __initdata xen_initrd_start;
+
 const unsigned long *__read_mostly machine_to_phys_mapping =
 	(void *)MACH2PHYS_VIRT_START;
 EXPORT_SYMBOL(machine_to_phys_mapping);
--- head.orig/arch/x86/kernel/head32-xen.c	2015-03-12 15:12:43.000000000 +0100
+++ head/arch/x86/kernel/head32-xen.c	2013-08-12 16:21:28.000000000 +0200
@@ -65,6 +65,11 @@ asmlinkage __visible void __init i386_st
 		break;
 	}
 #else
+#ifdef CONFIG_BLK_DEV_INITRD
+	BUG_ON(xen_start_info->flags & SIF_MOD_START_PFN);
+	if (xen_start_info->mod_start)
+		xen_initrd_start = __pa(xen_start_info->mod_start);
+#endif
 	{
 		int max_cmdline;
 
--- head.orig/arch/x86/kernel/head64-xen.c	2013-05-10 14:51:39.000000000 +0200
+++ head/arch/x86/kernel/head64-xen.c	2013-05-10 14:51:43.000000000 +0200
@@ -215,6 +215,14 @@ void __init x86_64_start_reservations(ch
 {
 	copy_bootdata(__va(real_mode_data));
 
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* Record where the initrd (if any) starts; it gets reserved later. */
+	if (xen_start_info->flags & SIF_MOD_START_PFN)
+		xen_initrd_start = PFN_PHYS(xen_start_info->mod_start);
+	else if (xen_start_info->mod_start)
+		xen_initrd_start = __pa(xen_start_info->mod_start);
+#endif
+
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		xen_start_info->mfn_list = ~0UL;
 	else if (xen_start_info->mfn_list < __START_KERNEL_map)
--- head.orig/arch/x86/kernel/head_64-xen.S	2013-03-27 10:43:51.000000000 +0100
+++ head/arch/x86/kernel/head_64-xen.S	2013-03-05 15:51:13.000000000 +0100
@@ -189,6 +189,7 @@ NEXT_PAGE(empty_zero_page)
 	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .quad startup_64)
 	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad hypercall_page)
 	ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,   .quad _PAGE_PRESENT, _PAGE_PRESENT)
+	ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN,  .long 1)
 	ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M,       .quad VMEMMAP_START)
 	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .ascii "writable_page_tables";
 						 .ascii "|writable_descriptor_tables";
--- head.orig/arch/x86/kernel/setup-xen.c	2015-01-16 12:21:39.000000000 +0100
+++ head/arch/x86/kernel/setup-xen.c	2013-08-12 16:21:23.000000000 +0200
@@ -399,7 +399,7 @@ static u64 __init get_ramdisk_image(void
 
 	return ramdisk_image;
 #else
-	return xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0;
+	return xen_initrd_start;
 #endif
 }
 static u64 __init get_ramdisk_size(void)
--- head.orig/arch/x86/mm/init_64-xen.c	2015-01-21 14:48:35.000000000 +0100
+++ head/arch/x86/mm/init_64-xen.c	2015-01-21 14:52:29.000000000 +0100
@@ -927,9 +927,6 @@ void __init xen_finish_init_mapping(void
 	    && xen_start_info->mfn_list >= __START_KERNEL_map)
 		phys_to_machine_mapping =
 			__va(__pa_symbol(xen_start_info->mfn_list));
-	if (xen_start_info->mod_start)
-		xen_start_info->mod_start = (unsigned long)
-			__va(__pa(xen_start_info->mod_start));
 
 	/* Unpin the no longer used Xen provided page tables. */
 	mmuext.cmd = MMUEXT_UNPIN_TABLE;