From: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
Subject: Linux: 3.11
Patch-mainline: 3.11

 This patch contains the differences between Linux 3.10 and 3.11 as they affect the Xen patch set.

Automatically created from "patch-3.11" by xen-port-patches.py
Acked-by: jbeulich@suse.com

--- head.orig/arch/arm64/Kconfig	2014-02-17 17:26:39.000000000 +0100
+++ head/arch/arm64/Kconfig	2014-01-07 16:38:06.000000000 +0100
@@ -224,9 +224,9 @@ source "mm/Kconfig"
 
 config XEN_DOM0
 	def_bool y
-	depends on XEN
+	depends on PARAVIRT_XEN
 
-config XEN
+config PARAVIRT_XEN
 	bool "Xen guest support on ARM64 (EXPERIMENTAL)"
 	depends on ARM64 && OF
 	select SWIOTLB_XEN
--- head.orig/arch/arm64/Makefile	2014-01-20 03:40:07.000000000 +0100
+++ head/arch/arm64/Makefile	2013-12-03 08:36:05.000000000 +0100
@@ -44,7 +44,7 @@ export	TEXT_OFFSET GZFLAGS
 
 core-y		+= arch/arm64/kernel/ arch/arm64/mm/
 core-$(CONFIG_KVM) += arch/arm64/kvm/
-core-$(CONFIG_XEN) += arch/arm64/xen/
+core-$(CONFIG_PARAVIRT_XEN) += arch/arm64/xen/
 libs-y		:= arch/arm64/lib/ $(libs-y)
 libs-y		+= $(LIBGCC)
 
--- head.orig/arch/x86/Kconfig	2014-02-18 17:49:01.000000000 +0100
+++ head/arch/x86/Kconfig	2014-01-07 16:38:03.000000000 +0100
@@ -69,7 +69,7 @@ config X86
 	select HAVE_KERNEL_LZMA if !XEN
 	select HAVE_KERNEL_XZ if !XEN
 	select HAVE_KERNEL_LZO if !XEN
-	select HAVE_KERNEL_LZ4
+	select HAVE_KERNEL_LZ4 if !XEN
 	select HAVE_HW_BREAKPOINT
 	select HAVE_MIXED_BREAKPOINTS_REGS
 	select PERF_EVENTS
@@ -104,7 +104,7 @@ config X86
 	select HAVE_ARCH_SECCOMP_FILTER
 	select BUILDTIME_EXTABLE_SORT
 	select GENERIC_CMOS_UPDATE
-	select HAVE_ARCH_SOFT_DIRTY
+	select HAVE_ARCH_SOFT_DIRTY if !XEN
 	select CLOCKSOURCE_WATCHDOG if !XEN
 	select GENERIC_CLOCKEVENTS if !XEN
 	select ARCH_CLOCKSOURCE_DATA if X86_64
--- head.orig/arch/x86/include/asm/trace/irq_vectors.h	2014-01-20 03:40:07.000000000 +0100
+++ head/arch/x86/include/asm/trace/irq_vectors.h	2013-12-13 09:29:58.000000000 +0100
@@ -1,3 +1,5 @@
+#ifndef CONFIG_X86_NO_IDT
+
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM irq_vectors
 
@@ -113,3 +115,5 @@ DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
+
+#endif /* CONFIG_X86_NO_IDT */
--- head.orig/arch/x86/include/mach-xen/asm/desc.h	2012-06-14 11:23:26.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/desc.h	2013-12-13 09:40:53.000000000 +0100
@@ -37,8 +37,8 @@ static inline void fill_ldt(struct desc_
 #ifndef CONFIG_X86_NO_IDT
 extern struct desc_ptr idt_descr;
 extern gate_desc idt_table[];
-extern struct desc_ptr nmi_idt_descr;
-extern gate_desc nmi_idt_table[];
+extern struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[];
 #endif
 
 struct gdt_page {
@@ -341,7 +341,20 @@ static inline void set_nmi_gate(int gate
 	gate_desc s;
 
 	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
-	write_idt_entry(nmi_idt_table, gate, &s);
+	write_idt_entry(debug_idt_table, gate, &s);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern struct desc_ptr trace_idt_descr;
+extern gate_desc trace_idt_table[];
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
+	write_idt_entry(trace_idt_table, entry, gate);
+}
+#else
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
 }
 #endif
 
@@ -356,6 +369,7 @@ static inline void _set_gate(int gate, u
 	 * setup time
 	 */
 	write_idt_entry(idt_table, gate, &s);
+	write_trace_idt_entry(gate, &s);
 }
 
 /*
@@ -385,12 +399,39 @@ static inline void alloc_system_vector(i
 	}
 }
 
-static inline void alloc_intr_gate(unsigned int n, void *addr)
+#ifdef CONFIG_TRACING
+static inline void trace_set_intr_gate(unsigned int gate, void *addr)
+{
+	gate_desc s;
+
+	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
+	write_idt_entry(trace_idt_table, gate, &s);
+}
+
+static inline void __trace_alloc_intr_gate(unsigned int n, void *addr)
+{
+	trace_set_intr_gate(n, addr);
+}
+#else
+static inline void trace_set_intr_gate(unsigned int gate, void *addr)
+{
+}
+
+#define __trace_alloc_intr_gate(n, addr)
+#endif
+
+static inline void __alloc_intr_gate(unsigned int n, void *addr)
 {
-	alloc_system_vector(n);
 	set_intr_gate(n, addr);
 }
 
+#define alloc_intr_gate(n, addr)				\
+	do {							\
+		alloc_system_vector(n);				\
+		__alloc_intr_gate(n, addr);			\
+		__trace_alloc_intr_gate(n, trace_##addr);	\
+	} while (0)
+
 /*
  * This routine sets up an interrupt gate at directory privilege level 3.
  */
@@ -429,6 +470,73 @@ static inline void set_system_intr_gate_
 	BUG_ON((unsigned)n > 0xFF);
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
 }
+
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(u32, debug_idt_ctr);
+static inline bool is_debug_idt_enabled(void)
+{
+	if (this_cpu_read(debug_idt_ctr))
+		return true;
+
+	return false;
+}
+
+static inline void load_debug_idt(void)
+{
+	load_idt((const struct desc_ptr *)&debug_idt_descr);
+}
+#else
+static inline bool is_debug_idt_enabled(void)
+{
+	return false;
+}
+
+static inline void load_debug_idt(void)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern atomic_t trace_idt_ctr;
+static inline bool is_trace_idt_enabled(void)
+{
+	if (atomic_read(&trace_idt_ctr))
+		return true;
+
+	return false;
+}
+
+static inline void load_trace_idt(void)
+{
+	load_idt((const struct desc_ptr *)&trace_idt_descr);
+}
+#else
+static inline bool is_trace_idt_enabled(void)
+{
+	return false;
+}
+
+static inline void load_trace_idt(void)
+{
+}
+#endif
+
+/*
+ * The load_current_idt() must be called with interrupts disabled
+ * to avoid races. That way the IDT will always be set back to the expected
+ * descriptor. It's also called when a CPU is being initialized, and
+ * that doesn't need to disable interrupts, as nothing should be
+ * bothering the CPU then.
+ */
+static inline void load_current_idt(void)
+{
+	if (is_debug_idt_enabled())
+		load_debug_idt();
+	else if (is_trace_idt_enabled())
+		load_trace_idt();
+	else
+		load_idt((const struct desc_ptr *)&idt_descr);
+}
 #endif /* !CONFIG_X86_NO_IDT */
 
 #endif /* _ASM_X86_DESC_H */
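
The desc.h changes above mean a CPU can be running one of three IDTs: the regular
idt_table, the debug_idt_table (while code runs on the debug/NMI stack), or the
trace_idt_table (while interrupt-vector tracepoints are active), with
load_current_idt() picking among them in that priority order. The stand-alone
sketch below models only that selection logic, with plain integers standing in
for the real per-CPU and atomic counters; the names mirror the patch, but none
of this is kernel code:

	/*
	 * User-space model of the load_current_idt() priority: debug
	 * beats trace beats the base IDT.
	 */
	#include <stdio.h>

	static int debug_idt_ctr;	/* models this_cpu_read(debug_idt_ctr) */
	static int trace_idt_ctr;	/* models atomic_read(&trace_idt_ctr) */

	static const char *current_idt(void)
	{
		if (debug_idt_ctr)	/* debug/NMI stack in use wins */
			return "debug_idt_table";
		if (trace_idt_ctr)	/* vector tracepoints enabled */
			return "trace_idt_table";
		return "idt_table";	/* the normal case */
	}

	int main(void)
	{
		printf("%s\n", current_idt());	/* idt_table */
		trace_idt_ctr = 1;
		printf("%s\n", current_idt());	/* trace_idt_table */
		debug_idt_ctr = 1;		/* debug takes precedence */
		printf("%s\n", current_idt());	/* debug_idt_table */
		return 0;
	}
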
--- head.orig/arch/x86/include/mach-xen/asm/fixmap.h	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/fixmap.h	2013-08-12 13:00:59.000000000 +0200
@@ -81,11 +81,11 @@ enum fixed_addresses {
 			    + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
 	VVAR_PAGE,
 	VSYSCALL_HPET,
-#endif
 #ifdef CONFIG_PARAVIRT_CLOCK
 	PVCLOCK_FIXMAP_BEGIN,
 	PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
 #endif
+#endif
 	FIX_DBGP_BASE,
 	FIX_EARLYCON_MEM_BASE,
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
--- head.orig/arch/x86/include/mach-xen/asm/fpu-internal.h	2012-11-15 14:56:34.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/fpu-internal.h	2013-08-12 13:00:59.000000000 +0200
@@ -9,7 +9,7 @@ static inline bool xen_thread_fpu_begin(
 {
 	bool ret = false;
 
-	if (mcl && !use_eager_fpu()) {
+	if (mcl && !static_cpu_has_safe(X86_FEATURE_EAGER_FPU)) {
 		mcl->op = __HYPERVISOR_fpu_taskswitch;
 		mcl->args[0] = 0;
 		ret = true;
--- head.orig/arch/x86/include/mach-xen/asm/io.h	2011-07-01 15:19:34.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/io.h	2013-08-12 13:00:59.000000000 +0200
@@ -340,4 +340,11 @@ extern bool is_early_ioremap_ptep(pte_t 
 
 #define IO_SPACE_LIMIT 0xffff
 
+#ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_add(unsigned long base,
+					 unsigned long size);
+extern void arch_phys_wc_del(int handle);
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
 #endif /* _ASM_X86_IO_H */
--- head.orig/arch/x86/include/mach-xen/asm/pgtable.h	2013-03-25 09:13:57.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/pgtable.h	2013-08-21 09:43:15.000000000 +0200
@@ -201,7 +201,7 @@ static inline pte_t pte_mkexec(pte_t pte
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return pte_set_flags(pte, _PAGE_DIRTY);
+	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
@@ -266,7 +266,7 @@ static inline pmd_t pmd_wrprotect(pmd_t 
 
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-	return pmd_set_flags(pmd, _PAGE_DIRTY);
+	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 }
 
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
@@ -290,6 +290,60 @@ static inline pmd_t pmd_mknotpresent(pmd
 }
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline int pte_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
+}
+#endif
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+#endif
+
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
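
These helpers back the user-visible soft-dirty interface that
CONFIG_MEM_SOFT_DIRTY enables: writing "4" to /proc/PID/clear_refs clears the
soft-dirty bits, and the bit resurfaces as bit 55 of the corresponding
/proc/PID/pagemap entry once the page is written again. A rough user-space
demonstration, assuming 4 KiB pages and with error handling trimmed:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		static char buf[4096] __attribute__((aligned(4096)));
		uint64_t entry;
		int fd;

		fd = open("/proc/self/clear_refs", O_WRONLY);
		write(fd, "4", 1);		/* "4" clears soft-dirty bits */
		close(fd);

		buf[0] = 1;			/* dirty the page again */

		fd = open("/proc/self/pagemap", O_RDONLY);
		pread(fd, &entry, sizeof(entry),
		      ((uintptr_t)buf / 4096) * sizeof(entry));
		close(fd);

		printf("soft-dirty: %d\n", (int)((entry >> 55) & 1));
		return 0;
	}
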
--- head.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h	2012-08-20 14:37:06.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/pgtable-3level.h	2013-08-21 09:43:15.000000000 +0200
@@ -178,6 +178,9 @@ static inline pmd_t xen_pmdp_get_and_cle
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking bit 11 is taken from
+ * the low part of pte as well.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
 #define pgoff_to_pte(off)						\
--- head.orig/arch/x86/include/mach-xen/asm/pgtable_types.h	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/pgtable_types.h	2013-08-21 09:43:15.000000000 +0200
@@ -55,6 +55,33 @@
 #define _PAGE_HIDDEN	(_AT(pteval_t, 0))
 #endif
 
+/*
+ * The same hidden bit is used by kmemcheck, but since kmemcheck
+ * works on kernel pages while soft-dirty engine on user space,
+ * they do not conflict with each other.
+ */
+
+#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_HIDDEN
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
+#else
+#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
+#endif
+
+/*
+ * Tracking soft dirty bit when a page goes to a swap is tricky.
+ * We need a bit which can be stored in pte _and_ not conflict
+ * with swap entry format. On x86 bits 6 and 7 are *not* involved
+ * into swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
--- head.orig/arch/x86/include/mach-xen/asm/processor.h	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/processor.h	2013-08-12 13:00:59.000000000 +0200
@@ -91,10 +91,10 @@ struct cpuinfo_x86 {
 	char			wp_works_ok;	/* It doesn't on 386's */
 
 	/* Problems on some 486Dx4's and old 386's: */
-	char			hard_math;
 #ifndef CONFIG_XEN
 	char			rfu;
 	char			pad0;
+	char			pad1;
 #endif
 #else
 	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
@@ -175,6 +175,7 @@ extern const struct seq_operations cpuin
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
+extern void fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
@@ -980,5 +981,5 @@ bool xen_set_default_idle(void);
 #endif
 
 void stop_this_cpu(void *dummy);
-
+void df_debug(struct pt_regs *regs, long error_code);
 #endif /* _ASM_X86_PROCESSOR_H */
--- head.orig/arch/x86/include/mach-xen/asm/smp.h	2013-01-09 15:32:33.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/smp.h	2013-08-12 13:00:59.000000000 +0200
@@ -205,7 +205,7 @@ static inline int wbinvd_on_all_cpus(voi
 int wbinvd_on_all_cpus(void);
 #endif
 
-extern unsigned disabled_cpus __cpuinitdata;
+extern unsigned disabled_cpus;
 
 #include <asm/smp-processor-id.h>
 
--- head.orig/arch/x86/include/mach-xen/asm/special_insns.h	2012-07-05 12:31:42.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/special_insns.h	2013-08-12 13:00:59.000000000 +0200
@@ -55,7 +55,7 @@ static inline void xen_stts(void)
  * all loads stores around it, which can hurt performance. Solution is to
  * use a variable and mimic reads and writes to it to enforce serialization
  */
-#define __force_order machine_to_phys_nr
+extern unsigned long __force_order;
 
 static inline unsigned long native_read_cr0(void)
 {
--- head.orig/arch/x86/include/mach-xen/asm/spinlock.h	2012-09-05 15:48:38.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/spinlock.h	2013-08-21 09:43:15.000000000 +0200
@@ -349,8 +349,4 @@ static inline void arch_write_unlock(arc
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
 #endif /* _ASM_X86_SPINLOCK_H */
--- head.orig/arch/x86/include/mach-xen/asm/time.h	2011-07-11 12:25:07.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/time.h	2013-08-14 08:45:49.000000000 +0200
@@ -1,10 +1,10 @@
 #ifndef _XEN_ASM_TIME_H
 #define _XEN_ASM_TIME_H
 
-unsigned long xen_read_wallclock(void);
-int xen_write_wallclock(unsigned long);
-
 struct timespec;
+void xen_read_wallclock(struct timespec *);
+int xen_write_wallclock(const struct timespec *);
+
 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
 int xen_update_wallclock(const struct timespec *);
 #else
--- head.orig/arch/x86/kernel/Makefile	2013-12-02 17:58:33.000000000 +0100
+++ head/arch/x86/kernel/Makefile	2013-12-03 08:36:16.000000000 +0100
@@ -120,5 +120,6 @@ ifeq ($(CONFIG_X86_64),y)
 endif
 
 disabled-obj-$(CONFIG_XEN) := crash.o early-quirks.o i8237.o i8253.o i8259.o \
-	irqinit.o pci-swiotlb.o reboot.o smpboot.o trampoline%.o tsc%.o vsmp%.o
+	irqinit.o pci-swiotlb.o reboot.o smpboot.o tracepoint.o trampoline%.o \
+	tsc%.o vsmp%.o
 disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += probe_roms.o
--- head.orig/arch/x86/kernel/acpi/boot.c	2013-09-26 13:00:44.000000000 +0200
+++ head/arch/x86/kernel/acpi/boot.c	2013-12-03 08:36:20.000000000 +0100
@@ -569,7 +569,7 @@ static int acpi_register_gsi_ioapic(stru
 int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
 			   int trigger, int polarity) = acpi_register_gsi_pic;
 
-#ifdef CONFIG_ACPI_SLEEP
+#if defined(CONFIG_ACPI_SLEEP) && !defined(CONFIG_ACPI_PV_SLEEP)
 int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel;
 #else
 int (*acpi_suspend_lowlevel)(void);
--- head.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c	2012-10-31 08:25:00.000000000 +0100
+++ head/arch/x86/kernel/acpi/processor_extcntl_xen.c	2013-08-14 15:15:50.000000000 +0200
@@ -211,11 +211,18 @@ static int xen_sleep(u8 sleep_state, u32
 	return -1;
 }
 
+static int xen_acpi_suspend_lowlevel(void)
+{
+	acpi_enter_sleep_state(ACPI_STATE_S3);
+	return 0;
+}
+
 static int __init init_extcntl(void)
 {
 	unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
 
 	acpi_os_set_prepare_sleep(xen_sleep);
+	acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
 
 	if (!pmbits)
 		return 0;
--- head.orig/arch/x86/kernel/cpu/common-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/kernel/cpu/common-xen.c	2013-08-12 13:23:15.000000000 +0200
@@ -71,7 +71,7 @@ void __init setup_cpu_local_masks(void)
 #endif
 }
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
+static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	cpu_detect_cache_sizes(c);
@@ -88,13 +88,13 @@ static void __cpuinit default_init(struc
 #endif
 }
 
-static const struct cpu_dev __cpuinitconst default_cpu = {
+static const struct cpu_dev default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 };
 
-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -177,7 +177,7 @@ static int __init x86_xsaveopt_setup(cha
 __setup("noxsaveopt", x86_xsaveopt_setup);
 
 #ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
+static int cachesize_override = -1;
 
 static int __init cachesize_setup(char *str)
 {
@@ -233,14 +233,14 @@ static inline int flag_is_changeable_p(u
 }
 
 /* Probe for the CPUID instruction */
-int __cpuinit have_cpuid_p(void)
+int have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-static int disable_x86_serial_nr __cpuinitdata = 1;
+static int disable_x86_serial_nr = 1;
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -318,7 +318,7 @@ struct cpuid_dependent_feature {
 	u32 level;
 };
 
-static const struct cpuid_dependent_feature __cpuinitconst
+static const struct cpuid_dependent_feature
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT,		0x00000005 },
 	{ X86_FEATURE_DCA,		0x00000009 },
@@ -326,7 +326,7 @@ cpuid_dependent_features[] = {
 	{ 0, 0 }
 };
 
-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
 
@@ -364,7 +364,7 @@ static void __cpuinit filter_cpuid_featu
  */
 
 /* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+static const char *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	const struct cpu_model_info *info;
 
@@ -384,8 +384,8 @@ static const char *__cpuinit table_looku
 	return NULL;		/* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS];
+__u32 cpu_caps_set[NCAPINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -436,9 +436,9 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
+static void get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -467,7 +467,7 @@ static void __cpuinit get_model_name(str
 	}
 }
 
-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
+void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
@@ -521,7 +521,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO];
  */
 s8  __read_mostly tlb_flushall_shift = -1;
 
-void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+void cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	if (this_cpu->c_detect_tlb)
 		this_cpu->c_detect_tlb(c);
@@ -535,7 +535,7 @@ void __cpuinit cpu_detect_tlb(struct cpu
 		tlb_flushall_shift);
 }
 
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
@@ -586,7 +586,7 @@ out:
 #endif
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -613,7 +613,7 @@ static void __cpuinit get_cpu_vendor(str
 	this_cpu = &default_cpu;
 }
 
-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+void cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -643,7 +643,7 @@ void __cpuinit cpu_detect(struct cpuinfo
 	}
 }
 
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	u32 ebx;
@@ -697,7 +697,7 @@ void __cpuinit get_cpu_cap(struct cpuinf
 	init_scattered_cpuid_features(c);
 }
 
-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	int i;
@@ -756,10 +756,9 @@ static void __init early_identify_cpu(st
 		return;
 
 	cpu_detect(c);
-
 	get_cpu_vendor(c);
-
 	get_cpu_cap(c);
+	fpu_detect(c);
 #ifdef CONFIG_XEN
 	if (!cpu_has_xsave)
 		x86_xsave_setup(NULL);
@@ -773,6 +772,8 @@ static void __init early_identify_cpu(st
 
 	if (this_cpu->c_bsp_init)
 		this_cpu->c_bsp_init(c);
+
+	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 }
 
 void __init early_cpu_init(void)
@@ -817,7 +818,7 @@ void __init early_cpu_init(void)
  * unless we can find a reliable way to detect all the broken cases.
  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
  */
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+static void detect_nopl(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
@@ -826,7 +827,7 @@ static void __cpuinit detect_nopl(struct
 #endif
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+static void generic_identify(struct cpuinfo_x86 *c)
 {
 	c->extended_cpuid_level = 0;
 
@@ -865,7 +866,7 @@ static void __cpuinit generic_identify(s
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+static void identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -1018,7 +1019,7 @@ void __init identify_boot_cpu(void)
 void set_perf_event_pending(void) {}
 #endif
 
-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+void identify_secondary_cpu(struct cpuinfo_x86 *c)
 {
 	BUG_ON(c == &boot_cpu_data);
 	identify_cpu(c);
@@ -1033,14 +1034,14 @@ struct msr_range {
 	unsigned	max;
 };
 
-static const struct msr_range msr_range_array[] __cpuinitconst = {
+static const struct msr_range msr_range_array[] = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
 	{ 0xc0011000, 0xc001103b},
 };
 
-static void __cpuinit __print_cpu_msr(void)
+static void __print_cpu_msr(void)
 {
 	unsigned index_min, index_max;
 	unsigned index;
@@ -1059,7 +1060,7 @@ static void __cpuinit __print_cpu_msr(vo
 	}
 }
 
-static int show_msr __cpuinitdata;
+static int show_msr;
 
 static __init int setup_show_msr(char *arg)
 {
@@ -1080,7 +1081,7 @@ static __init int setup_noclflush(char *
 }
 __setup("noclflush", setup_noclflush);
 
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void print_cpu_info(struct cpuinfo_x86 *c)
 {
 	const char *vendor = NULL;
 
@@ -1109,7 +1110,7 @@ void __cpuinit print_cpu_info(struct cpu
 	print_cpu_msr(c);
 }
 
-void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
+void print_cpu_msr(struct cpuinfo_x86 *c)
 {
 	if (c->cpu_index < show_msr)
 		__print_cpu_msr();
@@ -1131,8 +1132,8 @@ __setup("clearcpuid=", setup_disablecpui
 #ifdef CONFIG_X86_64
 #ifndef CONFIG_X86_NO_IDT
 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
-				    (unsigned long) nmi_idt_table };
+struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
+				    (unsigned long) debug_idt_table };
 #endif
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
@@ -1181,7 +1182,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char,
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
 #endif
 
-void __cpuinit syscall_init(void)
+void syscall_init(void)
 {
 #ifndef CONFIG_XEN
 	/*
@@ -1236,20 +1237,20 @@ int is_debug_stack(unsigned long addr)
 		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
 }
 
-static DEFINE_PER_CPU(u32, debug_stack_use_ctr);
+DEFINE_PER_CPU(u32, debug_idt_ctr);
 
 void debug_stack_set_zero(void)
 {
-	this_cpu_inc(debug_stack_use_ctr);
-	load_idt((const struct desc_ptr *)&nmi_idt_descr);
+	this_cpu_inc(debug_idt_ctr);
+	load_current_idt();
 }
 
 void debug_stack_reset(void)
 {
-	if (WARN_ON(!this_cpu_read(debug_stack_use_ctr)))
+	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
 		return;
-	if (this_cpu_dec_return(debug_stack_use_ctr) == 0)
-		load_idt((const struct desc_ptr *)&idt_descr);
+	if (this_cpu_dec_return(debug_idt_ctr) == 0)
+		load_current_idt();
 }
 #endif
 
@@ -1304,7 +1305,7 @@ static void dbg_restore_debug_regs(void)
  */
 #ifdef CONFIG_X86_64
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 #ifndef CONFIG_X86_NO_TSS
 	struct orig_ist *oist;
@@ -1354,7 +1355,7 @@ void __cpuinit cpu_init(void)
 	loadsegment(fs, 0);
 
 #ifndef CONFIG_X86_NO_IDT
-	load_idt((const struct desc_ptr *)&idt_descr);
+	load_current_idt();
 #endif
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1422,7 +1423,7 @@ void __cpuinit cpu_init(void)
 
 #else
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
@@ -1473,3 +1474,17 @@ void __cpuinit cpu_init(void)
 	fpu_init();
 }
 #endif
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+void warn_pre_alternatives(void)
+{
+	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
+}
+EXPORT_SYMBOL_GPL(warn_pre_alternatives);
+#endif
+
+inline bool __static_cpu_has_safe(u16 bit)
+{
+	return boot_cpu_has(bit);
+}
+EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
--- head.orig/arch/x86/kernel/cpu/mtrr/main-xen.c	2011-02-01 15:03:03.000000000 +0100
+++ head/arch/x86/kernel/cpu/mtrr/main-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -6,8 +6,12 @@
 #include <linux/init.h>
 
 #include <asm/mtrr.h>
+#include <asm/pat.h>
 #include "mtrr.h"
 
+/* arch_phys_wc_add returns an MTRR register index plus this offset. */
+#define MTRR_TO_PHYS_WC_OFFSET 1000
+
 static DEFINE_MUTEX(mtrr_mutex);
 
 void generic_get_mtrr(unsigned int reg, unsigned long *base,
@@ -158,6 +162,73 @@ int mtrr_del(int reg, unsigned long base
 }
 EXPORT_SYMBOL(mtrr_del);
 
+/**
+ * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
+ * @base: Physical base address
+ * @size: Size of region
+ *
+ * If PAT is available, this does nothing.  If PAT is unavailable, it
+ * attempts to add a WC MTRR covering size bytes starting at base and
+ * logs an error if this fails.
+ *
+ * Drivers must store the return value to pass to mtrr_del_wc_if_needed,
+ * but drivers should not try to interpret that return value.
+ */
+int arch_phys_wc_add(unsigned long base, unsigned long size)
+{
+	int ret;
+
+	if (pat_enabled)
+		return 0;  /* Success!  (We don't need to do anything.) */
+
+	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
+	if (ret < 0) {
+		pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
+			(void *)base, (void *)(base + size - 1));
+		return ret;
+	}
+	return ret + MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL(arch_phys_wc_add);
+
+/*
+ * arch_phys_wc_del - undoes arch_phys_wc_add
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This cleans up after mtrr_add_wc_if_needed.
+ *
+ * The API guarantees that mtrr_del_wc_if_needed(error code) and
+ * mtrr_del_wc_if_needed(0) do nothing.
+ */
+void arch_phys_wc_del(int handle)
+{
+	if (handle >= 1) {
+		WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
+		mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
+	}
+}
+EXPORT_SYMBOL(arch_phys_wc_del);
+
+/*
+ * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This will turn the return value from arch_phys_wc_add into an mtrr
+ * index suitable for debugging.
+ *
+ * Note: There is no legitimate use for this function, except possibly
+ * in printk line.  Alas there is an illegitimate use in some ancient
+ * drm ioctls.
+ */
+int phys_wc_to_mtrr_index(int handle)
+{
+	if (handle < MTRR_TO_PHYS_WC_OFFSET)
+		return -1;
+	else
+		return handle - MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index);
+
 /*
  * Returns the effective MTRR type for the region
  * Error returns:
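
A typical consumer of the new pair looks like the sketch below: map the region
write-combined with ioremap_wc(), then call arch_phys_wc_add() so machines
without PAT still get a WC MTRR, storing the opaque cookie for teardown. The
structure and function names here are placeholders, not code from any real
driver:

	#include <linux/errno.h>
	#include <linux/io.h>

	struct fb_ctx {
		void __iomem *mmio;
		int wc_cookie;
	};

	static int fb_map(struct fb_ctx *ctx, unsigned long base,
			  unsigned long size)
	{
		ctx->mmio = ioremap_wc(base, size);
		if (!ctx->mmio)
			return -ENOMEM;
		/* No-op when PAT is on; adds a WC MTRR (or logs) when not. */
		ctx->wc_cookie = arch_phys_wc_add(base, size);
		return 0;
	}

	static void fb_unmap(struct fb_ctx *ctx)
	{
		arch_phys_wc_del(ctx->wc_cookie);  /* safe for <= 0 cookies */
		iounmap(ctx->mmio);
	}
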
--- head.orig/arch/x86/kernel/entry_32-xen.S	2013-03-25 09:13:57.000000000 +0100
+++ head/arch/x86/kernel/entry_32-xen.S	2013-08-12 13:00:59.000000000 +0200
@@ -865,7 +865,17 @@ ENTRY(name)				\
 	CFI_ENDPROC;			\
 ENDPROC(name)
 
-#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
+
+#ifdef CONFIG_TRACING
+#define TRACE_BUILD_INTERRUPT(name, nr)		\
+	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
+#else
+#define TRACE_BUILD_INTERRUPT(name, nr)
+#endif
+
+#define BUILD_INTERRUPT(name, nr) \
+	BUILD_INTERRUPT3(name, nr, smp_##name); \
+	TRACE_BUILD_INTERRUPT(name, nr)
 
 /* The include is where all of the SMP etc. interrupts come from */
 #include <asm/entry_arch.h>
--- head.orig/arch/x86/kernel/entry_64-xen.S	2013-03-25 09:13:57.000000000 +0100
+++ head/arch/x86/kernel/entry_64-xen.S	2013-08-12 13:00:59.000000000 +0200
@@ -356,7 +356,7 @@ NMI_MASK = 0x80000000
 	/*CFI_REL_OFFSET	ss,0*/
 	pushq_cfi %rax /* rsp */
 	CFI_REL_OFFSET	rsp,0
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
+	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
 	/*CFI_REL_OFFSET	rflags,0*/
 	pushq_cfi $__KERNEL_CS /* cs */
 	/*CFI_REL_OFFSET	cs,0*/
--- head.orig/arch/x86/kernel/head32-xen.c	2013-03-25 09:13:57.000000000 +0100
+++ head/arch/x86/kernel/head32-xen.c	2013-08-12 14:13:54.000000000 +0200
@@ -44,6 +44,8 @@ void __init i386_start_kernel(void)
 	}
 
 	BUG_ON(pte_index(hypervisor_virt_start));
+
+	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
 #endif
 
 #ifndef CONFIG_XEN
--- head.orig/arch/x86/kernel/irq-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/kernel/irq-xen.c	2013-08-14 09:29:01.000000000 +0200
@@ -18,6 +18,9 @@
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
 
+#define CREATE_TRACE_POINTS
+#include <asm/trace/irq_vectors.h>
+
 atomic_t irq_err_count;
 
 #ifndef CONFIG_XEN
@@ -223,23 +226,21 @@ unsigned int __irq_entry do_IRQ(struct p
 /*
  * Handler for X86_PLATFORM_IPI_VECTOR.
  */
-void smp_x86_platform_ipi(struct pt_regs *regs)
+void __smp_x86_platform_ipi(void)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	ack_APIC_irq();
-
-	irq_enter();
-
-	exit_idle();
-
 	inc_irq_stat(x86_platform_ipis);
 
 	if (x86_platform_ipi_callback)
 		x86_platform_ipi_callback();
+}
 
-	irq_exit();
+void smp_x86_platform_ipi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
+	entering_ack_irq();
+	__smp_x86_platform_ipi();
+	exiting_irq();
 	set_irq_regs(old_regs);
 }
 
@@ -265,6 +266,18 @@ void smp_kvm_posted_intr_ipi(struct pt_r
 }
 #endif
 
+void smp_trace_x86_platform_ipi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	entering_ack_irq();
+	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
+	__smp_x86_platform_ipi();
+	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
+	exiting_irq();
+	set_irq_regs(old_regs);
+}
+
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 #endif
 
--- head.orig/arch/x86/kernel/nmi.c	2013-08-09 15:25:46.000000000 +0200
+++ head/arch/x86/kernel/nmi.c	2013-08-14 14:26:32.000000000 +0200
@@ -17,6 +17,8 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
 #include <linux/slab.h>
 #include <linux/export.h>
 
--- head.orig/arch/x86/kernel/process-xen.c	2013-07-02 10:03:05.000000000 +0200
+++ head/arch/x86/kernel/process-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -391,7 +391,7 @@ static void amd_e400_idle(void)
 }
 #endif
 
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+void select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifndef CONFIG_XEN
 #ifdef CONFIG_SMP
--- head.orig/arch/x86/kernel/process_32-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/kernel/process_32-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -113,11 +113,16 @@ void __show_regs(struct pt_regs *regs, i
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
 	get_debugreg(d3, 3);
-	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
-			d0, d1, d2, d3);
-
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
+
+	/* Only print out debug registers if they are in their non-default state. */
+	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
+	    (d6 == DR6_RESERVED) && (d7 == 0x400))
+		return;
+
+	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
+			d0, d1, d2, d3);
 	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
 			d6, d7);
 }
@@ -150,7 +155,7 @@ int copy_thread(unsigned long clone_flag
 		childregs->bp = arg;
 		childregs->orig_ax = -1;
 		childregs->cs = __KERNEL_CS | get_kernel_rpl();
-		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
 		p->fpu_counter = 0;
 		p->thread.io_bitmap_ptr = NULL;
 		memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
--- head.orig/arch/x86/kernel/process_64-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/kernel/process_64-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -108,11 +108,18 @@ void __show_regs(struct pt_regs *regs, i
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
-	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
 	get_debugreg(d3, 3);
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
+
+	/* Only print out debug registers if they are in their non-default state. */
+	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
+	    (d6 == DR6_RESERVED) && (d7 == 0x400))
+		return;
+
+	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
 	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+
 }
 
 void xen_load_gs_index(unsigned gs)
@@ -184,7 +191,7 @@ int copy_thread(unsigned long clone_flag
 		childregs->bp = arg;
 		childregs->orig_ax = -1;
 		childregs->cs = __KERNEL_CS | get_kernel_rpl();
-		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
--- head.orig/arch/x86/kernel/setup-xen.c	2013-12-06 15:10:50.000000000 +0100
+++ head/arch/x86/kernel/setup-xen.c	2013-12-06 15:10:53.000000000 +0100
@@ -201,14 +201,12 @@ static struct resource bss_resource = {
 
 #ifdef CONFIG_X86_32
 /* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
+struct cpuinfo_x86 new_cpu_data = {
 	.wp_works_ok = 1,
-	.hard_math = 1,
 };
 /* common cpu data for all cpus */
 struct cpuinfo_x86 boot_cpu_data __read_mostly = {
 	.wp_works_ok = 1,
-	.hard_math = 1,
 };
 EXPORT_SYMBOL(boot_cpu_data);
 
@@ -1176,7 +1174,6 @@ void __init setup_arch(char **cmdline_p)
 	/* max_low_pfn get updated here */
 	find_low_pfn_range();
 #else
-	num_physpages = max_pfn;
 	max_mapnr = max_pfn;
 
 #ifdef CONFIG_X86_LOCAL_APIC
--- head.orig/arch/x86/kernel/time-xen.c	2014-01-22 14:18:10.000000000 +0100
+++ head/arch/x86/kernel/time-xen.c	2014-01-22 14:19:32.000000000 +0100
@@ -315,7 +315,7 @@ static unsigned long long local_clock(vo
 	return time;
 }
 
-unsigned long xen_read_wallclock(void)
+void xen_read_wallclock(struct timespec *now)
 {
 	const shared_info_t *s = HYPERVISOR_shared_info;
 	u32 version, sec, nsec;
@@ -330,12 +330,11 @@ unsigned long xen_read_wallclock(void)
 	} while ((s->wc_version & 1) | (version ^ s->wc_version));
 
 	delta = local_clock() + (u64)sec * NSEC_PER_SEC + nsec;
-	do_div(delta, NSEC_PER_SEC);
-
-	return delta;
+	now->tv_nsec = do_div(delta, NSEC_PER_SEC);
+	now->tv_sec = delta;
 }
 
-int xen_write_wallclock(unsigned long now)
+int xen_write_wallclock(const struct timespec *now)
 {
 	if (!is_initial_xendomain() || independent_wallclock)
 		return 0;
@@ -844,7 +843,7 @@ void time_resume(void)
 #ifdef CONFIG_SMP
 static char timer_name[NR_CPUS][15];
 
-int __cpuinit local_setup_timer(unsigned int cpu)
+int local_setup_timer(unsigned int cpu)
 {
 	int seq, irq;
 
@@ -883,7 +882,7 @@ int __cpuinit local_setup_timer(unsigned
 	return 0;
 }
 
-void __cpuinit local_teardown_timer(unsigned int cpu)
+void local_teardown_timer(unsigned int cpu)
 {
 	BUG_ON(cpu == 0);
 	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
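
The rewritten xen_read_wallclock() leans on do_div()'s slightly unusual
contract: the macro divides its 64-bit argument in place and returns the
remainder, so tv_nsec receives the remainder while the quotient is left behind
in delta for tv_sec. A user-space approximation of the idiom:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	/* Mimics the kernel's do_div(): divide in place, return remainder. */
	static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
	{
		uint32_t rem = *n % base;

		*n /= base;
		return rem;
	}

	int main(void)
	{
		uint64_t delta = 5 * NSEC_PER_SEC + 123456789;
		long tv_sec, tv_nsec;

		tv_nsec = do_div_sketch(&delta, NSEC_PER_SEC); /* remainder */
		tv_sec = delta;			/* quotient left in delta */
		printf("%ld.%09ld\n", tv_sec, tv_nsec);	/* 5.123456789 */
		return 0;
	}
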
--- head.orig/arch/x86/kernel/traps-xen.c	2013-11-07 12:39:30.000000000 +0100
+++ head/arch/x86/kernel/traps-xen.c	2013-11-07 12:39:32.000000000 +0100
@@ -63,19 +63,21 @@
 #include <asm/x86_init.h>
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
+
+#ifndef CONFIG_X86_NO_IDT
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
+#endif
 #else
 #include <asm/processor-flags.h>
 #include <asm/setup.h>
 
 asmlinkage int system_call(void);
+#endif
 
 #ifndef CONFIG_X86_NO_IDT
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.
- */
-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
-#endif
+/* Must be page-aligned because the real IDT is used in a fixmap. */
+gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
 #endif
 
 #ifndef CONFIG_XEN
@@ -258,6 +260,9 @@ dotraplinkage void do_double_fault(struc
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_nr = X86_TRAP_DF;
 
+#ifdef CONFIG_DOUBLEFAULT
+	df_debug(regs, error_code);
+#endif
 	/*
 	 * This is always a kernel trap and never fixable (and thus must
 	 * never return).
@@ -441,7 +446,7 @@ dotraplinkage void __kprobes do_debug(st
 	/* Store the virtualized DR6 value */
 	tsk->thread.debugreg6 = dr6;
 
-	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
+	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
 							SIGTRAP) == NOTIFY_STOP)
 		goto exit;
 
@@ -734,7 +739,7 @@ static const trap_info_t __initconst ear
 	{ X86_TRAP_PF, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
 	{ }
 };
-static const trap_info_t __cpuinitconst trap_table[] = {
+static const trap_info_t trap_table[] = {
 	{ X86_TRAP_DE, 0|X, __KERNEL_CS, (unsigned long)divide_error		},
 	{ X86_TRAP_DB, 0|4, __KERNEL_CS, (unsigned long)debug			},
 	{ X86_TRAP_BP, 3|4, __KERNEL_CS, (unsigned long)int3			},
@@ -798,7 +803,7 @@ void __init trap_init(void)
 	x86_init.irqs.trap_init();
 }
 
-void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
+void smp_trap_init(trap_info_t *trap_ctxt)
 {
 	const trap_info_t *t = trap_table;
 
--- head.orig/arch/x86/kernel/vsyscall_64-xen.c	2013-01-09 15:32:33.000000000 +0100
+++ head/arch/x86/kernel/vsyscall_64-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -337,7 +337,7 @@ sigsegv:
  * Assume __initcall executes before all user space. Hopefully kmod
  * doesn't violate that. We'll find out if it does.
  */
-static void __cpuinit vsyscall_set_cpu(int cpu)
+static void vsyscall_set_cpu(int cpu)
 {
 	unsigned long d;
 	unsigned long node = 0;
@@ -359,13 +359,13 @@ static void __cpuinit vsyscall_set_cpu(i
 	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }
 
-static void __cpuinit cpu_vsyscall_init(void *arg)
+static void cpu_vsyscall_init(void *arg)
 {
 	/* preemption should be already off */
 	vsyscall_set_cpu(raw_smp_processor_id());
 }
 
-static int __cpuinit
+static int
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
--- head.orig/arch/x86/kernel/x86_init-xen.c	2013-03-25 09:13:57.000000000 +0100
+++ head/arch/x86/kernel/x86_init-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -23,7 +23,7 @@
 #include <asm/iommu.h>
 #include <asm/mach_traps.h>
 
-void __cpuinit x86_init_noop(void) { }
+void x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
 int __init iommu_init_noop(void) { return 0; }
 
--- head.orig/arch/x86/mm/highmem_32-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/mm/highmem_32-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -1,6 +1,7 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/swap.h> /* for totalram_pages */
+#include <linux/bootmem.h>
 
 void *kmap(struct page *page)
 {
@@ -168,6 +169,11 @@ void __init set_highmem_pages_init(void)
 	struct zone *zone;
 	int nid;
 
+	/*
+	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
+	 * is invoked before free_all_bootmem()
+	 */
+	reset_all_zones_managed_pages();
 	for_each_zone(zone) {
 		unsigned long zone_start_pfn, zone_end_pfn;
 
--- head.orig/arch/x86/mm/init-xen.c	2013-08-15 13:02:54.000000000 +0200
+++ head/arch/x86/mm/init-xen.c	2013-09-04 13:03:05.000000000 +0200
@@ -88,8 +88,8 @@ __ref void *alloc_low_pages(unsigned int
 	return __va(pfn << PAGE_SHIFT);
 }
 
-/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE	(5 * PAGE_SIZE)
+/* need 3 4k for initial PMD_SIZE,  3 4k for 0-ISA_END_ADDRESS */
+#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
 void  __init early_alloc_pgt_buf(void)
 {
@@ -512,7 +512,6 @@ int devmem_is_allowed(unsigned long page
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr;
 	unsigned long begin_aligned, end_aligned;
 
 	/* Make sure boundaries are page aligned */
@@ -527,8 +526,6 @@ void free_init_pages(char *what, unsigne
 	if (begin >= end)
 		return;
 
-	addr = begin;
-
 	/*
 	 * If debugging page accesses then do not free this memory but
 	 * mark them not present - any buggy init-section access will
@@ -547,12 +544,11 @@ void free_init_pages(char *what, unsigne
 	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
 	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
 
-	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
-	for (; addr < end; addr += PAGE_SIZE) {
-		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 #ifdef CONFIG_X86_64
-		if (addr >= __START_KERNEL_map) {
+	if (begin >= __START_KERNEL_map) {
+		unsigned long addr;
+
+		for (addr = begin; addr < end; addr += PAGE_SIZE) {
 			paddr_t pa = __pa_symbol(addr);
 
 			/* make_readonly() reports all kernel addresses. */
@@ -564,15 +560,17 @@ void free_init_pages(char *what, unsigne
 			if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
 				BUG();
 		}
-#endif
-		free_reserved_page(virt_to_page(addr));
+		begin = (unsigned long)__va(__pa_symbol(begin));
+		end = (unsigned long)__va(__pa_symbol(end));
 	}
 #endif
+	free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
+#endif
 }
 
 void free_initmem(void)
 {
-	free_init_pages("unused kernel memory",
+	free_init_pages("unused kernel",
 			(unsigned long)(&__init_begin),
 			(unsigned long)(&__init_end));
 }
@@ -598,7 +596,7 @@ void __init free_initrd_mem(unsigned lon
 	 *   - relocate_initrd()
 	 * So here We can do PAGE_ALIGN() safely to get partial page to be freed
 	 */
-	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
+	free_init_pages("initrd", start, PAGE_ALIGN(end));
 }
 #endif
 
--- head.orig/arch/x86/mm/init_32-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/mm/init_32-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -651,10 +651,8 @@ void __init initmem_init(void)
 		highstart_pfn = max_low_pfn;
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 		pages_to_mb(highend_pfn - highstart_pfn));
-	num_physpages = highend_pfn;
 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
-	num_physpages = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
@@ -662,7 +660,7 @@ void __init initmem_init(void)
 	sparse_memory_present_with_active_regions(0);
 
 #ifdef CONFIG_FLATMEM
-	max_mapnr = num_physpages;
+	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
 #endif
 	__vmalloc_start_set = true;
 
@@ -730,8 +728,6 @@ static void __init test_wp_bit(void)
 
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
-	int tmp;
 	unsigned long pfn;
 
 	pci_iommu_alloc();
@@ -751,37 +747,16 @@ void __init mem_init(void)
 	set_highmem_pages_init();
 
 	/* this will put all low memory onto the freelists */
-	totalram_pages += free_all_bootmem();
+	free_all_bootmem();
 	/* XEN: init low-mem pages outside initial allocation. */
 	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
 		ClearPageReserved(pfn_to_page(pfn));
 		init_page_count(pfn_to_page(pfn));
 	}
 
-	reservedpages = 0;
-	for (tmp = 0; tmp < max_low_pfn; tmp++)
-		/*
-		 * Only count reserved RAM pages:
-		 */
-		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-			reservedpages++;
-
 	after_bootmem = 1;
 
-	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
-		nr_free_pages() << (PAGE_SHIFT-10),
-		num_physpages << (PAGE_SHIFT-10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
-		datasize >> 10,
-		initsize >> 10,
-		totalhigh_pages << (PAGE_SHIFT-10));
-
+	mem_init_print_info(NULL);
 	printk(KERN_INFO "virtual kernel memory layout:\n"
 		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
--- head.orig/arch/x86/mm/init_64-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/mm/init_64-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -438,7 +438,7 @@ void __init init_extra_mapping_uc(unsign
  *
  *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
  *
- * phys_addr holds the negative offset to the kernel, which is added
+ * phys_base holds the negative offset to the kernel, which is added
  * to the compile time generated pmds. This results in invalid pmds up
  * to the point where we hit the physaddr 0 mapping.
  *
@@ -1002,36 +1002,22 @@ EXPORT_SYMBOL_GPL(arch_add_memory);
 
 static void __meminit free_pagetable(struct page *page, int order)
 {
-	struct zone *zone;
-	bool bootmem = false;
 	unsigned long magic;
 	unsigned int nr_pages = 1 << order;
 
 	/* bootmem page has reserved flag */
 	if (PageReserved(page)) {
 		__ClearPageReserved(page);
-		bootmem = true;
 
 		magic = (unsigned long)page->lru.next;
 		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
 			while (nr_pages--)
 				put_page_bootmem(page++);
 		} else
-			__free_pages_bootmem(page, order);
+			while (nr_pages--)
+				free_reserved_page(page++);
 	} else
 		free_pages((unsigned long)page_address(page), order);
-
-	/*
-	 * SECTION_INFO pages and MIX_SECTION_INFO pages
-	 * are all allocated by bootmem.
-	 */
-	if (bootmem) {
-		zone = page_zone(page);
-		zone_span_writelock(zone);
-		zone->present_pages += nr_pages;
-		zone_span_writeunlock(zone);
-		totalram_pages += nr_pages;
-	}
 }
 
 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
@@ -1348,8 +1334,6 @@ static void __init register_page_bootmem
 
 void __init mem_init(void)
 {
-	long codesize, reservedpages, datasize, initsize;
-	unsigned long absent_pages;
 	unsigned long pfn;
 
 	pci_iommu_alloc();
@@ -1359,7 +1343,7 @@ void __init mem_init(void)
 	register_page_bootmem_info();
 
 	/* this will put all memory onto the freelists */
-	totalram_pages = free_all_bootmem();
+	free_all_bootmem();
 
 	/* XEN: init pages outside initial allocation. */
 	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
@@ -1367,27 +1351,13 @@ void __init mem_init(void)
 		init_page_count(pfn_to_page(pfn));
 	}
 
-	absent_pages = absent_pages_in_range(0, max_pfn);
-	reservedpages = max_pfn - totalram_pages - absent_pages;
 	after_bootmem = 1;
 
-	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
 	/* Register memory areas for /proc/kcore */
 	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
 			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-			 "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
-		nr_free_pages() << (PAGE_SHIFT-10),
-		max_pfn << (PAGE_SHIFT-10),
-		codesize >> 10,
-		absent_pages << (PAGE_SHIFT-10),
-		reservedpages << (PAGE_SHIFT-10),
-		datasize >> 10,
-		initsize >> 10);
+	mem_init_print_info(NULL);
 }
 
 #ifdef CONFIG_DEBUG_RODATA
@@ -1463,11 +1433,10 @@ void mark_rodata_ro(void)
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 
-	free_init_pages("unused kernel memory",
+	free_init_pages("unused kernel",
 			(unsigned long) __va(__pa_symbol(text_end)),
 			(unsigned long) __va(__pa_symbol(rodata_start)));
-
-	free_init_pages("unused kernel memory",
+	free_init_pages("unused kernel",
 			(unsigned long) __va(__pa_symbol(rodata_end)),
 			(unsigned long) __va(__pa_symbol(_sdata)));
 }
--- head.orig/arch/x86/mm/ioremap-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/arch/x86/mm/ioremap-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -678,15 +678,15 @@ __early_ioremap(resource_size_t phys_add
 	}
 
 	if (slot < 0) {
-		printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
-			 (u64)phys_addr, size);
+		printk(KERN_INFO "%s(%08llx, %08lx) not found slot\n",
+		       __func__, (u64)phys_addr, size);
 		WARN_ON(1);
 		return NULL;
 	}
 
 	if (early_ioremap_debug) {
-		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
-		       (u64)phys_addr, size, slot);
+		printk(KERN_INFO "%s(%08llx, %08lx) [%d] => ",
+		       __func__, (u64)phys_addr, size, slot);
 		dump_stack();
 	}
 
--- head.orig/arch/x86/mm/pgtable-xen.c	2013-12-10 11:40:21.000000000 +0100
+++ head/arch/x86/mm/pgtable-xen.c	2013-12-10 11:40:30.000000000 +0100
@@ -611,7 +611,6 @@ static void pgd_mop_up_pmds(struct mm_st
 static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 {
 	pud_t *pud;
-	unsigned long addr;
 	int i;
 
 	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
@@ -619,8 +618,7 @@ static void pgd_prepopulate_pmd(struct m
 
 	pud = pud_offset(pgd, 0);
 
-	for (addr = i = 0; i < PREALLOCATED_PMDS;
-	     i++, pud++, addr += PUD_SIZE) {
+	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
 		pmd_t *pmd = pmds[i];
 
 		if (i >= KERNEL_PGD_BOUNDARY) {
--- head.orig/arch/x86/vdso/vdso32-setup-xen.c	2012-04-11 13:26:23.000000000 +0200
+++ head/arch/x86/vdso/vdso32-setup-xen.c	2013-08-12 13:23:01.000000000 +0200
@@ -202,13 +202,13 @@ static struct page *vdso32_pages[1];
 #define	vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SYSENTER32))
 #define	vdso32_syscall()	(boot_cpu_has(X86_FEATURE_SYSCALL32))
 
-void __cpuinit syscall32_cpu_init(void)
+void syscall32_cpu_init(void)
 {
-	static const struct callback_register __cpuinitconst cstar = {
+	static const struct callback_register cstar = {
 		.type = CALLBACKTYPE_syscall32,
 		.address = (unsigned long)ia32_cstar_target
 	};
-	static const struct callback_register __cpuinitconst sysenter = {
+	static const struct callback_register sysenter = {
 		.type = CALLBACKTYPE_sysenter,
 		.address = (unsigned long)ia32_sysenter_target
 	};
@@ -234,16 +234,16 @@ static inline void map_compat_vdso(int m
 #define vdso32_syscall()	(boot_cpu_has(X86_FEATURE_SYSCALL32))
 
 extern asmlinkage void ia32pv_cstar_target(void);
-static const struct callback_register __cpuinitconst cstar = {
+static const struct callback_register cstar = {
 	.type = CALLBACKTYPE_syscall32,
 	.address = { __KERNEL_CS, (unsigned long)ia32pv_cstar_target },
 };
 #endif
 
-void __cpuinit enable_sep_cpu(void)
+void enable_sep_cpu(void)
 {
 	extern asmlinkage void ia32pv_sysenter_target(void);
-	static struct callback_register __cpuinitdata sysenter = {
+	static struct callback_register sysenter = {
 		.type = CALLBACKTYPE_sysenter,
 		.address = { __KERNEL_CS, (unsigned long)ia32pv_sysenter_target },
 	};
@@ -420,7 +420,7 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
-static ctl_table abi_table2[] = {
+static struct ctl_table abi_table2[] = {
 	{
 		.procname	= "vsyscall32",
 		.data		= &sysctl_vsyscall32,
@@ -431,7 +431,7 @@ static ctl_table abi_table2[] = {
 	{}
 };
 
-static ctl_table abi_root_table2[] = {
+static struct ctl_table abi_root_table2[] = {
 	{
 		.procname = "abi",
 		.mode = 0555,
--- head.orig/drivers/acpi/acpi_processor.c	2013-12-03 08:28:52.000000000 +0100
+++ head/drivers/acpi/acpi_processor.c	2013-08-14 15:26:05.000000000 +0200
@@ -30,9 +30,7 @@
 ACPI_MODULE_NAME("processor");
 
 DEFINE_PER_CPU(struct acpi_processor *, processors);
-#ifndef CONFIG_XEN
 EXPORT_PER_CPU_SYMBOL(processors);
-#endif
 
 /* --------------------------------------------------------------------------
                                 Errata Handling
--- head.orig/drivers/acpi/processor_core.c	2013-12-02 17:52:08.000000000 +0100
+++ head/drivers/acpi/processor_core.c	2013-08-14 15:17:26.000000000 +0200
@@ -26,6 +26,35 @@ ACPI_MODULE_NAME("processor_core");
  */
 const struct processor_extcntl_ops *processor_extcntl_ops;
 EXPORT_SYMBOL(processor_extcntl_ops);
+
+int processor_notify_external(struct acpi_processor *pr, int event, int type)
+{
+	int ret = -EINVAL;
+
+	if (!processor_cntl_external())
+		return -EINVAL;
+
+	switch (event) {
+	case PROCESSOR_PM_INIT:
+	case PROCESSOR_PM_CHANGE:
+		if (type >= PM_TYPE_MAX
+		    || !processor_extcntl_ops->pm_ops[type])
+			break;
+
+		ret = processor_extcntl_ops->pm_ops[type](pr, event);
+		break;
+	case PROCESSOR_HOTPLUG:
+		if (processor_extcntl_ops->hotplug)
+			ret = processor_extcntl_ops->hotplug(pr, type);
+		break;
+	default:
+		pr_err("Unsupported processor event %d.\n", event);
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(processor_notify_external);
 #endif
 
 static int __init set_no_mwait(const struct dmi_system_id *id)
--- head.orig/drivers/acpi/processor_extcntl.c	2011-02-01 15:03:03.000000000 +0100
+++ head/drivers/acpi/processor_extcntl.c	2013-08-14 15:16:46.000000000 +0200
@@ -67,34 +67,6 @@ static int processor_notify_smm(void)
 	return 0;
 }
 
-int processor_notify_external(struct acpi_processor *pr, int event, int type)
-{
-	int ret = -EINVAL;
-
-	if (!processor_cntl_external())
-		return -EINVAL;
-
-	switch (event) {
-	case PROCESSOR_PM_INIT:
-	case PROCESSOR_PM_CHANGE:
-		if ((type >= PM_TYPE_MAX) ||
-			!processor_extcntl_ops->pm_ops[type])
-			break;
-
-		ret = processor_extcntl_ops->pm_ops[type](pr, event);
-		break;
-	case PROCESSOR_HOTPLUG:
-		if (processor_extcntl_ops->hotplug)
-			ret = processor_extcntl_ops->hotplug(pr, type);
-		break;
-	default:
-		pr_err("Unsupported processor event %d.\n", event);
-		break;
-	}
-
-	return ret;
-}
-
 /*
  * This is called from ACPI processor init, and targeted to hold
  * some tricky housekeeping jobs to satisfy external control model.
--- head.orig/drivers/hwmon/coretemp-xen.c	2013-05-24 11:49:51.000000000 +0200
+++ head/drivers/hwmon/coretemp-xen.c	2013-08-12 13:00:59.000000000 +0200
@@ -587,7 +587,6 @@ static int coretemp_remove(struct platfo
 
 	device_remove_file(&pdev->dev, &pdata->name_attr);
 	hwmon_device_unregister(pdata->hwmon_dev);
-	platform_set_drvdata(pdev, NULL);
 	kfree(pdata);
 	return 0;
 }
--- head.orig/drivers/xen/Makefile	2013-03-25 09:13:58.000000000 +0100
+++ head/drivers/xen/Makefile	2013-08-12 13:00:59.000000000 +0200
@@ -4,12 +4,11 @@ xen-balloon_$(CONFIG_PARAVIRT_XEN) := xe
 xen-evtchn-name-$(CONFIG_PARAVIRT_XEN) := xen-evtchn
 xen-privcmd_$(CONFIG_PARAVIRT_XEN) := xen-privcmd.o
 
-ifneq ($(CONFIG_ARM),y)
-obj-$(CONFIG_PARAVIRT_XEN)	+= manage.o
+ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
 obj-$(CONFIG_HOTPLUG_CPU)	+= $(xen-hotplug-y)
 endif
 obj-$(CONFIG_X86)		+= fallback.o
-obj-$(CONFIG_PARAVIRT_XEN)	+= grant-table.o features.o events.o balloon.o
+obj-$(CONFIG_PARAVIRT_XEN)	+= grant-table.o features.o events.o balloon.o manage.o
 
 xen-balloon_$(CONFIG_XEN)	:= balloon/
 xen-privcmd_$(CONFIG_XEN)	:= privcmd/
--- head.orig/drivers/xen/balloon/balloon.c	2014-01-22 14:19:22.000000000 +0100
+++ head/drivers/xen/balloon/balloon.c	2014-01-24 16:18:14.000000000 +0100
@@ -75,26 +75,6 @@ struct balloon_stats balloon_stats;
 /* We increase/decrease in batches which fit in a page */
 static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
 
-#ifdef CONFIG_HIGHMEM
-#define inc_totalhigh_pages() (totalhigh_pages++)
-#define dec_totalhigh_pages() (totalhigh_pages--)
-#else
-#define inc_totalhigh_pages() ((void)0)
-#define dec_totalhigh_pages() ((void)0)
-#endif
-
-#ifndef CONFIG_XEN
-/*
- * In HVM guests accounting here uses the Xen visible values, but the kernel
- * determined totalram_pages value shouldn't get altered. Since totalram_pages
- * includes neither the kernel static image nor any memory allocated prior to
- * or from the bootmem allocator, we have to synchronize the two values.
- */
-static unsigned long __read_mostly totalram_bias;
-#else
-#define totalram_bias 0
-#endif
-
 /* List of ballooned pages, threaded through the mem_map array. */
 static LIST_HEAD(ballooned_pages);
 
@@ -128,13 +108,14 @@ static void balloon_append(struct page *
 	if (PageHighMem(page)) {
 		list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
 		bs.balloon_high++;
-		if (account)
-			dec_totalhigh_pages();
 	} else {
 		list_add(PAGE_TO_LIST(page), &ballooned_pages);
 		bs.balloon_low++;
 	}
 
+	if (account)
+		adjust_managed_page_count(page, -1);
+
 	pfn = page_to_pfn(page);
 	if (account) {
 		SetPageReserved(page);
@@ -159,12 +140,12 @@ static struct page *balloon_retrieve(int
 	UNLIST_PAGE(page);
 	BUG_ON(!PageReserved(page));
 
-	if (PageHighMem(page)) {
+	if (PageHighMem(page))
 		bs.balloon_high--;
-		inc_totalhigh_pages();
-	}
 	else
 		bs.balloon_low--;
+	adjust_managed_page_count(page, 1);
+
 	zone = page_zone(page);
 	*was_empty |= !populated_zone(zone);
 	zone->present_pages++;
@@ -212,10 +193,23 @@ static unsigned long current_target(void
 	return target;
 }
 
+#ifndef CONFIG_XEN
+static unsigned long balloon_num_physpages(void)
+{
+	unsigned int nid;
+	unsigned long phys_pages = 0;
+
+	for_each_online_node(nid)
+		phys_pages += node_spanned_pages(nid);
+
+	return phys_pages;
+}
+#endif
+
 static unsigned long minimum_target(void)
 {
 #ifndef CONFIG_XEN
-#define max_pfn num_physpages
+#define max_pfn balloon_num_physpages()
 #endif
 	unsigned long min_pages, curr_pages = current_target();
 
@@ -306,7 +300,6 @@ static int increase_reservation(unsigned
 	}
 
 	bs.current_pages += rc;
-	totalram_pages = bs.current_pages - totalram_bias;
 
  out:
 	balloon_unlock(flags);
@@ -386,7 +379,6 @@ static int decrease_reservation(unsigned
 	BUG_ON(ret != nr_pages);
 
 	bs.current_pages -= nr_pages;
-	totalram_pages = bs.current_pages - totalram_bias;
 
 	balloon_unlock(flags);
 
@@ -539,6 +531,7 @@ static int __init balloon_init(void)
 	} xen_pod_target_t;
 # endif
 	xen_pod_target_t pod_target = { .domid = DOMID_SELF };
+	unsigned long num_physpages = balloon_num_physpages();
 	int rc;
 #elif defined(CONFIG_X86)
 	unsigned long pfn;
@@ -567,7 +560,6 @@ static int __init balloon_init(void)
 			   - pod_target.pod_cache_pages;
 	if (rc || bs.current_pages > num_physpages)
 		bs.current_pages = num_physpages;
-	totalram_bias = bs.current_pages - totalram_pages;
 #endif
 	bs.target_pages  = bs.current_pages;
 	bs.balloon_low   = 0;
@@ -705,9 +697,8 @@ struct page **alloc_empty_pages_and_page
 			goto err;
 		}
 
-		totalram_pages = --bs.current_pages - totalram_bias;
-		if (PageHighMem(page))
-			dec_totalhigh_pages();
+		--bs.current_pages;
+		adjust_managed_page_count(page, -1);
 		page_zone(page)->present_pages--;
 
 		balloon_unlock(flags);
@@ -756,7 +747,7 @@ void balloon_release_driver_page(struct 
 
 	balloon_lock(flags);
 	balloon_append(page, 1);
-	totalram_pages = --bs.current_pages - totalram_bias;
+	bs.current_pages--;
 	bs.driver_pages--;
 	balloon_unlock(flags);
 
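
The balloon.c hunks above drop the driver's private inc_/dec_totalhigh_pages()
macros and the totalram_bias workaround in favour of the adjust_managed_page_count()
helper introduced in the 3.11 core, so ballooned pages are accounted in one place.
A rough userspace sketch of what that helper centralizes (hypothetical stand-in
types; the real kernel also updates the managed_pages count of the page's zone
under a dedicated lock):

#include <stdbool.h>

/* hypothetical stand-ins for the kernel's counters and predicate */
static long totalram_pages;
static long totalhigh_pages;

struct page { bool highmem; long *zone_managed_pages; };

/* Pass count = -1 when a page is ballooned out (handed back to Xen),
 * +1 when it is reclaimed; the zone, totalram and totalhigh counters
 * all move together, which is what the driver-local totalram_bias
 * previously compensated for. */
static void adjust_managed_page_count(struct page *page, long count)
{
	*page->zone_managed_pages += count;
	totalram_pages += count;
	if (page->highmem)
		totalhigh_pages += count;
}

int main(void)
{
	long zone_managed = 1024;
	struct page pg = { .highmem = false, .zone_managed_pages = &zone_managed };

	adjust_managed_page_count(&pg, -1);	/* balloon one page out */
	adjust_managed_page_count(&pg, 1);	/* ...and back in */
	return 0;
}
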
--- head.orig/drivers/xen/char/mem.c	2013-03-25 09:13:58.000000000 +0100
+++ head/drivers/xen/char/mem.c	2013-08-12 13:00:59.000000000 +0200
@@ -193,7 +193,7 @@ static loff_t memory_lseek(struct file *
 		offset += file->f_pos;
 	case SEEK_SET:
 		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
-		if ((unsigned long long)offset >= ~0xFFFULL) {
+		if (IS_ERR_VALUE((unsigned long long)offset)) {
 			ret = -EOVERFLOW;
 			break;
 		}
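
IS_ERR_VALUE() makes the intent of the old open-coded ">= ~0xFFFULL" test
explicit: the kernel encodes errno values in the top MAX_ERRNO (4095) values of
the unsigned range, so a file offset landing there would be indistinguishable
from an error return. A quick standalone check of the predicate, mirroring the
kernel's definition and the same unsigned-long-long cast as the hunk:

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long long)(x) >= \
			 (unsigned long long)-MAX_ERRNO)

int main(void)
{
	long long ok = 4096, bad = -9;	/* -9 would alias -EBADF */

	/* prints "0 1": only values in [-4095, -1] look like errors */
	printf("%d %d\n", (int)IS_ERR_VALUE(ok), (int)IS_ERR_VALUE(bad));
	return 0;
}
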
--- head.orig/drivers/xen/core/smpboot.c	2013-06-04 14:06:43.000000000 +0200
+++ head/drivers/xen/core/smpboot.c	2013-08-12 13:20:04.000000000 +0200
@@ -74,7 +74,7 @@ void __init prefill_possible_map(void)
 			++total_cpus;
 }
 
-static int __cpuinit xen_smp_intr_init(unsigned int cpu)
+static int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
 
@@ -167,7 +167,7 @@ static int __cpuinit xen_smp_intr_init(u
 	return rc;
 }
 
-static void __cpuinit xen_smp_intr_exit(unsigned int cpu)
+static void xen_smp_intr_exit(unsigned int cpu)
 {
 	if (cpu != 0)
 		local_teardown_timer(cpu);
@@ -182,7 +182,7 @@ static void __cpuinit xen_smp_intr_exit(
 	xen_spinlock_cleanup(cpu);
 }
 
-static void __cpuinit cpu_bringup(void)
+static void cpu_bringup(void)
 {
 	unsigned int cpu;
 
@@ -196,14 +196,13 @@ static void __cpuinit cpu_bringup(void)
 	local_irq_enable();
 }
 
-static void __cpuinit cpu_bringup_and_idle(void)
+static void cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
-static void __cpuinit cpu_initialize_context(unsigned int cpu,
-					     unsigned long sp0)
+static void cpu_initialize_context(unsigned int cpu, unsigned long sp0)
 {
 	/* vcpu_guest_context_t is too large to allocate on the stack.
 	 * Hence we allocate statically and protect it with a lock */
@@ -343,7 +342,7 @@ static int __init initialize_cpu_present
 }
 core_initcall(initialize_cpu_present_map);
 
-int __cpuinit __cpu_disable(void)
+int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -356,7 +355,7 @@ int __cpuinit __cpu_disable(void)
 	return 0;
 }
 
-void __cpuinit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
 		current->state = TASK_UNINTERRUPTIBLE;
@@ -368,7 +367,7 @@ void __cpuinit __cpu_die(unsigned int cp
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int rc;
 
--- head.orig/drivers/xen/core/spinlock.c	2014-01-07 17:18:21.000000000 +0100
+++ head/drivers/xen/core/spinlock.c	2014-01-07 17:18:44.000000000 +0100
@@ -40,7 +40,7 @@ static DEFINE_PER_CPU(struct rm_seq, rm_
 static bool __read_mostly nopoll;
 module_param(nopoll, bool, 0);
 
-int __cpuinit xen_spinlock_init(unsigned int cpu)
+int xen_spinlock_init(unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
 	int rc;
@@ -60,7 +60,7 @@ int __cpuinit xen_spinlock_init(unsigned
 	return rc;
 }
 
-void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
+void xen_spinlock_cleanup(unsigned int cpu)
 {
 	struct evtchn_close close;
 
@@ -72,7 +72,7 @@ void __cpuinit xen_spinlock_cleanup(unsi
 }
 
 #ifdef CONFIG_PM_SLEEP
-void __cpuinit spinlock_resume(void)
+void spinlock_resume(void)
 {
 	unsigned int cpu;
 
--- head.orig/drivers/xen/usbback/xenbus.c	2011-06-30 17:05:05.000000000 +0200
+++ head/drivers/xen/usbback/xenbus.c	2013-08-14 15:01:18.000000000 +0200
@@ -44,6 +44,7 @@
  */
 
 #include "usbback.h"
+#include <linux/usb/ch11.h>
 
 static int start_xenusbd(usbif_t *usbif)
 {
--- head.orig/drivers/xen/xen-pciback/slot.c	2011-09-19 14:36:47.000000000 +0200
+++ head/drivers/xen/xen-pciback/slot.c	2013-08-12 15:04:23.000000000 +0200
@@ -6,6 +6,8 @@
  *   Author: Tristan Gingold <tristan.gingold@bull.net>, from vpci.c
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/spinlock.h>
 #include "pciback.h"
 
@@ -63,8 +65,7 @@ static int _xen_pcibk_add_pci_dev(struct
 	for (bus = 0; bus < PCI_BUS_NBR; bus++)
 		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
 			if (slot_dev->slots[bus][slot] == NULL) {
-				pr_info("pciback: slot: %s: assign to"
-					" virtual slot %d, bus %d\n",
+				pr_info("slot: %s: assign to virtual slot %d, bus %d\n",
 					pci_name(dev), slot, bus);
 				slot_dev->slots[bus][slot] = dev;
 				goto unlock;
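
The pr_fmt() definition added here (and to the xenbus files below) is the
standard kernel idiom: printk.h only supplies a pass-through pr_fmt() when none
is defined yet, so defining it before any #include makes every pr_*() call in
the file pick up the module-name prefix automatically, which is why the
hand-written "pciback: "/"XENBUS " prefixes can be dropped from the format
strings. A userspace sketch of the mechanism, with printf standing in for printk:

#include <stdio.h>

#define KBUILD_MODNAME "pciback"	/* supplied by kbuild in the kernel */

/* must precede the pr_*() definitions, hence its place above the #includes */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints "pciback: slot: 0000:00:03.0: assign to virtual slot 1, bus 0" */
	pr_info("slot: %s: assign to virtual slot %d, bus %d\n",
		"0000:00:03.0", 1, 0);
	return 0;
}
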
--- head.orig/drivers/xen/xenbus/xenbus_comms.c	2012-10-29 17:13:41.000000000 +0100
+++ head/drivers/xen/xenbus/xenbus_comms.c	2013-08-12 13:51:56.000000000 +0200
@@ -30,6 +30,8 @@
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/wait.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
@@ -238,13 +240,12 @@ int xb_init_comms(void)
 	int err;
 
 	if (intf->req_prod != intf->req_cons)
-		pr_err("XENBUS request ring is not quiescent "
-		       "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
+		pr_err("request ring is not quiescent (%08x:%08x)!\n",
+		       intf->req_cons, intf->req_prod);
 
 	if (intf->rsp_prod != intf->rsp_cons) {
-		pr_warning("XENBUS response ring is not quiescent"
-			   " (%08x:%08x): fixing up\n",
-			   intf->rsp_cons, intf->rsp_prod);
+		pr_warn("response ring is not quiescent (%08x:%08x): fixing up\n",
+			intf->rsp_cons, intf->rsp_prod);
 		/* breaks kdump */
 		if (!reset_devices)
 			intf->rsp_cons = intf->rsp_prod;
@@ -258,7 +259,7 @@ int xb_init_comms(void)
 		xen_store_evtchn, wake_waiting,
 		0, "xenbus", &xb_waitq);
 	if (err <= 0) {
-		pr_err("XENBUS request irq failed %i\n", err);
+		pr_err("request irq failed %i\n", err);
 		return err;
 	}
 
@@ -271,7 +272,7 @@ int xb_init_comms(void)
 		err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
 						0, "xenbus", &xb_waitq);
 		if (err < 0) {
-			pr_err("XENBUS request irq failed %i\n", err);
+			pr_err("request irq failed %i\n", err);
 			return err;
 		}
 		xenbus_irq = err;
--- head.orig/drivers/xen/xenbus/xenbus_probe.c	2013-06-04 14:12:33.000000000 +0200
+++ head/drivers/xen/xenbus/xenbus_probe.c	2013-08-12 15:14:21.000000000 +0200
@@ -31,6 +31,8 @@
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DPRINTK(fmt, args...)				\
 	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
 		 __FUNCTION__, __LINE__, ##args)
@@ -549,7 +551,7 @@ int xenbus_probe_node(struct xen_bus_typ
 
 		err = bus->get_bus_id(devname, xendev->nodename);
 		if (!err)
-			dev_set_name(&xendev->dev, devname);
+			dev_set_name(&xendev->dev, "%s", devname);
 	}
 #else
 	err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
@@ -576,13 +578,13 @@ static int frontend_bus_id(char bus_id[X
 {
 	nodename = strchr(nodename, '/');
 	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
-		pr_warning("XENBUS: bad frontend %s\n", nodename);
+		pr_warn("bad frontend %s\n", nodename);
 		return -EINVAL;
 	}
 
 	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
 	if (!strchr(bus_id, '/')) {
-		pr_warning("XENBUS: bus_id %s no slash\n", bus_id);
+		pr_warn("bus_id %s no slash\n", bus_id);
 		return -EINVAL;
 	}
 	*strchr(bus_id, '/') = '-';
@@ -814,8 +816,7 @@ int xenbus_dev_suspend(struct device *de
 	if (drv->suspend)
 		err = drv->suspend(xdev);
 	if (err)
-		pr_warning("xenbus: suspend %s failed: %i\n",
-			   dev_name(dev), err);
+		pr_warn("suspend %s failed: %i\n", dev_name(dev), err);
 	return 0;
 }
 PARAVIRT_EXPORT_SYMBOL(xenbus_dev_suspend);
@@ -836,8 +837,8 @@ static int __maybe_unused suspend_cancel
 	if (drv->suspend_cancel)
 		err = drv->suspend_cancel(xdev);
 	if (err)
-		pr_warning("xenbus: suspend_cancel %s failed: %i\n",
-			   dev_name(dev), err);
+		pr_warn("suspend_cancel %s failed: %i\n",
+			dev_name(dev), err);
 	return 0;
 }
 
@@ -858,8 +859,8 @@ int xenbus_dev_resume(struct device *dev
 	drv = to_xenbus_driver(dev->driver);
 	err = talk_to_otherend(xdev);
 	if (err) {
-		pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n",
-			   dev_name(dev), err);
+		pr_warn("resume (talk_to_otherend) %s failed: %i\n",
+			dev_name(dev), err);
 		return err;
 	}
 
@@ -868,16 +869,15 @@ int xenbus_dev_resume(struct device *dev
 	if (drv->resume) {
 		err = drv->resume(xdev);
 		if (err) {
-			pr_warning("xenbus: resume %s failed: %i\n",
-				   dev_name(dev), err);
+			pr_warn("resume %s failed: %i\n", dev_name(dev), err);
 			return err;
 		}
 	}
 
 	err = watch_otherend(xdev);
 	if (err) {
-		pr_warning("xenbus_probe: resume (watch_otherend) %s failed:"
-			   " %d\n", dev_name(dev), err);
+		pr_warn("resume (watch_otherend) %s failed: %d\n",
+			dev_name(dev), err);
 		return err;
 	}
 
@@ -971,7 +971,7 @@ static void xenbus_reset_wait_for_backen
 	timeout = wait_event_interruptible_timeout(backend_state_wq,
 			backend_state == expected, 5 * HZ);
 	if (timeout <= 0)
-		pr_info("XENBUS: backend %s timed out.\n", be);
+		pr_info("backend %s timed out.\n", be);
 }
 
 /*
@@ -994,7 +994,7 @@ static void xenbus_reset_frontend(char *
 	be_watch.callback = xenbus_reset_backend_state_changed;
 	backend_state = XenbusStateUnknown;
 
-	pr_info("XENBUS: triggering reconnect on %s\n", be);
+	pr_info("triggering reconnect on %s\n", be);
 	register_xenbus_watch(&be_watch);
 
 	/* fall through to forward backend to state XenbusStateInitialising */
@@ -1013,7 +1013,7 @@ static void xenbus_reset_frontend(char *
 	}
 
 	unregister_xenbus_watch(&be_watch);
-	pr_info("XENBUS: reconnect done on %s\n", be);
+	pr_info("reconnect done on %s\n", be);
 	kfree(be_watch.node);
 }
 
@@ -1227,8 +1227,7 @@ int xenbus_conn(domid_t remote_dom, gran
 fail1:
 	rc2 = close_evtchn(xen_store_evtchn);
 	if (rc2 != 0)
-		pr_warning("XENBUS: Error freeing xenstore event channel:"
-			   " %d\n", rc2);
+		pr_warn("Error freeing xenstore event channel: %d\n", rc2);
 fail0:
 	xen_store_evtchn = -1;
 	return rc;
@@ -1297,8 +1296,8 @@ xenbus_init(void)
 	/* Register ourselves with the kernel bus subsystem */
 	xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
 	if (xenbus_frontend.error)
-		pr_warning("XENBUS: Error registering frontend bus: %i\n",
-			   xenbus_frontend.error);
+		pr_warn("Error registering frontend bus: %i\n",
+			xenbus_frontend.error);
 	xenbus_backend_bus_register();
 
 	/*
@@ -1393,8 +1392,7 @@ xenbus_init(void)
 	/* Initialize the interface to xenstore. */
 	err = xs_init();
 	if (err) {
-		pr_warning("XENBUS: Error initializing xenstore comms: %i\n",
-			   err);
+		pr_warn("Error initializing xenstore comms: %i\n", err);
 		goto out_error;
 	}
 
@@ -1404,8 +1402,8 @@ xenbus_init(void)
 		xenbus_frontend.error = device_register(&xenbus_frontend.dev);
 		if (xenbus_frontend.error) {
 			bus_unregister(&xenbus_frontend.bus);
-			pr_warning("XENBUS: Error registering frontend device:"
-				   " %d\n", xenbus_frontend.error);
+			pr_warn("Error registering frontend device: %d\n",
+				xenbus_frontend.error);
 		}
 	}
 	xenbus_backend_device_register();
@@ -1487,8 +1485,7 @@ static int print_device_status(struct de
 
 	if (!dev->driver) {
 		/* Information only: is this too noisy? */
-		pr_info("XENBUS: Device with no driver: %s\n",
-			xendev->nodename);
+		pr_info("Device with no driver: %s\n", xendev->nodename);
 		return 0;
 	}
 
@@ -1496,15 +1493,14 @@ static int print_device_status(struct de
 		enum xenbus_state rstate = XenbusStateUnknown;
 		if (xendev->otherend)
 			rstate = xenbus_read_driver_state(xendev->otherend);
-		pr_warning("XENBUS: Timeout connecting to device: %s"
-			   " (local state %d, remote state %d)\n",
-			   xendev->nodename, xendev->state, rstate);
+		pr_warn("Timeout connecting to device: %s"
+			" (local state %d, remote state %d)\n",
+			xendev->nodename, xendev->state, rstate);
 	}
 
 	xendrv = to_xenbus_driver(dev->driver);
 	if (xendrv->is_ready && !xendrv->is_ready(xendev))
-		pr_warning("XENBUS: Device not ready: %s\n",
-			   xendev->nodename);
+		pr_warn("Device not ready: %s\n", xendev->nodename);
 
 	return 0;
 }
@@ -1538,8 +1534,7 @@ static void wait_for_devices(struct xenb
 	while (exists_connecting_device(drv)) {
 		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
 			if (!seconds_waited)
-				pr_warning("XENBUS: Waiting for "
-					   "devices to initialise: ");
+				pr_warn("Waiting for devices to initialise: ");
 			seconds_waited += 5;
 			pr_cont("%us...", 300 - seconds_waited);
 			if (seconds_waited == 300)
--- head.orig/drivers/xen/xenbus/xenbus_probe_backend.c	2011-11-17 15:56:06.000000000 +0100
+++ head/drivers/xen/xenbus/xenbus_probe_backend.c	2013-08-12 13:55:51.000000000 +0200
@@ -31,9 +31,11 @@
  * IN THE SOFTWARE.
  */
 
-#define DPRINTK(fmt, args...)				\
-	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
-		 __func__, __LINE__, ##args)
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DPRINTK(fmt, ...)				\
+	pr_debug("(%s:%d) " fmt "\n",			\
+		 __func__, __LINE__, ##__VA_ARGS__)
 
 #include <linux/kernel.h>
 #include <linux/version.h>
@@ -326,8 +328,8 @@ void xenbus_backend_bus_register(void)
 {
 	xenbus_backend.error = bus_register(&xenbus_backend.bus);
 	if (xenbus_backend.error)
-		pr_warning("XENBUS: Error registering backend bus: %i\n",
-			   xenbus_backend.error);
+		pr_warn("Error registering backend bus: %i\n",
+			xenbus_backend.error);
 }
 
 void xenbus_backend_device_register(void)
@@ -338,8 +340,8 @@ void xenbus_backend_device_register(void
 	xenbus_backend.error = device_register(&xenbus_backend.dev);
 	if (xenbus_backend.error) {
 		bus_unregister(&xenbus_backend.bus);
-		pr_warning("XENBUS: Error registering backend device: %i\n",
-			   xenbus_backend.error);
+		pr_warn("Error registering backend device: %i\n",
+			xenbus_backend.error);
 	}
 }
 
--- head.orig/drivers/xen/xenbus/xenbus_xs.c	2013-01-30 12:03:06.000000000 +0100
+++ head/drivers/xen/xenbus/xenbus_xs.c	2013-08-12 13:00:59.000000000 +0200
@@ -31,6 +31,8 @@
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/unistd.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -139,8 +141,8 @@ static int get_error(const char *errorst
 
 	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
 		if (i == ARRAY_SIZE(xsd_errors) - 1) {
-			pr_warning("XENBUS xen store gave: unknown error %s",
-				   errorstring);
+			pr_warn("xen store gave: unknown error %s\n",
+				errorstring);
 			return EINVAL;
 		}
 	}
@@ -284,7 +286,7 @@ static void *xs_talkv(struct xenbus_tran
 	}
 
 	if (msg.type != type) {
-		pr_warn_ratelimited("XENBUS unexpected type [%d], expected [%d]\n",
+		pr_warn_ratelimited("unexpected type [%d], expected [%d]\n",
 				    msg.type, type);
 		kfree(ret);
 		return ERR_PTR(-EINVAL);
@@ -672,7 +674,7 @@ static void xs_reset_watches(void)
 
 	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
 	if (err && err != -EEXIST)
-		pr_warning("xs_reset_watches failed: %d\n", err);
+		pr_warn("xs_reset_watches failed: %d\n", err);
 #endif
 }
 
@@ -727,8 +729,7 @@ void unregister_xenbus_watch(struct xenb
 
 	err = xs_unwatch(watch->node, token);
 	if (err)
-		pr_warning("XENBUS Failed to release watch %s: %i\n",
-			   watch->node, err);
+		pr_warn("Failed to release watch %s: %i\n", watch->node, err);
 
 	up_read(&xs_state.watch_mutex);
 
@@ -970,8 +971,7 @@ static int xenbus_thread(void *unused)
 	for (;;) {
 		err = process_msg();
 		if (err)
-			pr_warning("XENBUS error %d while reading "
-				   "message\n", err);
+			pr_warn("error %d while reading message\n", err);
 		if (kthread_should_stop())
 			break;
 	}
--- head.orig/include/xen/blkif.h	2014-02-06 14:39:10.000000000 +0100
+++ head/include/xen/blkif.h	2014-01-30 16:54:37.000000000 +0100
@@ -25,6 +25,12 @@
 #include <xen/interface/io/blkif.h>
 #include <xen/interface/io/protocols.h>
 
+#define BLKIF_SEGS_PER_INDIRECT_FRAME \
+	(PAGE_SIZE / sizeof(struct blkif_request_segment))
+#define BLKIF_INDIRECT_PAGES(segs) \
+	(((segs) + BLKIF_SEGS_PER_INDIRECT_FRAME - 1) \
+	 / BLKIF_SEGS_PER_INDIRECT_FRAME)
+
 /* Not a real protocol.  Used to generate ring structs which contain
  * the elements common to all protocols only.  This way we get a
  * compiler-checkable way to use common struct elements, so we can
@@ -39,6 +45,7 @@ struct blkif_common_response {
 union __attribute__((transparent_union)) blkif_union {
 	struct blkif_request *generic;
 	struct blkif_request_discard *discard;
+	struct blkif_request_indirect *indirect;
 };
 
 /* i386 protocol version */
@@ -59,9 +66,20 @@ struct blkif_x86_32_discard {
 	blkif_sector_t sector_number;/* start sector idx on disk             */
 	uint64_t       nr_sectors;   /* number of contiguous sectors         */
 };
+struct blkif_x86_32_indirect {
+	uint8_t        operation;    /* BLKIF_OP_INDIRECT                    */
+	uint8_t        indirect_op;  /* BLKIF_OP_{READ/WRITE}                */
+	uint16_t       nr_segments;  /* number of segments                   */
+	uint64_t       id;           /* private guest value, echoed in resp  */
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+	blkif_vdev_t   handle;       /* same as for read/write requests      */
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+	uint64_t       pad;          /* make it 64-byte aligned */
+};
 union blkif_x86_32_union {
 	struct blkif_x86_32_request generic;
 	struct blkif_x86_32_discard discard;
+	struct blkif_x86_32_indirect indirect;
 };
 struct blkif_x86_32_response {
 	uint64_t        id;              /* copied from request */
@@ -87,9 +105,19 @@ struct blkif_x86_64_discard {
 	blkif_sector_t sector_number;/* start sector idx on disk             */
 	uint64_t       nr_sectors;   /* number of contiguous sectors         */
 };
+struct blkif_x86_64_indirect {
+	uint8_t        operation;    /* BLKIF_OP_INDIRECT                    */
+	uint8_t        indirect_op;  /* BLKIF_OP_{READ/WRITE}                */
+	uint16_t       nr_segments;  /* number of segments                   */
+	uint64_t       __attribute__((__aligned__(8))) id;
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+	blkif_vdev_t   handle;       /* same as for read/write requests      */
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+};
 union blkif_x86_64_union {
 	struct blkif_x86_64_request generic;
 	struct blkif_x86_64_discard discard;
+	struct blkif_x86_64_indirect indirect;
 };
 struct blkif_x86_64_response {
 	uint64_t       __attribute__((__aligned__(8))) id;
@@ -141,6 +169,14 @@ static void inline blkif_get_x86_32_req(
 		/* All fields up to sector_number got copied above already. */
 		dst.discard->nr_sectors = src->discard.nr_sectors;
 		break;
+	case BLKIF_OP_INDIRECT:
+		/* All fields up to sector_number got copied above already. */
+		dst.indirect->handle = src->indirect.handle;
+		n = min_t(unsigned int, BLKIF_INDIRECT_PAGES(dst.indirect->nr_segments),
+			  BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
+		for (i = 0; i < n; ++i)
+			dst.indirect->indirect_grefs[i] = src->indirect.indirect_grefs[i];
+		break;
 	}
 #endif
 }
@@ -170,6 +206,14 @@ static void inline blkif_get_x86_64_req(
 		/* All fields up to sector_number got copied above already. */
 		dst.discard->nr_sectors = src->discard.nr_sectors;
 		break;
+	case BLKIF_OP_INDIRECT:
+		/* All fields up to sector_number got copied above already. */
+		dst.indirect->handle = src->indirect.handle;
+		n = min_t(unsigned int, BLKIF_INDIRECT_PAGES(dst.indirect->nr_segments),
+			  BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
+		for (i = 0; i < n; ++i)
+			dst.indirect->indirect_grefs[i] = src->indirect.indirect_grefs[i];
+		break;
 	}
 #endif
 }
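
BLKIF_INDIRECT_PAGES() above is the usual round-up division: it yields the
number of indirect grant pages needed to carry a given segment count. A
standalone check of the arithmetic (the 8-byte segment layout below is
illustrative only; the real struct comes from the blkif interface header):

#include <stdio.h>

#define PAGE_SIZE 4096

struct blkif_request_segment { unsigned int gref, pad; };	/* illustrative */

#define BLKIF_SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE / sizeof(struct blkif_request_segment))
#define BLKIF_INDIRECT_PAGES(segs) \
	(((segs) + BLKIF_SEGS_PER_INDIRECT_FRAME - 1) \
	 / BLKIF_SEGS_PER_INDIRECT_FRAME)

int main(void)
{
	/* 4096 / 8 = 512 segments per frame: 1 and 512 need one page,
	 * 513 spills into a second. Prints "1 1 2". */
	printf("%zu %zu %zu\n", BLKIF_INDIRECT_PAGES(1),
	       BLKIF_INDIRECT_PAGES(512), BLKIF_INDIRECT_PAGES(513));
	return 0;
}
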
--- head.orig/include/xen/interface/io/blkif.h	2014-01-30 10:34:23.000000000 +0100
+++ head/include/xen/interface/io/blkif.h	2014-01-30 10:34:40.000000000 +0100
@@ -562,6 +562,23 @@ struct blkif_request {
 #endif
 		uint64_t     id;         /* private guest value, echoed in resp  */
 	} other;
+	struct __attribute__((__packed__)) blkif_request_indirect {
+		uint8_t        indirect_op;
+		uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+		uint32_t       _pad1;    /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+		uint64_t       id;
+		blkif_sector_t sector_number;
+		blkif_vdev_t   handle;
+		uint16_t       _pad2;
+		grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+		uint32_t      _pad3;     /* make it 64-byte aligned */
+#else
+		uint64_t      _pad3;     /* make it 64-byte aligned */
+#endif
+	} indirect;
     } u;
 } __attribute__((__packed__));
 #endif
--- head.orig/mm/page_alloc.c	2014-01-22 14:18:29.000000000 +0100
+++ head/mm/page_alloc.c	2014-01-22 14:19:44.000000000 +0100
@@ -5656,8 +5656,8 @@ static void __setup_per_zone_wmarks(void
 			high = percpu_pagelist_fraction
 			       ? zone->present_pages / percpu_pagelist_fraction
 			       : 5 * zone_batchsize(zone);
-			setup_pagelist_highmark(
-				per_cpu_ptr(zone->pageset, cpu), high);
+			pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
+					 high);
 		}
 	}
 #endif