From: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
Subject: Linux: 3.4
Patch-mainline: 3.4

 This patch contains the Xen-specific differences between Linux 3.3 and 3.4.
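
 Notable 3.4 changes the port has to follow, as reflected below: the
 asm/system.h split (mirrored here by mach-xen asm/special_insns.h and
 asm/switch_to.h wrappers), the new asm/fpu-internal.h FPU-switching
 helpers, the dma_map_ops .alloc/.free signature change that threads a
 struct dma_attrs pointer through, the x32 ABI (TIF_ADDR32/TIF_X32,
 __SYSCALL_MASK), the io_apic_ops accessor indirection, the idle-loop
 consolidation into process.c, the trap_no -> trap_nr rename, and the
 acpi_os_set_prepare_sleep() hook for forwarding ACPI S-state entry to
 the hypervisor.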

Automatically created from "patch-3.4" by xen-port-patches.py
Acked-by: jbeulich@suse.com

--- head.orig/arch/x86/include/asm/acpi.h	2013-03-21 14:20:18.000000000 +0100
+++ head/arch/x86/include/asm/acpi.h	2013-08-09 15:36:59.000000000 +0200
@@ -31,10 +31,6 @@
 #include <asm/mpspec.h>
 #include <asm/realmode.h>
 
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#include <xen/interface/platform.h>
-#endif
-
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
 
@@ -120,27 +116,6 @@ extern int (*acpi_suspend_lowlevel)(void
 /* Physical address to resume after wakeup */
 #define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
 
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-static inline int acpi_notify_hypervisor_state(u8 sleep_state,
-					       u32 pm1a_cnt_val,
-					       u32 pm1b_cnt_val)
-{
-	struct xen_platform_op op = {
-		.cmd = XENPF_enter_acpi_sleep,
-		.interface_version = XENPF_INTERFACE_VERSION,
-		.u = {
-			.enter_acpi_sleep = {
-				.pm1a_cnt_val = pm1a_cnt_val,
-				.pm1b_cnt_val = pm1b_cnt_val,
-				.sleep_state = sleep_state,
-			},
-		},
-	};
-
-	return HYPERVISOR_platform_op(&op);
-}
-#endif
-
 /*
  * Check if the CPU can handle C2 and deeper
  */
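
(The acpi_notify_hypervisor_state() inline removed above is not lost: it
returns in extended form as xen_sleep() in the processor_extcntl_xen.c
hunk further down, registered via acpi_os_set_prepare_sleep().)
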
--- head.orig/arch/x86/include/asm/compat.h	2013-08-15 11:59:07.000000000 +0200
+++ head/arch/x86/include/asm/compat.h	2012-10-23 16:00:15.000000000 +0200
@@ -301,7 +301,11 @@ static inline void __user *arch_compat_a
 		sp = task_pt_regs(current)->sp;
 	} else {
 		/* -128 for the x32 ABI redzone */
+#ifndef CONFIG_XEN
 		sp = this_cpu_read(old_rsp) - 128;
+#else
+		sp = task_pt_regs(current)->sp - 128;
+#endif
 	}
 
 	return (void __user *)round_down(sp - len, 16);
--- head.orig/arch/x86/include/asm/debugreg.h	2013-01-08 12:02:11.000000000 +0100
+++ head/arch/x86/include/asm/debugreg.h	2013-01-08 12:07:43.000000000 +0100
@@ -19,6 +19,7 @@ DECLARE_PER_CPU(unsigned long, cpu_dr7);
 
 static inline unsigned long native_get_debugreg(int regno)
 {
+#ifndef CONFIG_XEN
 	unsigned long val = 0;	/* Damn you, gcc! */
 
 	switch (regno) {
@@ -44,10 +45,14 @@ static inline unsigned long native_get_d
 		BUG();
 	}
 	return val;
+#else
+	return HYPERVISOR_get_debugreg(regno);
+#endif
 }
 
 static inline void native_set_debugreg(int regno, unsigned long value)
 {
+#ifndef CONFIG_XEN
 	switch (regno) {
 	case 0:
 		asm("mov %0, %%db0"	::"r" (value));
@@ -70,6 +75,9 @@ static inline void native_set_debugreg(i
 	default:
 		BUG();
 	}
+#else
+	WARN_ON(HYPERVISOR_set_debugreg(regno, value));
+#endif
 }
 
 static inline void hw_breakpoint_disable(void)
--- head.orig/arch/x86/include/mach-xen/asm/agp.h	2011-02-01 14:54:13.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/agp.h	2012-04-12 09:53:01.000000000 +0200
@@ -3,7 +3,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
-#include <asm/system.h>
+#include <asm/special_insns.h>
 
 /*
  * Functions to keep the agpgart mappings coherent with the MMU. The
--- head.orig/arch/x86/include/mach-xen/asm/dma-mapping.h	2011-02-01 14:54:13.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/dma-mapping.h	2012-04-11 14:25:16.000000000 +0200
@@ -18,7 +18,8 @@ static inline phys_addr_t dma_to_phys(st
 	return machine_to_phys(daddr);
 }
 
-void dma_generic_free_coherent(struct device *, size_t, void *, dma_addr_t);
+void dma_generic_free_coherent(struct device *, size_t, void *, dma_addr_t,
+			       struct dma_attrs *);
 
 extern int range_straddles_page_boundary(paddr_t p, size_t size);
 
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ head/arch/x86/include/mach-xen/asm/fpu-internal.h	2012-10-29 15:23:05.000000000 +0100
@@ -0,0 +1,55 @@
+#ifndef _FPU_INTERNAL_H
+#include <asm/i387.h>
+#define switch_fpu_prepare native_switch_fpu_prepare
+#include_next <asm/fpu-internal.h>
+#undef switch_fpu_prepare
+
+static inline void xen_thread_fpu_begin(struct task_struct *tsk,
+					multicall_entry_t *mcl)
+{
+	if (mcl) {
+		mcl->op = __HYPERVISOR_fpu_taskswitch;
+		mcl->args[0] = 0;
+	}
+	__thread_set_has_fpu(tsk);
+}
+
+static inline fpu_switch_t xen_switch_fpu_prepare(struct task_struct *old,
+						  struct task_struct *new,
+						  int cpu,
+						  multicall_entry_t **mcl)
+{
+	fpu_switch_t fpu;
+
+	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	if (__thread_has_fpu(old)) {
+		if (!__save_init_fpu(old))
+			cpu = ~0;
+		old->thread.fpu.last_cpu = cpu;
+		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */
+
+		/* Don't change CR0.TS if we just switch! */
+		if (fpu.preload) {
+			new->fpu_counter++;
+			__thread_set_has_fpu(new);
+			prefetch(new->thread.fpu.state);
+		} else {
+			(*mcl)->op = __HYPERVISOR_fpu_taskswitch;
+			(*mcl)++->args[0] = 1;
+		}
+	} else {
+		old->fpu_counter = 0;
+		old->thread.fpu.last_cpu = ~0;
+		if (fpu.preload) {
+			new->fpu_counter++;
+			if (fpu_lazy_restore(new, cpu))
+				fpu.preload = 0;
+			else
+				prefetch(new->thread.fpu.state);
+			xen_thread_fpu_begin(new, (*mcl)++);
+		}
+	}
+	return fpu;
+}
+
+#endif
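
A remark on the wrapper technique this new header relies on (the
switch_to.h wrapper below plays the same trick): the mach-xen include
directory is searched before the generic one, so the header can rename a
native inline with a macro, pull in the unmodified upstream header via
#include_next, and then layer a Xen-aware variant on top. A minimal
sketch of the pattern, with hypothetical names:

	/* mach-xen/asm/foo.h -- found first on the include path */
	#ifndef _ASM_X86_FOO_H		/* guard is owned by the generic header */
	#define do_foo native_do_foo	/* rename the generic inline ...    */
	#include_next <asm/foo.h>	/* ... while including it unmodified */
	#undef do_foo

	static inline void do_foo(void)
	{
		/* Xen-specific behaviour; native_do_foo() stays callable */
		native_do_foo();
	}
	#endif
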
--- head.orig/arch/x86/include/mach-xen/asm/highmem.h	2011-02-01 15:09:47.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/highmem.h	2012-04-11 13:26:23.000000000 +0200
@@ -60,7 +60,7 @@ void *kmap(struct page *page);
 void kunmap(struct page *page);
 
 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
--- head.orig/arch/x86/include/mach-xen/asm/hypervisor.h	2011-08-23 13:35:01.000000000 +0200
+++ head/arch/x86/include/mach-xen/asm/hypervisor.h	2012-05-11 16:45:57.000000000 +0200
@@ -45,11 +45,14 @@
 
 extern shared_info_t *HYPERVISOR_shared_info;
 
-#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
-#ifdef CONFIG_SMP
-#define current_vcpu_info() vcpu_info(smp_processor_id())
-#else
-#define current_vcpu_info() vcpu_info(0)
+#ifdef CONFIG_XEN
+# define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
+# ifdef CONFIG_SMP
+#  include <asm/smp-processor-id.h>
+#  define current_vcpu_info() vcpu_info(smp_processor_id())
+# else
+#  define current_vcpu_info() vcpu_info(0)
+# endif
 #endif
 
 #ifdef CONFIG_X86_32
@@ -132,9 +135,9 @@ bool __cold hypervisor_oom(void);
 u64 jiffies_to_st(unsigned long jiffies);
 
 #ifdef CONFIG_XEN_SCRUB_PAGES
-void scrub_pages(void *, unsigned int);
+void xen_scrub_pages(void *, unsigned int);
 #else
-#define scrub_pages(_p,_n) ((void)0)
+#define xen_scrub_pages(_p,_n) ((void)0)
 #endif
 
 #if defined(CONFIG_XEN) && !defined(MODULE)
--- head.orig/arch/x86/include/mach-xen/asm/processor.h	2012-02-29 10:59:05.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/processor.h	2012-05-23 13:41:10.000000000 +0200
@@ -14,13 +14,13 @@ struct mm_struct;
 #include <asm/sigcontext.h>
 #include <asm/current.h>
 #include <asm/cpufeature.h>
-#include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 #include <asm/percpu.h>
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+#include <asm/special_insns.h>
 
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -29,9 +29,18 @@ struct mm_struct;
 #include <linux/math64.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/irqflags.h>
 
 #include <xen/interface/physdev.h>
 
+/*
+ * We handle most unaligned accesses in hardware.  On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN	0
+
 #define HBP_NUM 4
 /*
  * Default implementation of macro that returns current
@@ -175,6 +184,7 @@ extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
+void print_cpu_msr(struct cpuinfo_x86 *);
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
@@ -472,7 +482,7 @@ struct thread_struct {
 	unsigned long           ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
-	unsigned long		trap_no;
+	unsigned long		trap_nr;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
 	struct fpu		fpu;
@@ -490,16 +500,6 @@ struct thread_struct {
 	unsigned		io_bitmap_max;
 };
 
-static inline unsigned long xen_get_debugreg(int regno)
-{
-	return HYPERVISOR_get_debugreg(regno);
-}
-
-static inline void xen_set_debugreg(int regno, unsigned long value)
-{
-	WARN_ON(HYPERVISOR_set_debugreg(regno, value));
-}
-
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
@@ -535,14 +535,6 @@ native_load_sp0(struct tss_struct *tss, 
 #define __cpuid			xen_cpuid
 #define paravirt_enabled()	1
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)				\
-	(var) = xen_get_debugreg(register)
-#define set_debugreg(value, register)				\
-	xen_set_debugreg(register, value)
-
 #define load_sp0 xen_load_sp0
 
 #define set_iopl_mask xen_set_iopl_mask
@@ -880,9 +872,9 @@ extern unsigned long thread_saved_pc(str
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 					0xc0000000 : 0xFFFFe000)
 
-#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
+#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
+#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 
 #define STACK_TOP		TASK_SIZE
@@ -903,6 +895,12 @@ extern unsigned long thread_saved_pc(str
 #define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
+
+/*
+ * User space RSP while inside the SYSCALL fast path
+ */
+DECLARE_PER_CPU(unsigned long, old_rsp);
+
 #endif /* CONFIG_X86_64 */
 
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
@@ -977,4 +975,14 @@ extern bool cpu_has_amd_erratum(const in
 #define cpu_has_amd_erratum(x)	(false)
 #endif /* CONFIG_CPU_SUP_AMD */
 
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void xen_idle(void);
+bool set_pm_idle_to_default(void);
+
+void stop_this_cpu(void *dummy);
+
 #endif /* _ASM_X86_PROCESSOR_H */
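
For context on the NET_IP_ALIGN override in the hunks above: drivers use
the constant to size the headroom they reserve in receive buffers, so
that the 14-byte Ethernet header leaves the following IP header
word-aligned. The typical receive-path pattern, sketched with a
hypothetical buffer size:

	/* NET_IP_ALIGN == 2 word-aligns the IP header behind the 14-byte
	 * Ethernet header; the 0 chosen above keeps the DMA buffer itself
	 * aligned instead, which x86 handles fine and Nehalem prefers. */
	skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, NET_IP_ALIGN);
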
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ head/arch/x86/include/mach-xen/asm/special_insns.h	2012-07-05 12:19:22.000000000 +0200
@@ -0,0 +1,247 @@
+#ifndef _ASM_X86_SPECIAL_INSNS_H
+#define _ASM_X86_SPECIAL_INSNS_H
+
+
+#ifdef __KERNEL__
+
+#include <asm/barrier.h>
+#include <asm/hypervisor.h>
+#include <asm/maddr.h>
+
+DECLARE_PER_CPU(unsigned long, xen_x86_cr0);
+DECLARE_PER_CPU(unsigned long, xen_x86_cr0_upd);
+
+static inline unsigned long xen_read_cr0_upd(void)
+{
+	unsigned long upd = percpu_read(xen_x86_cr0_upd);
+	rmb();
+	return upd;
+}
+
+static inline void xen_clear_cr0_upd(void)
+{
+	wmb();
+	percpu_write(xen_x86_cr0_upd, 0);
+}
+
+static inline void xen_clts(void)
+{
+	if (unlikely(xen_read_cr0_upd()))
+		HYPERVISOR_fpu_taskswitch(0);
+	else if (percpu_read(xen_x86_cr0) & X86_CR0_TS) {
+		percpu_write(xen_x86_cr0_upd, X86_CR0_TS);
+		HYPERVISOR_fpu_taskswitch(0);
+		percpu_and(xen_x86_cr0, ~X86_CR0_TS);
+		xen_clear_cr0_upd();
+	}
+}
+
+static inline void xen_stts(void)
+{
+	if (unlikely(xen_read_cr0_upd()))
+		HYPERVISOR_fpu_taskswitch(1);
+	else if (!(percpu_read(xen_x86_cr0) & X86_CR0_TS)) {
+		percpu_write(xen_x86_cr0_upd, X86_CR0_TS);
+		HYPERVISOR_fpu_taskswitch(1);
+		percpu_or(xen_x86_cr0, X86_CR0_TS);
+		xen_clear_cr0_upd();
+	}
+}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would prevent reordering of
+ * all loads stores around it, which can hurt performance. Solution is to
+ * use a variable and mimic reads and writes to it to enforce serialization
+ */
+#define __force_order machine_to_phys_nr
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline unsigned long xen_read_cr0(void)
+{
+	return likely(!xen_read_cr0_upd()) ?
+	       percpu_read(xen_x86_cr0) : native_read_cr0();
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
+}
+
+static inline void xen_write_cr0(unsigned long val)
+{
+	unsigned long upd = val ^ percpu_read(xen_x86_cr0);
+
+	if (unlikely(percpu_cmpxchg(xen_x86_cr0_upd, 0, upd))) {
+		native_write_cr0(val);
+		return;
+	}
+	switch (upd) {
+	case 0:
+		return;
+	case X86_CR0_TS:
+		HYPERVISOR_fpu_taskswitch(!!(val & X86_CR0_TS));
+		break;
+	default:
+		native_write_cr0(val);
+		break;
+	}
+	percpu_write(xen_x86_cr0, val);
+	xen_clear_cr0_upd();
+}
+
+#define xen_read_cr2() (current_vcpu_info()->arch.cr2)
+#define xen_write_cr2(val) ((void)(current_vcpu_info()->arch.cr2 = (val)))
+
+static inline unsigned long xen_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
+#ifdef CONFIG_X86_32
+	return mfn_to_pfn(xen_cr3_to_pfn(val)) << PAGE_SHIFT;
+#else
+	return machine_to_phys(val);
+#endif
+}
+
+static inline void xen_write_cr3(unsigned long val)
+{
+#ifdef CONFIG_X86_32
+	val = xen_pfn_to_cr3(pfn_to_mfn(val >> PAGE_SHIFT));
+#else
+	val = phys_to_machine(val);
+#endif
+	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long xen_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
+	return val;
+}
+
+#define xen_read_cr4_safe() xen_read_cr4()
+
+static inline void xen_write_cr4(unsigned long val)
+{
+	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long xen_read_cr8(void)
+{
+	return 0;
+}
+
+static inline void xen_write_cr8(unsigned long val)
+{
+	BUG_ON(val);
+}
+#endif
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+
+extern void xen_load_gs_index(unsigned);
+
+static inline unsigned long read_cr0(void)
+{
+	return xen_read_cr0();
+}
+
+static inline void write_cr0(unsigned long x)
+{
+	xen_write_cr0(x);
+}
+
+static inline unsigned long read_cr2(void)
+{
+	return xen_read_cr2();
+}
+
+static inline void write_cr2(unsigned long x)
+{
+	xen_write_cr2(x);
+}
+
+static inline unsigned long read_cr3(void)
+{
+	return xen_read_cr3();
+}
+
+static inline void write_cr3(unsigned long x)
+{
+	xen_write_cr3(x);
+}
+
+static inline unsigned long read_cr4(void)
+{
+	return xen_read_cr4();
+}
+
+static inline unsigned long read_cr4_safe(void)
+{
+	return xen_read_cr4_safe();
+}
+
+static inline void write_cr4(unsigned long x)
+{
+	xen_write_cr4(x);
+}
+
+static inline void wbinvd(void)
+{
+	native_wbinvd();
+}
+
+#ifdef CONFIG_X86_64
+
+static inline unsigned long read_cr8(void)
+{
+	return xen_read_cr8();
+}
+
+static inline void write_cr8(unsigned long x)
+{
+	xen_write_cr8(x);
+}
+
+static inline void load_gs_index(unsigned selector)
+{
+	xen_load_gs_index(selector);
+}
+
+#endif
+
+/* Clear the 'TS' bit */
+static inline void clts(void)
+{
+	xen_clts();
+}
+
+static inline void stts(void)
+{
+	xen_stts();
+}
+
+static inline void clflush(volatile void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
+}
+
+#define nop() asm volatile ("nop")
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_SPECIAL_INSNS_H */
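
Worth spelling out how the CR0 shadow above pays off: xen_x86_cr0 caches
the guest's view of CR0 per CPU, and xen_x86_cr0_upd is claimed with a
cmpxchg so that an update racing with another one on the same CPU
(e.g. from an interrupt) falls back to the trapping native_write_cr0()
path rather than going through the shadow. The common TS manipulations
then cost at most one hypercall, and a redundant one nothing at all:

	clts();	/* shadow has TS set: one HYPERVISOR_fpu_taskswitch(0)  */
	clts();	/* shadow has TS clear already: no hypercall, no trap   */
	stts();	/* back to TS set: one HYPERVISOR_fpu_taskswitch(1)     */
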
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ head/arch/x86/include/mach-xen/asm/switch_to.h	2012-04-11 13:47:55.000000000 +0200
@@ -0,0 +1,9 @@
+#ifndef _ASM_X86_SWITCH_TO_H
+
+#define __switch_to_xtra(prev, next, tss) __switch_to_xtra(prev, next)
+
+#include_next <asm/switch_to.h>
+
+#undef __switch_to_xtra
+
+#endif /* _ASM_X86_SWITCH_TO_H */
--- head.orig/arch/x86/include/mach-xen/asm/tlbflush.h	2011-02-01 15:09:47.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/tlbflush.h	2012-04-11 13:26:23.000000000 +0200
@@ -5,7 +5,7 @@
 #include <linux/sched.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/special_insns.h>
 
 #define __flush_tlb() xen_tlb_flush()
 #define __flush_tlb_global() xen_tlb_flush()
--- head.orig/arch/x86/kernel/acpi/processor_extcntl_xen.c	2011-02-01 15:03:10.000000000 +0100
+++ head/arch/x86/kernel/acpi/processor_extcntl_xen.c	2013-08-14 15:14:50.000000000 +0200
@@ -190,10 +190,33 @@ static struct processor_extcntl_ops xen_
 	.hotplug		= xen_hotplug_notifier,
 };
 
+static int xen_sleep(u8 sleep_state, u32 val_a, u32 val_b, bool extended)
+{
+	struct xen_platform_op op = {
+		.cmd = XENPF_enter_acpi_sleep,
+		.interface_version = XENPF_INTERFACE_VERSION,
+		.u.enter_acpi_sleep = {
+			.pm1a_cnt_val = val_a,
+			.pm1b_cnt_val = val_b,
+			.sleep_state = sleep_state,
+			.flags = extended ? XENPF_ACPI_SLEEP_EXTENDED : 0,
+		},
+	};
+	int err = HYPERVISOR_platform_op(&op);
+
+	if (!err)
+		return 1;
+
+	pr_err("ACPI: Hypervisor failure [%d]\n", err);
+	return -1;
+}
+
 static int __init init_extcntl(void)
 {
 	unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
 
+	acpi_os_set_prepare_sleep(xen_sleep);
+
 	if (!pmbits)
 		return 0;
 	if (pmbits & XEN_PROCESSOR_PM_CX)
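
The 1/-1 return convention in xen_sleep() follows what the
acpi_os_set_prepare_sleep() callback contract appears to expect: a
positive return tells the ACPI core that the transition was already
carried out (here by the hypervisor), so the PM1x control writes are
skipped, while a negative value aborts the suspend. The extra "extended"
argument, forwarded as XENPF_ACPI_SLEEP_EXTENDED, covers the ACPI 5.0
extended sleep registers, which the tboot hook patched further down
simply rejects.
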
--- head.orig/arch/x86/kernel/apic/io_apic-xen.c	2012-02-09 12:32:50.000000000 +0100
+++ head/arch/x86/kernel/apic/io_apic-xen.c	2012-04-11 14:22:38.000000000 +0200
@@ -72,9 +72,30 @@ unsigned long io_apic_irqs;
 #endif /* CONFIG_XEN */
 
 #define __apicdebuginit(type) static type __init
+
 #define for_each_irq_pin(entry, head) \
 	for (entry = head; entry; entry = entry->next)
 
+#ifndef CONFIG_XEN
+static void		__init __ioapic_init_mappings(void);
+
+static unsigned int	__io_apic_read  (unsigned int apic, unsigned int reg);
+static void		__io_apic_write (unsigned int apic, unsigned int reg, unsigned int val);
+static void		__io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+
+static struct io_apic_ops io_apic_ops = {
+	.init	= __ioapic_init_mappings,
+	.read	= __io_apic_read,
+	.write	= __io_apic_write,
+	.modify = __io_apic_modify,
+};
+
+void __init set_io_apic_ops(const struct io_apic_ops *ops)
+{
+	io_apic_ops = *ops;
+}
+#endif
+
 /*
  *      Is the SiS APIC rmw bug present ?
  *      -1 = don't know, 0 = no, 1 = yes
@@ -309,6 +330,22 @@ static void free_irq_at(unsigned int at,
 	irq_free_desc(at);
 }
 
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+	return io_apic_ops.read(apic, reg);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	io_apic_ops.write(apic, reg, value);
+}
+
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	io_apic_ops.modify(apic, reg, value);
+}
+
+
 struct io_apic {
 	unsigned int index;
 	unsigned int unused[3];
@@ -328,53 +365,29 @@ static inline void io_apic_eoi(unsigned 
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
 	writel(vector, &io_apic->eoi);
 }
-#endif /* !CONFIG_XEN */
 
-static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+static unsigned int __io_apic_read(unsigned int apic, unsigned int reg)
 {
-#ifndef CONFIG_XEN
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
 	writel(reg, &io_apic->index);
 	return readl(&io_apic->data);
-#else
-	struct physdev_apic apic_op;
-	int ret;
-
-	apic_op.apic_physbase = mpc_ioapic_addr(apic);
-	apic_op.reg = reg;
-	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
-	if (ret)
-		return ret;
-	return apic_op.value;
-#endif
 }
 
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+static void __io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
 {
-#ifndef CONFIG_XEN
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
+
 	writel(reg, &io_apic->index);
 	writel(value, &io_apic->data);
-#else
-	struct physdev_apic apic_op;
-
-	apic_op.apic_physbase = mpc_ioapic_addr(apic);
-	apic_op.reg = reg;
-	apic_op.value = value;
-	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
-#endif
 }
 
-#ifdef CONFIG_XEN
-#define io_apic_modify io_apic_write
-#else
 /*
  * Re-write a value: to be used for read-modify-write
  * cycles where the read already set up the index register.
  *
  * Older SiS APIC requires we rewrite the index register
  */
-static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+static void __io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
 
@@ -405,7 +418,32 @@ static bool io_apic_level_ack_pending(st
 
 	return false;
 }
-#endif /* CONFIG_XEN */
+#else /* !CONFIG_XEN */
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+	struct physdev_apic apic_op;
+	int ret;
+
+	apic_op.apic_physbase = mpc_ioapic_addr(apic);
+	apic_op.reg = reg;
+	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
+	if (ret)
+		return ret;
+	return apic_op.value;
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	struct physdev_apic apic_op;
+
+	apic_op.apic_physbase = mpc_ioapic_addr(apic);
+	apic_op.reg = reg;
+	apic_op.value = value;
+	WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
+}
+
+#define io_apic_modify io_apic_write
+#endif /* !CONFIG_XEN */
 
 union entry_union {
 	struct { u32 w1, w2; };
@@ -419,6 +457,7 @@ static struct IO_APIC_route_entry __ioap
 
 	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
 	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
+
 	return eu.entry;
 }
 
@@ -426,9 +465,11 @@ static struct IO_APIC_route_entry ioapic
 {
 	union entry_union eu;
 	unsigned long flags;
+
 	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	eu.entry = __ioapic_read_entry(apic, pin);
 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+
 	return eu.entry;
 }
 #endif
@@ -439,8 +480,7 @@ static struct IO_APIC_route_entry ioapic
  * the interrupt, and we need to make sure the entry is fully populated
  * before that happens.
  */
-static void
-__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	union entry_union eu = {{0, 0}};
 
@@ -452,6 +492,7 @@ __ioapic_write_entry(int apic, int pin, 
 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
+
 	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__ioapic_write_entry(apic, pin, e);
 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -479,8 +520,7 @@ static void ioapic_mask_entry(int apic, 
  * shared ISA-space IRQs, so we have to support them. We are super
  * fast in the common case, and fast for shared ISA-space IRQs.
  */
-static int
-__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
+static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
 {
 	struct irq_pin_list **last, *entry;
 
@@ -565,6 +605,7 @@ static void io_apic_sync(struct irq_pin_
 	 * a dummy read from the IO-APIC
 	 */
 	struct io_apic __iomem *io_apic;
+
 	io_apic = io_apic_base(entry->apic);
 	readl(&io_apic->data);
 }
@@ -2593,21 +2634,73 @@ static void ack_apic_edge(struct irq_dat
 
 atomic_t irq_mis_count;
 
-static void ack_apic_level(struct irq_data *data)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	int i, do_unmask_irq = 0, irq = data->irq;
-	unsigned long v;
-
-	irq_complete_move(cfg);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+{
 	/* If we are moving the irq we need to mask it */
 	if (unlikely(irqd_is_setaffinity_pending(data))) {
-		do_unmask_irq = 1;
 		mask_ioapic(cfg);
+		return true;
 	}
+	return false;
+}
+
+static inline void ioapic_irqd_unmask(struct irq_data *data,
+				      struct irq_cfg *cfg, bool masked)
+{
+	if (unlikely(masked)) {
+		/* Only migrate the irq if the ack has been received.
+		 *
+		 * On rare occasions the broadcast level triggered ack gets
+		 * delayed going to ioapics, and if we reprogram the
+		 * vector while Remote IRR is still set the irq will never
+		 * fire again.
+		 *
+		 * To prevent this scenario we read the Remote IRR bit
+		 * of the ioapic.  This has two effects.
+		 * - On any sane system the read of the ioapic will
+		 *   flush writes (and acks) going to the ioapic from
+		 *   this cpu.
+		 * - We get to see if the ACK has actually been delivered.
+		 *
+		 * Based on failed experiments of reprogramming the
+		 * ioapic entry from outside of irq context starting
+		 * with masking the ioapic entry and then polling until
+		 * Remote IRR was clear before reprogramming the
+		 * ioapic I don't trust the Remote IRR bit to be
+		 * completey accurate.
+		 *
+		 * However there appears to be no other way to plug
+		 * this race, so if the Remote IRR bit is not
+		 * accurate and is causing problems then it is a hardware bug
+		 * and you can go talk to the chipset vendor about it.
+		 */
+		if (!io_apic_level_ack_pending(cfg))
+			irq_move_masked_irq(data);
+		unmask_ioapic(cfg);
+	}
+}
+#else
+static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+{
+	return false;
+}
+static inline void ioapic_irqd_unmask(struct irq_data *data,
+				      struct irq_cfg *cfg, bool masked)
+{
+}
 #endif
 
+static void ack_apic_level(struct irq_data *data)
+{
+	struct irq_cfg *cfg = data->chip_data;
+	int i, irq = data->irq;
+	unsigned long v;
+	bool masked;
+
+	irq_complete_move(cfg);
+	masked = ioapic_irqd_mask(data, cfg);
+
 	/*
 	 * It appears there is an erratum which affects at least version 0x11
 	 * of I/O APIC (that's the 82093AA and cores integrated into various
@@ -2662,38 +2755,7 @@ static void ack_apic_level(struct irq_da
 		eoi_ioapic_irq(irq, cfg);
 	}
 
-	/* Now we can move and renable the irq */
-	if (unlikely(do_unmask_irq)) {
-		/* Only migrate the irq if the ack has been received.
-		 *
-		 * On rare occasions the broadcast level triggered ack gets
-		 * delayed going to ioapics, and if we reprogram the
-		 * vector while Remote IRR is still set the irq will never
-		 * fire again.
-		 *
-		 * To prevent this scenario we read the Remote IRR bit
-		 * of the ioapic.  This has two effects.
-		 * - On any sane system the read of the ioapic will
-		 *   flush writes (and acks) going to the ioapic from
-		 *   this cpu.
-		 * - We get to see if the ACK has actually been delivered.
-		 *
-		 * Based on failed experiments of reprogramming the
-		 * ioapic entry from outside of irq context starting
-		 * with masking the ioapic entry and then polling until
-		 * Remote IRR was clear before reprogramming the
-		 * ioapic I don't trust the Remote IRR bit to be
-		 * completey accurate.
-		 *
-		 * However there appears to be no other way to plug
-		 * this race, so if the Remote IRR bit is not
-		 * accurate and is causing problems then it is a hardware bug
-		 * and you can go talk to the chipset vendor about it.
-		 */
-		if (!io_apic_level_ack_pending(cfg))
-			irq_move_masked_irq(data);
-		unmask_ioapic(cfg);
-	}
+	ioapic_irqd_unmask(data, cfg, masked);
 }
 
 #ifdef CONFIG_IRQ_REMAP
@@ -3989,6 +4051,11 @@ static struct resource * __init ioapic_s
 
 void __init ioapic_and_gsi_init(void)
 {
+	io_apic_ops.init();
+}
+
+static void __init __ioapic_init_mappings(void)
+{
 	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
 	struct resource *ioapic_res;
 	int i;
@@ -4084,18 +4151,36 @@ int mp_find_ioapic_pin(int ioapic, u32 g
 static __init int bad_ioapic(unsigned long address)
 {
 	if (nr_ioapics >= MAX_IO_APICS) {
-		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
-		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
+		pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
+			MAX_IO_APICS, nr_ioapics);
 		return 1;
 	}
 	if (!address) {
-		printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
-		       " found in table, skipping!\n");
+		pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
 		return 1;
 	}
 	return 0;
 }
 
+static __init int bad_ioapic_register(int idx)
+{
+	union IO_APIC_reg_00 reg_00;
+	union IO_APIC_reg_01 reg_01;
+	union IO_APIC_reg_02 reg_02;
+
+	reg_00.raw = io_apic_read(idx, 0);
+	reg_01.raw = io_apic_read(idx, 1);
+	reg_02.raw = io_apic_read(idx, 2);
+
+	if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
+		pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
+			mpc_ioapic_addr(idx));
+		return 1;
+	}
+
+	return 0;
+}
+
 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 {
 	int idx = 0;
@@ -4114,6 +4199,14 @@ void __init mp_register_ioapic(int id, u
 #ifndef CONFIG_XEN
 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
 #endif
+
+	if (bad_ioapic_register(idx)) {
+#ifndef CONFIG_XEN
+		clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+#endif
+		return;
+	}
+
 	ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
 	ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
 
@@ -4134,10 +4227,10 @@ void __init mp_register_ioapic(int id, u
 	if (gsi_cfg->gsi_end >= gsi_top)
 		gsi_top = gsi_cfg->gsi_end + 1;
 
-	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
-	       "GSI %d-%d\n", idx, mpc_ioapic_id(idx),
-	       mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
-	       gsi_cfg->gsi_base, gsi_cfg->gsi_end);
+	pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
+		idx, mpc_ioapic_id(idx),
+		mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
+		gsi_cfg->gsi_base, gsi_cfg->gsi_end);
 
 	nr_ioapics++;
 }
--- head.orig/arch/x86/kernel/cpu/amd.c	2013-08-09 15:26:04.000000000 +0200
+++ head/arch/x86/kernel/cpu/amd.c	2013-05-23 17:53:36.000000000 +0200
@@ -619,6 +619,7 @@ static void init_amd(struct cpuinfo_x86 
 		}
 	}
 
+#ifndef CONFIG_XEN
 	/* re-enable TopologyExtensions if switched off by BIOS */
 	if ((c->x86 == 0x15) &&
 	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
@@ -635,6 +636,7 @@ static void init_amd(struct cpuinfo_x86 
 			}
 		}
 	}
+#endif
 
 	/*
 	 * The way access filter has a performance penalty on some workloads.
--- head.orig/arch/x86/kernel/cpu/common-xen.c	2012-08-01 12:12:01.000000000 +0200
+++ head/arch/x86/kernel/cpu/common-xen.c	2012-08-01 12:13:15.000000000 +0200
@@ -18,6 +18,7 @@
 #include <asm/archrandom.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
+#include <asm/debugreg.h>
 #include <asm/sections.h>
 #include <linux/topology.h>
 #include <linux/cpumask.h>
@@ -28,6 +29,7 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
@@ -992,7 +994,7 @@ static const struct msr_range msr_range_
 	{ 0xc0011000, 0xc001103b},
 };
 
-static void __cpuinit print_cpu_msr(void)
+static void __cpuinit __print_cpu_msr(void)
 {
 	unsigned index_min, index_max;
 	unsigned index;
@@ -1056,13 +1058,13 @@ void __cpuinit print_cpu_info(struct cpu
 	else
 		printk(KERN_CONT "\n");
 
-#ifdef CONFIG_SMP
+	print_cpu_msr(c);
+}
+
+void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
+{
 	if (c->cpu_index < show_msr)
-		print_cpu_msr();
-#else
-	if (show_msr)
-		print_cpu_msr();
-#endif
+		__print_cpu_msr();
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -1114,7 +1116,6 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
 DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
-EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
 
 #ifndef CONFIG_X86_NO_TSS
 /*
@@ -1204,7 +1205,6 @@ void debug_stack_reset(void)
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
-EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
@@ -1251,17 +1251,6 @@ static void dbg_restore_debug_regs(void)
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */
 
-#ifndef CONFIG_XEN
-/*
- * Prints an error where the NUMA and configured core-number mismatch and the
- * platform didn't override this to fix it up
- */
-void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
-{
-	pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
-}
-#endif
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
--- head.orig/arch/x86/kernel/entry_32-xen.S	2013-01-30 11:57:33.000000000 +0100
+++ head/arch/x86/kernel/entry_32-xen.S	2012-04-11 13:26:23.000000000 +0200
@@ -102,12 +102,6 @@ NMI_MASK	= 0x80000000
 #endif
 .endm
 
-#ifdef CONFIG_VM86
-#define resume_userspace_sig	check_userspace
-#else
-#define resume_userspace_sig	resume_userspace
-#endif
-
 /*
  * User gs save/restore
  *
@@ -331,10 +325,19 @@ ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
-check_userspace:
+resume_userspace_sig:
+#ifdef CONFIG_VM86
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
 	movb PT_CS(%esp), %al
 	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+#else
+	/*
+	 * We can be coming here from a syscall done in the kernel space,
+	 * e.g. a failed kernel_execve().
+	 */
+	movl PT_CS(%esp), %eax
+	andl $SEGMENT_RPL_MASK, %eax
+#endif
 	cmpl $USER_RPL, %eax
 	jb resume_kernel		# not returning to v8086 or userspace
 
--- head.orig/arch/x86/kernel/entry_64-xen.S	2013-05-24 08:26:15.000000000 +0200
+++ head/arch/x86/kernel/entry_64-xen.S	2012-04-11 13:26:23.000000000 +0200
@@ -354,7 +354,7 @@ NMI_MASK = 0x80000000
 	movq %rsp, %rsi
 
 	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
-	testl $3, CS(%rdi)
+	testl $3, CS-RBP(%rsi)
 	je 1f
 	SWAPGS
 	/*
@@ -364,11 +364,10 @@ NMI_MASK = 0x80000000
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
 1:	incl PER_CPU_VAR(irq_count)
-	jne 2f
-	mov PER_CPU_VAR(irq_stack_ptr),%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	CFI_DEF_CFA_REGISTER	rsi
 
-2:	/* Store previous stack value */
+	/* Store previous stack value */
 	pushq %rsi
 	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
 			0x77 /* DW_OP_breg7 */, 0, \
@@ -501,7 +500,12 @@ ENTRY(system_call)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz tracesys
 system_call_fastpath:
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja badsys
 	movq %r10,%rcx
 	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
@@ -613,7 +617,12 @@ tracesys:
 	 */
 	LOAD_ARGS ARGOFFSET, 1
 	RESTORE_REST
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
@@ -753,6 +762,40 @@ ENTRY(stub_rt_sigreturn)
 	CFI_ENDPROC
 END(stub_rt_sigreturn)
 
+#ifdef CONFIG_X86_X32_ABI
+	PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
+
+ENTRY(stub_x32_rt_sigreturn)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	movq %rsp,%rdi
+	FIXUP_TOP_OF_STACK %r11
+	call sys32_x32_rt_sigreturn
+	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_rt_sigreturn)
+
+ENTRY(stub_x32_execve)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	FIXUP_TOP_OF_STACK %r11
+	movq %rsp, %rcx
+	call sys32_execve
+	RESTORE_TOP_OF_STACK %r11
+	movq %rax,RAX(%rsp)
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_execve)
+
+#endif
+
 /*
  * Interrupt exit.
  */ 
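
The __SYSCALL_MASK conditionals added above match the native 3.4
entry_64.S: an x32 system call is an ordinary 64-bit one with an extra
flag bit set in the call number, so with CONFIG_X86_X32_ABI enabled the
number must be masked before indexing sys_call_table. For reference, the
3.4 definitions in asm/unistd.h look roughly like this:

	#define __X32_SYSCALL_BIT	0x40000000

	#ifdef CONFIG_X86_X32_ABI
	# define __SYSCALL_MASK		(~(__X32_SYSCALL_BIT))
	#else
	# define __SYSCALL_MASK		(~0)
	#endif
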
--- head.orig/arch/x86/kernel/irq-xen.c	2013-05-24 10:37:34.000000000 +0200
+++ head/arch/x86/kernel/irq-xen.c	2013-05-24 10:37:38.000000000 +0200
@@ -302,8 +302,13 @@ void fixup_irqs(void)
 		else if (data->chip != &no_irq_chip && !(warned++))
 			set_affinity = 0;
 
+		/*
+		 * We unmask if the irq was not marked masked by the
+		 * core code. That respects the lazy irq disable
+		 * behaviour.
+		 */
 		if (!irqd_can_move_in_process_context(data) &&
-		    !irqd_irq_disabled(data) && chip->irq_unmask)
+		    !irqd_irq_masked(data) && chip->irq_unmask)
 			chip->irq_unmask(data);
 
 		raw_spin_unlock(&desc->lock);
--- head.orig/arch/x86/kernel/ldt-xen.c	2011-02-01 15:03:03.000000000 +0100
+++ head/arch/x86/kernel/ldt-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -15,7 +15,6 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 
-#include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
--- head.orig/arch/x86/kernel/microcode_core-xen.c	2012-02-09 14:22:00.000000000 +0100
+++ head/arch/x86/kernel/microcode_core-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -39,6 +39,7 @@
 
 #include <asm/microcode.h>
 #include <asm/processor.h>
+#include <asm/cpu_device_id.h>
 
 MODULE_DESCRIPTION("Microcode Update Driver");
 MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -183,6 +184,20 @@ static int request_microcode(const char 
 	return error;
 }
 
+#ifdef MODULE
+/* Autoload on Intel and AMD systems */
+static const struct x86_cpu_id microcode_id[] = {
+#ifdef CONFIG_MICROCODE_INTEL
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, },
+#endif
+#ifdef CONFIG_MICROCODE_AMD
+	{ X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, },
+#endif
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, microcode_id);
+#endif
+
 static int __init microcode_init(void)
 {
 	const struct cpuinfo_x86 *c = &boot_cpu_data;
--- head.orig/arch/x86/kernel/pci-dma-xen.c	2012-04-04 14:32:53.000000000 +0200
+++ head/arch/x86/kernel/pci-dma-xen.c	2012-04-11 14:25:52.000000000 +0200
@@ -78,8 +78,8 @@ int dma_set_mask(struct device *dev, u64
 EXPORT_SYMBOL(dma_set_mask);
 
 static struct dma_map_ops swiotlb_dma_ops = {
-	.alloc_coherent = dma_generic_alloc_coherent,
-	.free_coherent = dma_generic_free_coherent,
+	.alloc = dma_generic_alloc_coherent,
+	.free = dma_generic_free_coherent,
 	.mapping_error = swiotlb_dma_mapping_error,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
@@ -126,7 +126,8 @@ void __init pci_iommu_alloc(void)
 	}
 }
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
 	struct page *page;
@@ -179,7 +180,7 @@ again:
 
 #ifdef CONFIG_XEN
 void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
-			       dma_addr_t dma_addr)
+			       dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	unsigned int order = get_order(size);
 	unsigned long va = (unsigned long)vaddr;
@@ -354,10 +355,11 @@ rootfs_initcall(pci_iommu_init);
 
 static __devinit void via_no_dac(struct pci_dev *dev)
 {
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+	if (forbid_dac == 0) {
 		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
 		forbid_dac = 1;
 	}
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
+				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
 #endif
--- head.orig/arch/x86/kernel/pci-nommu-xen.c	2012-11-14 13:13:11.000000000 +0100
+++ head/arch/x86/kernel/pci-nommu-xen.c	2012-11-14 13:17:04.000000000 +0100
@@ -96,8 +96,8 @@ static int nommu_dma_supported(struct de
 }
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent		= dma_generic_alloc_coherent,
-	.free_coherent		= dma_generic_free_coherent,
+	.alloc			= dma_generic_alloc_coherent,
+	.free			= dma_generic_free_coherent,
 	.map_page		= gnttab_map_page,
 	.unmap_page		= gnttab_unmap_page,
 	.map_sg			= gnttab_map_sg,
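
The .alloc_coherent/.free_coherent to .alloc/.free renames in the two
files above track the 3.4 dma_map_ops rework, which threads a struct
dma_attrs pointer through every allocation. Attribute-less callers are
unaffected; the generic helpers pass NULL, roughly:

	/* 3.4 dma-mapping glue, sketched */
	#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
	#define dma_free_coherent(d, s, c, h)	dma_free_attrs(d, s, c, h, NULL)
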
--- head.orig/arch/x86/kernel/process-xen.c	2012-02-09 12:32:50.000000000 +0100
+++ head/arch/x86/kernel/process-xen.c	2012-04-11 14:42:34.000000000 +0200
@@ -12,18 +12,39 @@
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
 #include <asm/idle.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
 #include <xen/evtchn.h>
 
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
+
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
@@ -328,14 +349,107 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 #endif
 
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
+{
+	BUG();
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void enter_idle(void)
+{
+	percpu_write(is_idle, 1);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+		return;
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+	/* idle loop has pid 0 */
+	if (current->pid)
+		return;
+	__exit_idle();
+}
+#endif
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+	/*
+	 * If we're the non-boot CPU, nothing set the stack canary up
+	 * for us.  CPU0 already has it initialized but no harm in
+	 * doing it again.  This is a good place for updating it, as
+	 * we wont ever return from this function (so the invalid
+	 * canaries already on the stack wont ever trigger).
+	 */
+	boot_init_stack_canary();
+	current_thread_info()->status |= TS_POLLING;
+
+	while (1) {
+		tick_nohz_idle_enter();
+
+		while (!need_resched()) {
+			rmb();
+
+			if (cpu_is_offline(smp_processor_id()))
+				play_dead();
+
+			/*
+			 * Idle routines should keep interrupts disabled
+			 * from here on, until they go to idle.
+			 * Otherwise, idle callbacks can misfire.
+			 */
+			local_touch_nmi();
+			local_irq_disable();
+
+			enter_idle();
+
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+
+			/* enter_idle() needs rcu for notifiers */
+			rcu_idle_enter();
+
+			if (cpuidle_idle_call())
+				xen_idle();
+
+			rcu_idle_exit();
+			start_critical_timings();
+
+			/* In many cases the interrupt that ended idle
+			   has already called exit_idle. But some idle
+			   loops can be woken up without interrupt. */
+			__exit_idle();
+		}
+
+		tick_nohz_idle_exit();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
+}
+
 /*
  * We use this if we don't have any better
  * idle routine..
  */
 void xen_idle(void)
 {
-	trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-	trace_cpu_idle(1, smp_processor_id());
+	trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
 	 * TS_POLLING-cleared state must be visible before we
@@ -348,8 +462,8 @@ void xen_idle(void)
 	else
 		local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	trace_power_end_rcuidle(smp_processor_id());
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(default_idle);
@@ -403,8 +517,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 static void mwait_idle(void)
 {
 	if (!need_resched()) {
-		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-		trace_cpu_idle(1, smp_processor_id());
+		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
@@ -414,8 +528,8 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
-		trace_power_end(smp_processor_id());
-		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+		trace_power_end_rcuidle(smp_processor_id());
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
@@ -428,13 +542,13 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
-	trace_cpu_idle(0, smp_processor_id());
+	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
+	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
 	while (!need_resched())
 		cpu_relax();
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	trace_power_end_rcuidle(smp_processor_id());
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifndef CONFIG_XEN
--- head.orig/arch/x86/kernel/process_32-xen.c	2012-08-10 10:47:29.000000000 +0200
+++ head/arch/x86/kernel/process_32-xen.c	2012-08-01 12:13:13.000000000 +0200
@@ -9,7 +9,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -31,20 +30,18 @@
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
-#include <linux/tick.h>
 #include <linux/percpu.h>
 #include <linux/prctl.h>
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/desc.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
@@ -59,7 +56,7 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
+#include <asm/switch_to.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 asmlinkage void cstar_ret_from_fork(void) __asm__("cstar_ret_from_fork");
@@ -72,62 +69,6 @@ unsigned long thread_saved_pc(struct tas
 	return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-	BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-	int cpu = smp_processor_id();
-
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us.  CPU0 already has it initialized but no harm in
-	 * doing it again.  This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-
-	current_thread_info()->status |= TS_POLLING;
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched()) {
-
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu))
-				play_dead();
-
-			local_touch_nmi();
-			local_irq_disable();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-			if (cpuidle_idle_call())
-				xen_idle();
-			start_critical_timings();
-		}
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
-}
-
 void __show_regs(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
--- head.orig/arch/x86/kernel/process_64-xen.c	2012-08-01 12:12:51.000000000 +0200
+++ head/arch/x86/kernel/process_64-xen.c	2012-08-01 12:13:12.000000000 +0200
@@ -17,7 +17,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -35,17 +34,15 @@
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#include <linux/tick.h>
 #include <linux/prctl.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/mmu_context.h>
 #include <asm/prctl.h>
 #include <xen/interface/physdev.h>
@@ -56,116 +53,10 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
+#include <asm/switch_to.h>
 
 asmlinkage extern void ret_from_fork(void);
 
-static DEFINE_PER_CPU(unsigned char, is_idle);
-
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
-void enter_idle(void)
-{
-	percpu_write(is_idle, 1);
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
-		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
-	/* idle loop has pid 0 */
-	if (current->pid)
-		return;
-	__exit_idle();
-}
-
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-	BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us.  CPU0 already has it initialized but no harm in
-	 * doing it again.  This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		while (!need_resched()) {
-
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-			enter_idle();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
-
-			if (cpuidle_idle_call())
-				xen_idle();
-
-			rcu_idle_exit();
-			start_critical_timings();
-
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
-
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
-}
-
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
 {
@@ -375,7 +266,9 @@ start_thread(struct pt_regs *regs, unsig
 void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
 {
 	start_thread_common(regs, new_ip, new_sp,
-			    __USER32_CS, __USER32_DS, __USER32_DS);
+			    test_thread_flag(TIF_X32)
+			    ? __USER_CS : __USER32_CS,
+			    __USER_DS, __USER_DS);
 }
 #endif
 
@@ -551,6 +444,8 @@ void set_personality_64bit(void)
 
 	/* Make sure to be in 64bit mode */
 	clear_thread_flag(TIF_IA32);
+	clear_thread_flag(TIF_ADDR32);
+	clear_thread_flag(TIF_X32);
 
 	/* Ensure the corresponding mm is not marked. */
 	if (current->mm)
@@ -563,21 +458,33 @@ void set_personality_64bit(void)
 	current->personality &= ~READ_IMPLIES_EXEC;
 }
 
-void set_personality_ia32(void)
+void set_personality_ia32(bool x32)
 {
 	/* inherit personality from parent */
 
 	/* Make sure to be in 32bit mode */
-	set_thread_flag(TIF_IA32);
-	current->personality |= force_personality32;
+	set_thread_flag(TIF_ADDR32);
 
 	/* Mark the associated mm as containing 32-bit tasks. */
 	if (current->mm)
 		current->mm->context.ia32_compat = 1;
 
-	/* Prepare the first "return" to user space */
-	current_thread_info()->status |= TS_COMPAT;
+	if (x32) {
+		clear_thread_flag(TIF_IA32);
+		set_thread_flag(TIF_X32);
+		current->personality &= ~READ_IMPLIES_EXEC;
+		/* is_compat_task() uses the presence of the x32
+		   syscall bit flag to determine compat status */
+		current_thread_info()->status &= ~TS_COMPAT;
+	} else {
+		set_thread_flag(TIF_IA32);
+		clear_thread_flag(TIF_X32);
+		current->personality |= force_personality32;
+		/* Prepare the first "return" to user space */
+		current_thread_info()->status |= TS_COMPAT;
+	}
 }
+EXPORT_SYMBOL_GPL(set_personality_ia32);
 
 unsigned long get_wchan(struct task_struct *p)
 {
--- head.orig/arch/x86/kernel/setup-xen.c	2012-02-09 12:32:50.000000000 +0100
+++ head/arch/x86/kernel/setup-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -90,7 +90,6 @@
 #include <asm/processor.h>
 #include <asm/bugs.h>
 
-#include <asm/system.h>
 #include <asm/vsyscall.h>
 #include <asm/cpu.h>
 #include <asm/desc.h>
@@ -572,15 +571,6 @@ static void __init memblock_x86_reserve_
 
 #ifdef CONFIG_KEXEC
 
-static inline unsigned long long get_total_mem(void)
-{
-	unsigned long long total;
-
-	total = max_pfn - min_low_pfn;
-
-	return total << PAGE_SHIFT;
-}
-
 /*
  * Keep the crash kernel below this limit.  On 32 bits earlier kernels
  * would limit the kernel to the low 512 MiB due to mapping restrictions.
@@ -599,7 +589,7 @@ static void __init reserve_crashkernel(v
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	total_mem = get_total_mem();
+	total_mem = memblock_phys_mem_size();
 
 	ret = parse_crashkernel(boot_command_line, total_mem,
 			&crash_size, &crash_base);
@@ -842,10 +832,16 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #ifdef CONFIG_EFI
 	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
-		     EFI_LOADER_SIGNATURE, 4)) {
+		     "EL32", 4)) {
+		efi_enabled = 1;
+		efi_64bit = false;
+	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+		     "EL64", 4)) {
 		efi_enabled = 1;
-		efi_memblock_x86_reserve_range();
+		efi_64bit = true;
 	}
+	if (efi_enabled && efi_memblock_x86_reserve_range())
+		efi_enabled = 0;
 #endif
 #else /* CONFIG_XEN */
 #ifdef CONFIG_X86_32
--- head.orig/arch/x86/kernel/tboot.c	2013-08-15 11:59:07.000000000 +0200
+++ head/arch/x86/kernel/tboot.c	2013-08-09 15:36:50.000000000 +0200
@@ -274,7 +274,8 @@ static void tboot_copy_fadt(const struct
 		offsetof(struct acpi_table_facs, firmware_waking_vector);
 }
 
-static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
+static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control,
+		       bool extended)
 {
 	static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = {
 		/* S0,1,2: */ -1, -1, -1,
@@ -285,6 +286,9 @@ static int tboot_sleep(u8 sleep_state, u
 	if (!tboot_enabled())
 		return 0;
 
+	if (extended)
+		return -1;
+
 	tboot_copy_fadt(&acpi_gbl_FADT);
 	tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control;
 	tboot->acpi_sinfo.pm1b_cnt_val = pm1b_control;
--- head.orig/arch/x86/kernel/time-xen.c	2011-12-23 11:26:26.000000000 +0100
+++ head/arch/x86/kernel/time-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -437,9 +437,6 @@ static irqreturn_t timer_interrupt(int i
 	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
 	struct vcpu_runstate_info runstate;
 
-	/* Keep nmi watchdog up to date */
-	inc_irq_stat(irq0_irqs);
-
 	/*
 	 * Here we are in the timer irq handler. We just have irqs locally
 	 * disabled but we don't know if the timer_bh is running on the other
--- head.orig/arch/x86/kernel/traps-xen.c	2012-07-05 12:00:31.000000000 +0200
+++ head/arch/x86/kernel/traps-xen.c	2012-07-05 12:03:40.000000000 +0200
@@ -50,10 +50,10 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 #include <linux/atomic.h>
-#include <asm/system.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/mce.h>
 
 #include <asm/mach_traps.h>
@@ -123,7 +123,7 @@ do_trap(int trapnr, int signr, char *str
 		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
 		 * On nmi (interrupt 2), do_trap should not be called.
 		 */
-		if (trapnr < 6)
+		if (trapnr < X86_TRAP_UD)
 			goto vm86_trap;
 		goto trap_signal;
 	}
@@ -136,7 +136,7 @@ do_trap(int trapnr, int signr, char *str
 trap_signal:
 #endif
 	/*
-	 * We want error_code and trap_no set for userspace faults and
+	 * We want error_code and trap_nr set for userspace faults and
 	 * kernelspace faults which result in die(), but not
 	 * kernelspace faults which are fixed up.  die() gives the
 	 * process no chance to handle the signal and notice the
@@ -145,7 +145,7 @@ trap_signal:
 	 * delivered, faults.  See also do_general_protection below.
 	 */
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = trapnr;
+	tsk->thread.trap_nr = trapnr;
 
 #ifdef CONFIG_X86_64
 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -168,7 +168,7 @@ trap_signal:
 kernel_trap:
 	if (!fixup_exception(regs)) {
 		tsk->thread.error_code = error_code;
-		tsk->thread.trap_no = trapnr;
+		tsk->thread.trap_nr = trapnr;
 		die(str, regs, error_code);
 	}
 	return;
@@ -207,27 +207,31 @@ dotraplinkage void do_##name(struct pt_r
 	do_trap(trapnr, signr, str, regs, error_code, &info);		\
 }
 
-DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
-DO_ERROR(4, SIGSEGV, "overflow", overflow)
-DO_ERROR(5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
-DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
+		regs->ip)
+DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
+DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
+		regs->ip)
+DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
+		coprocessor_segment_overrun)
+DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
 #ifdef CONFIG_X86_32
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
 #endif
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
+		BUS_ADRALN, 0)
 
 #ifdef CONFIG_X86_64
 /* Runs on IST stack */
 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
 {
 	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-			12, SIGBUS) == NOTIFY_STOP)
+			X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
 		return;
 	preempt_conditional_sti(regs);
-	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+	do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
 	preempt_conditional_cli(regs);
 }
 
@@ -237,10 +241,10 @@ dotraplinkage void do_double_fault(struc
 	struct task_struct *tsk = current;
 
 	/* Return not checked because double fault cannot be ignored */
-	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = 8;
+	tsk->thread.trap_nr = X86_TRAP_DF;
 
 	/*
 	 * This is always a kernel trap and never fixable (and thus must
@@ -268,7 +272,7 @@ do_general_protection(struct pt_regs *re
 		goto gp_in_kernel;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = 13;
+	tsk->thread.trap_nr = X86_TRAP_GP;
 
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 			printk_ratelimit()) {
@@ -295,9 +299,9 @@ gp_in_kernel:
 		return;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = 13;
-	if (notify_die(DIE_GPF, "general protection fault", regs,
-				error_code, 13, SIGSEGV) == NOTIFY_STOP)
+	tsk->thread.trap_nr = X86_TRAP_GP;
+	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+			X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
 		return;
 	die("general protection fault", regs, error_code);
 }
@@ -306,13 +310,13 @@ gp_in_kernel:
 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
-	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-			== NOTIFY_STOP)
+	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+				SIGTRAP) == NOTIFY_STOP)
 		return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-			== NOTIFY_STOP)
+	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+			SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	/*
@@ -321,7 +325,7 @@ dotraplinkage void __kprobes do_int3(str
 	 */
 	debug_stack_usage_inc();
 	preempt_conditional_sti(regs);
-	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 	preempt_conditional_cli(regs);
 	debug_stack_usage_dec();
 }
@@ -426,8 +430,8 @@ dotraplinkage void __kprobes do_debug(st
 	preempt_conditional_sti(regs);
 
 	if (regs->flags & X86_VM_MASK) {
-		handle_vm86_trap((struct kernel_vm86_regs *) regs,
-				error_code, 1);
+		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+					X86_TRAP_DB);
 		preempt_conditional_cli(regs);
 		debug_stack_usage_dec();
 		return;
@@ -464,7 +468,8 @@ void math_error(struct pt_regs *regs, in
 	struct task_struct *task = current;
 	siginfo_t info;
 	unsigned short err;
-	char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
+						"simd exception";
 
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
 		return;
@@ -474,7 +479,7 @@ void math_error(struct pt_regs *regs, in
 	{
 		if (!fixup_exception(regs)) {
 			task->thread.error_code = error_code;
-			task->thread.trap_no = trapnr;
+			task->thread.trap_nr = trapnr;
 			die(str, regs, error_code);
 		}
 		return;
@@ -484,12 +489,12 @@ void math_error(struct pt_regs *regs, in
 	 * Save the info for the exception handler and clear the error.
 	 */
 	save_init_fpu(task);
-	task->thread.trap_no = trapnr;
+	task->thread.trap_nr = trapnr;
 	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
 	info.si_addr = (void __user *)regs->ip;
-	if (trapnr == 16) {
+	if (trapnr == X86_TRAP_MF) {
 		unsigned short cwd, swd;
 		/*
 		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -533,10 +538,11 @@ void math_error(struct pt_regs *regs, in
 		info.si_code = FPE_FLTRES;
 	} else {
 		/*
-		 * If we're using IRQ 13, or supposedly even some trap 16
-		 * implementations, it's possible we get a spurious trap...
+		 * If we're using IRQ 13, or supposedly even some trap
+		 * X86_TRAP_MF implementations, it's possible
+		 * we get a spurious trap, which is not an error.
 		 */
-		return;		/* Spurious trap, no error */
+		return;
 	}
 	force_sig_info(SIGFPE, &info, task);
 }
@@ -547,13 +553,13 @@ dotraplinkage void do_coprocessor_error(
 	ignore_fpu_irq = 1;
 #endif
 
-	math_error(regs, error_code, 16);
+	math_error(regs, error_code, X86_TRAP_MF);
 }
 
 dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	math_error(regs, error_code, 19);
+	math_error(regs, error_code, X86_TRAP_XF);
 }
 
 #ifndef CONFIG_XEN
@@ -651,10 +657,11 @@ dotraplinkage void do_iret_error(struct 
 	info.si_errno = 0;
 	info.si_code = ILL_BADSTK;
 	info.si_addr = NULL;
-	if (notify_die(DIE_TRAP, "iret exception",
-			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
+	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
+			X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
 		return;
-	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
+	do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
+		&info);
 }
 #endif
 
@@ -668,38 +675,38 @@ dotraplinkage void do_iret_error(struct 
 #define X 4
 #endif
 static const trap_info_t __initconst early_trap_table[] = {
-	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
-	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
-	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
-	{  0, 0,	   0, 0						}
+	{ X86_TRAP_DB, 0|4, __KERNEL_CS, (unsigned long)debug			},
+	{ X86_TRAP_BP, 3|4, __KERNEL_CS, (unsigned long)int3			},
+	{ X86_TRAP_PF, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
+	{ }
 };
 static const trap_info_t __cpuinitconst trap_table[] = {
-	{  0, 0|X, __KERNEL_CS, (unsigned long)divide_error		},
-	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
-	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
-	{  4, 3|X, __KERNEL_CS, (unsigned long)overflow			},
-	{  5, 0|X, __KERNEL_CS, (unsigned long)bounds			},
-	{  6, 0|X, __KERNEL_CS, (unsigned long)invalid_op		},
-	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
-	{  9, 0|X, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-	{ 10, 0|X, __KERNEL_CS, (unsigned long)invalid_TSS		},
-	{ 11, 0|X, __KERNEL_CS, (unsigned long)segment_not_present	},
-	{ 12, 0|X, __KERNEL_CS, (unsigned long)stack_segment		},
-	{ 13, 0|X, __KERNEL_CS, (unsigned long)general_protection	},
-	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
-	{ 16, 0|X, __KERNEL_CS, (unsigned long)coprocessor_error	},
-	{ 17, 0|X, __KERNEL_CS, (unsigned long)alignment_check		},
+	{ X86_TRAP_DE, 0|X, __KERNEL_CS, (unsigned long)divide_error		},
+	{ X86_TRAP_DB, 0|4, __KERNEL_CS, (unsigned long)debug			},
+	{ X86_TRAP_BP, 3|4, __KERNEL_CS, (unsigned long)int3			},
+	{ X86_TRAP_OF, 3|X, __KERNEL_CS, (unsigned long)overflow		},
+	{ X86_TRAP_BR, 0|X, __KERNEL_CS, (unsigned long)bounds			},
+	{ X86_TRAP_UD, 0|X, __KERNEL_CS, (unsigned long)invalid_op		},
+	{ X86_TRAP_NM, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
+	{ X86_TRAP_OLD_MF, 0|X, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
+	{ X86_TRAP_TS, 0|X, __KERNEL_CS, (unsigned long)invalid_TSS		},
+	{ X86_TRAP_NP, 0|X, __KERNEL_CS, (unsigned long)segment_not_present	},
+	{ X86_TRAP_SS, 0|X, __KERNEL_CS, (unsigned long)stack_segment		},
+	{ X86_TRAP_GP, 0|X, __KERNEL_CS, (unsigned long)general_protection	},
+	{ X86_TRAP_PF, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
+	{ X86_TRAP_MF, 0|X, __KERNEL_CS, (unsigned long)coprocessor_error	},
+	{ X86_TRAP_AC, 0|X, __KERNEL_CS, (unsigned long)alignment_check		},
 #ifdef CONFIG_X86_MCE
-	{ 18, 0|X, __KERNEL_CS, (unsigned long)machine_check		},
+	{ X86_TRAP_MC, 0|X, __KERNEL_CS, (unsigned long)machine_check		},
 #endif
-	{ 19, 0|X, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
+	{ X86_TRAP_XF, 0|X, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
 #ifdef CONFIG_X86_32
-	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
+	{ X86_TRAP_SPURIOUS, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment	},
 	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
 #elif defined(CONFIG_IA32_EMULATION)
 	{ IA32_SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)ia32_syscall },
 #endif
-	{  0, 0,	   0, 0						}
+	{ }
 };
 
 /* Set of traps needed for early debugging. */
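
The trap-table conversion above replaces raw vector numbers with the X86_TRAP_* enumerators. For reference, the mapping as introduced in asm/traps.h of this kernel generation (abridged):

	enum {
		X86_TRAP_DE = 0,	/* divide error */
		X86_TRAP_DB = 1,	/* debug */
		X86_TRAP_BP = 3,	/* int3 breakpoint */
		X86_TRAP_OF = 4,	/* overflow */
		X86_TRAP_BR = 5,	/* bounds */
		X86_TRAP_UD = 6,	/* invalid opcode */
		X86_TRAP_NM = 7,	/* device not available */
		X86_TRAP_DF = 8,	/* double fault */
		X86_TRAP_OLD_MF = 9,	/* coprocessor segment overrun */
		X86_TRAP_TS = 10,	/* invalid TSS */
		X86_TRAP_NP = 11,	/* segment not present */
		X86_TRAP_SS = 12,	/* stack segment */
		X86_TRAP_GP = 13,	/* general protection */
		X86_TRAP_PF = 14,	/* page fault */
		X86_TRAP_SPURIOUS = 15,	/* spurious interrupt */
		X86_TRAP_MF = 16,	/* x87 FPU error */
		X86_TRAP_AC = 17,	/* alignment check */
		X86_TRAP_MC = 18,	/* machine check */
		X86_TRAP_XF = 19,	/* SIMD exception */
		X86_TRAP_IRET = 32,	/* iret exception */
	};
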
--- head.orig/arch/x86/kernel/vsyscall_64-xen.c	2013-05-23 17:48:42.000000000 +0200
+++ head/arch/x86/kernel/vsyscall_64-xen.c	2013-05-23 17:53:29.000000000 +0200
@@ -52,10 +52,7 @@
 #include "vsyscall_trace.h"
 
 DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
-	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
 
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup);
 
 void update_vsyscall_tz(void)
 {
-	unsigned long flags;
-
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
-	/* sys_tz has changed */
 	vsyscall_gtod_data.sys_tz = sys_tz;
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 			struct clocksource *clock, u32 mult)
 {
-	unsigned long flags;
+	struct timespec monotonic;
 
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_begin(&vsyscall_gtod_data.seq);
 
 	/* copy vsyscall data */
 	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wa
 	vsyscall_gtod_data.clock.mask		= clock->mask;
 	vsyscall_gtod_data.clock.mult		= mult;
 	vsyscall_gtod_data.clock.shift		= clock->shift;
+
 	vsyscall_gtod_data.wall_time_sec	= wall_time->tv_sec;
 	vsyscall_gtod_data.wall_time_nsec	= wall_time->tv_nsec;
-	vsyscall_gtod_data.wall_to_monotonic	= *wtm;
+
+	monotonic = timespec_add(*wall_time, *wtm);
+	vsyscall_gtod_data.monotonic_time_sec	= monotonic.tv_sec;
+	vsyscall_gtod_data.monotonic_time_nsec	= monotonic.tv_nsec;
+
 	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();
+	vsyscall_gtod_data.monotonic_time_coarse =
+		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
 
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&vsyscall_gtod_data.seq);
 }
 
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
@@ -153,7 +152,7 @@ static bool write_ok_or_segv(unsigned lo
 
 		thread->error_code	= 6;  /* user fault, no page, write */
 		thread->cr2		= ptr;
-		thread->trap_no		= 14;
+		thread->trap_nr		= X86_TRAP_PF;
 
 		memset(&info, 0, sizeof(info));
 		info.si_signo		= SIGSEGV;
@@ -217,9 +216,9 @@ bool emulate_vsyscall(struct pt_regs *re
 	current_thread_info()->sig_on_uaccess_error = 1;
 
 	/*
-	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
 	 * 64-bit, so we don't need to special-case it here.  For all the
-	 * vsyscalls, 0 means "don't write anything" not "write it at
+	 * vsyscalls, NULL means "don't write anything" not "write it at
 	 * address 0".
 	 */
 	ret = -EFAULT;
@@ -248,7 +247,7 @@ bool emulate_vsyscall(struct pt_regs *re
 
 		ret = sys_getcpu((unsigned __user *)regs->di,
 				 (unsigned __user *)regs->si,
-				 0);
+				 NULL);
 		break;
 	default:
 		ret = -ENOSYS;
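
The gtod conversion above drops the seqlock (and its IRQ save/restore) in favor of a bare seqcount, and caches precomputed monotonic time so vDSO readers no longer add wall_to_monotonic themselves. A sketch of the matching lock-free reader, assuming the seq member added by the upstream 3.4 change:

	struct timespec ts;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts.tv_sec  = gtod->monotonic_time_sec;
		ts.tv_nsec = gtod->monotonic_time_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
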
--- head.orig/arch/x86/kernel/x86_init-xen.c	2011-11-28 10:08:44.000000000 +0100
+++ head/arch/x86/kernel/x86_init-xen.c	2012-04-13 17:40:35.000000000 +0200
@@ -8,6 +8,7 @@
 #include <linux/ioport.h>
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <linux/spinlock_types.h>
 #include <linux/threads.h>
 
--- head.orig/arch/x86/lib/scrub.c	2008-02-08 12:30:51.000000000 +0100
+++ head/arch/x86/lib/scrub.c	2012-04-13 14:47:07.000000000 +0200
@@ -2,7 +2,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
-void scrub_pages(void *v, unsigned int count)
+void xen_scrub_pages(void *v, unsigned int count)
 {
 	if (likely(cpu_has_xmm2)) {
 		unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4);
--- head.orig/arch/x86/mm/fault-xen.c	2012-02-16 13:54:07.000000000 +0100
+++ head/arch/x86/mm/fault-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -624,7 +624,7 @@ pgtable_bad(struct pt_regs *regs, unsign
 	dump_pagetable(address);
 
 	tsk->thread.cr2		= address;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 	tsk->thread.error_code	= error_code;
 
 	if (__die("Bad pagetable", regs, error_code))
@@ -645,7 +645,7 @@ no_context(struct pt_regs *regs, unsigne
 	/* Are we prepared to handle this kernel fault? */
 	if (fixup_exception(regs)) {
 		if (current_thread_info()->sig_on_uaccess_error && signal) {
-			tsk->thread.trap_no = 14;
+			tsk->thread.trap_nr = X86_TRAP_PF;
 			tsk->thread.error_code = error_code | PF_USER;
 			tsk->thread.cr2 = address;
 
@@ -685,7 +685,7 @@ no_context(struct pt_regs *regs, unsigne
 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 
 	tsk->thread.cr2		= address;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 	tsk->thread.error_code	= error_code;
 
 	sig = SIGKILL;
@@ -763,7 +763,7 @@ __bad_area_nosemaphore(struct pt_regs *r
 		/* Kernel addresses are always protection faults: */
 		tsk->thread.cr2		= address;
 		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
-		tsk->thread.trap_no	= 14;
+		tsk->thread.trap_nr	= X86_TRAP_PF;
 
 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
 
@@ -847,7 +847,7 @@ do_sigbus(struct pt_regs *regs, unsigned
 
 	tsk->thread.cr2		= address;
 	tsk->thread.error_code	= error_code;
-	tsk->thread.trap_no	= 14;
+	tsk->thread.trap_nr	= X86_TRAP_PF;
 
 #ifdef CONFIG_MEMORY_FAILURE
 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
--- head.orig/arch/x86/mm/highmem_32-xen.c	2011-12-21 11:56:23.000000000 +0100
+++ head/arch/x86/mm/highmem_32-xen.c	2012-04-13 14:41:11.000000000 +0200
@@ -51,11 +51,11 @@ void *kmap_atomic_prot(struct page *page
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	return kmap_atomic_prot(page, kmap_prot);
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
@@ -130,9 +130,9 @@ void clear_highpage(struct page *page)
 			return;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	clear_page(kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 EXPORT_SYMBOL(clear_highpage);
 
@@ -155,11 +155,11 @@ void copy_highpage(struct page *to, stru
 			return;
 	}
 
-	vfrom = kmap_atomic(from, KM_USER0);
-	vto = kmap_atomic(to, KM_USER1);
+	vfrom = kmap_atomic(from);
+	vto = kmap_atomic(to);
 	copy_page(vto, vfrom);
-	kunmap_atomic(vfrom, KM_USER0);
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vfrom);
+	kunmap_atomic(vto);
 }
 EXPORT_SYMBOL(copy_highpage);
 
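
The kmap_atomic() calls above lose their KM_* slot argument: as of 3.4 the kernel manages atomic kmap slots as a small per-CPU stack, so mappings simply nest and kunmap_atomic() takes the mapped address. A minimal sketch of the new pairing (hypothetical helper, not from this file):

	#include <linux/highmem.h>
	#include <linux/string.h>

	static void copy_one_page(struct page *to, struct page *from)
	{
		void *vfrom = kmap_atomic(from);	/* slots nest implicitly */
		void *vto = kmap_atomic(to);

		memcpy(vto, vfrom, PAGE_SIZE);
		kunmap_atomic(vto);			/* unmap in reverse order */
		kunmap_atomic(vfrom);
	}
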
--- head.orig/arch/x86/mm/hypervisor.c	2011-11-18 15:53:44.000000000 +0100
+++ head/arch/x86/mm/hypervisor.c	2012-05-31 14:45:36.000000000 +0200
@@ -612,7 +612,7 @@ int xen_create_contiguous_region(
 
 	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
 
-	scrub_pages((void *)vstart, 1 << order);
+	xen_scrub_pages((void *)vstart, 1 << order);
 
 	balloon_lock(flags);
 
@@ -720,7 +720,7 @@ void xen_destroy_contiguous_region(unsig
 
 	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
 
-	scrub_pages((void *)vstart, 1 << order);
+	xen_scrub_pages((void *)vstart, 1 << order);
 
 	balloon_lock(flags);
 
@@ -911,10 +911,10 @@ int xen_limit_pages_to_max_mfn(
 		}
 
 		if (!PageHighMem(page))
-			scrub_pages(page_address(page), 1);
+			xen_scrub_pages(page_address(page), 1);
 #ifdef CONFIG_XEN_SCRUB_PAGES
 		else {
-			scrub_pages(kmap(page), 1);
+			xen_scrub_pages(kmap(page), 1);
 			kunmap(page);
 			++n;
 		}
--- head.orig/arch/x86/mm/init-xen.c	2013-04-05 09:21:47.000000000 +0200
+++ head/arch/x86/mm/init-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -12,7 +12,6 @@
 #include <asm/page_types.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/system.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include <asm/proto.h>
--- head.orig/arch/x86/mm/init_32-xen.c	2012-02-09 15:46:24.000000000 +0100
+++ head/arch/x86/mm/init_32-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -37,7 +37,6 @@
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/dma.h>
--- head.orig/arch/x86/mm/init_64-xen.c	2013-04-05 09:21:15.000000000 +0200
+++ head/arch/x86/mm/init_64-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -38,7 +38,6 @@
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
--- head.orig/arch/x86/mm/pgtable_32-xen.c	2011-02-01 15:03:10.000000000 +0100
+++ head/arch/x86/mm/pgtable_32-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -10,7 +10,6 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 
-#include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
--- head.orig/arch/x86/vdso/vclock_gettime.c	2013-08-15 11:59:07.000000000 +0200
+++ head/arch/x86/vdso/vclock_gettime.c	2013-03-21 15:31:34.000000000 +0100
@@ -26,6 +26,9 @@
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
+#ifdef CONFIG_XEN
+#define VCLOCK_NONE 0
+#else
 notrace static cycle_t vread_tsc(void)
 {
 	cycle_t ret;
@@ -62,6 +65,7 @@ static notrace cycle_t vread_hpet(void)
 {
 	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
 }
+#endif /* CONFIG_XEN */
 
 #ifdef CONFIG_PARAVIRT_CLOCK
 
@@ -151,6 +155,7 @@ notrace static long vdso_fallback_gtod(s
 }
 
 
+#ifndef CONFIG_XEN
 notrace static inline u64 vgetsns(int *mode)
 {
 	long v;
@@ -209,6 +214,7 @@ notrace static int do_monotonic(struct t
 
 	return mode;
 }
+#endif /* CONFIG_XEN */
 
 notrace static int do_realtime_coarse(struct timespec *ts)
 {
@@ -238,12 +244,14 @@ notrace int __vdso_clock_gettime(clockid
 	int ret = VCLOCK_NONE;
 
 	switch (clock) {
+#ifndef CONFIG_XEN
 	case CLOCK_REALTIME:
 		ret = do_realtime(ts);
 		break;
 	case CLOCK_MONOTONIC:
 		ret = do_monotonic(ts);
 		break;
+#endif
 	case CLOCK_REALTIME_COARSE:
 		return do_realtime_coarse(ts);
 	case CLOCK_MONOTONIC_COARSE:
@@ -261,6 +269,7 @@ notrace int __vdso_gettimeofday(struct t
 {
 	long ret = VCLOCK_NONE;
 
+#ifndef CONFIG_XEN
 	if (likely(tv != NULL)) {
 		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 			     offsetof(struct timespec, tv_nsec) ||
@@ -273,6 +282,7 @@ notrace int __vdso_gettimeofday(struct t
 		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
 		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
 	}
+#endif
 
 	if (ret == VCLOCK_NONE)
 		return vdso_fallback_gtod(tv, tz);
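
Under CONFIG_XEN no vclock is usable from userspace (the guest may not read the TSC or HPET directly), so the vDSO paths above hard-wire VCLOCK_NONE and always fall through to vdso_fallback_gtod(), i.e. a real system call. Roughly what that fallback looks like (sketch; the clobber list is padded to be safe):

	static long fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
	{
		long ret;

		asm volatile ("syscall"
			      : "=a" (ret)
			      : "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
			      : "memory", "rcx", "r11");
		return ret;
	}
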
--- head.orig/arch/x86/vdso/vdso32-setup-xen.c	2012-02-29 14:19:15.000000000 +0100
+++ head/arch/x86/vdso/vdso32-setup-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -286,13 +286,7 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
 	gate_vma.vm_page_prot = __P101;
-	/*
-	 * Make sure the vDSO gets into every core dump.
-	 * Dumping its contents makes post-mortem fully interpretable later
-	 * without matching up the same kernel and hardware config to see
-	 * what PC values meant.
-	 */
-	gate_vma.vm_flags |= VM_ALWAYSDUMP;
+
 	return 0;
 }
 
@@ -365,6 +359,11 @@ int arch_setup_additional_pages(struct l
 	int ret = 0;
 	bool compat;
 
+#ifdef CONFIG_X86_X32_ABI
+	if (test_thread_flag(TIF_X32))
+		return x32_setup_additional_pages(bprm, uses_interp);
+#endif
+
 	if (vdso_enabled == VDSO_DISABLED)
 		return 0;
 
@@ -391,17 +390,10 @@ int arch_setup_additional_pages(struct l
 	if (compat_uses_vma || !compat) {
 		/*
 		 * MAYWRITE to allow gdb to COW and set breakpoints
-		 *
-		 * Make sure the vDSO gets into every core dump.
-		 * Dumping its contents makes post-mortem fully
-		 * interpretable later without matching up the same
-		 * kernel and hardware config to see what PC values
-		 * meant.
 		 */
 		ret = install_special_mapping(mm, addr, PAGE_SIZE,
 					      VM_READ|VM_EXEC|
-					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
-					      VM_ALWAYSDUMP,
+					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 					      vdso32_pages);
 
 		if (ret)
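
Two things happen above: x32 tasks are diverted to their own vDSO setup, and VM_ALWAYSDUMP disappears because 3.4 made the core-dump code always dump special mappings, rendering the flag redundant. For context, the shape of the mapping call (sketch; pages[] is assumed to hold the vDSO image, NULL-terminated):

	static struct page *pages[2];	/* vdso page + NULL terminator */

	static int map_vdso_page(struct mm_struct *mm, unsigned long addr)
	{
		return install_special_mapping(mm, addr, PAGE_SIZE,
					       VM_READ|VM_EXEC|
					       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					       pages);
	}
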
--- head.orig/drivers/acpi/acpica/hwesleep.c	2013-08-15 11:59:07.000000000 +0200
+++ head/drivers/acpi/acpica/hwesleep.c	2013-02-13 16:36:49.000000000 +0100
@@ -43,6 +43,7 @@
  */
 
 #include <acpi/acpi.h>
+#include <linux/acpi.h>
 #include "accommon.h"
 
 #define _COMPONENT          ACPI_HARDWARE
@@ -128,6 +129,13 @@ acpi_status acpi_hw_extended_sleep(u8 sl
 
 	ACPI_FLUSH_CPU_CACHE();
 
+	status = acpi_os_prepare_sleep(sleep_state, acpi_gbl_sleep_type_a,
+				       acpi_gbl_sleep_type_b, true);
+	if (ACPI_SKIP(status))
+		return_ACPI_STATUS(AE_OK);
+	if (ACPI_FAILURE(status))
+		return_ACPI_STATUS(status);
+
 	/*
 	 * Set the SLP_TYP and SLP_EN bits.
 	 *
--- head.orig/drivers/acpi/acpica/hwsleep.c	2013-08-15 11:59:07.000000000 +0200
+++ head/drivers/acpi/acpica/hwsleep.c	2013-03-21 15:30:58.000000000 +0100
@@ -153,7 +153,7 @@ acpi_status acpi_hw_legacy_sleep(u8 slee
 	ACPI_FLUSH_CPU_CACHE();
 
 	status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
-				       pm1b_control);
+				       pm1b_control, false);
 	if (ACPI_SKIP(status))
 		return_ACPI_STATUS(AE_OK);
 	if (ACPI_FAILURE(status))
--- head.orig/drivers/acpi/osl.c	2013-03-21 15:29:15.000000000 +0100
+++ head/drivers/acpi/osl.c	2013-08-09 15:36:16.000000000 +0200
@@ -77,8 +77,8 @@ EXPORT_SYMBOL(acpi_in_debugger);
 extern char line_buf[80];
 #endif				/*ENABLE_DEBUGGER */
 
-static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
-				      u32 pm1b_ctrl);
+static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 val_a, u32 val_b,
+				      bool extended);
 
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
@@ -1762,13 +1762,13 @@ acpi_status acpi_os_terminate(void)
 	return AE_OK;
 }
 
-acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
-				  u32 pm1b_control)
+acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 val_a, u32 val_b,
+				  bool extended)
 {
 	int rc = 0;
 	if (__acpi_os_prepare_sleep)
-		rc = __acpi_os_prepare_sleep(sleep_state,
-					     pm1a_control, pm1b_control);
+		rc = __acpi_os_prepare_sleep(sleep_state, val_a, val_b,
+					     extended);
 	if (rc < 0)
 		return AE_ERROR;
 	else if (rc > 0)
@@ -1777,8 +1777,8 @@ acpi_status acpi_os_prepare_sleep(u8 sle
 	return AE_OK;
 }
 
-void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
-			       u32 pm1a_ctrl, u32 pm1b_ctrl))
+void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, u32 val_a,
+					   u32 val_b, bool extended))
 {
 	__acpi_os_prepare_sleep = func;
 }
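
The hook gains a bool `extended` distinguishing ACPI 5.0 extended sleep (8-bit sleep-control values) from the legacy 16-bit PM1A/PM1B path, and hwesleep.c now consults it as well. Per the osl.c logic above, a registered callback returns a negative value to abort the transition, a positive value to skip the native register write (the callee, e.g. Xen, performs it instead), and 0 to continue normally. A sketch of a hook following that contract (hypothetical name):

	static int my_prepare_sleep(u8 sleep_state, u32 val_a, u32 val_b,
				    bool extended)
	{
		if (extended)
			return -1;	/* legacy-only handler: abort */
		/* hand val_a/val_b to the hypervisor here ... */
		return 1;		/* skip the native register write */
	}

	/* registration, e.g. from platform init code: */
	acpi_os_set_prepare_sleep(my_prepare_sleep);
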
--- head.orig/drivers/acpi/acpi_processor.c	2013-08-09 15:07:18.000000000 +0200
+++ head/drivers/acpi/acpi_processor.c	2013-08-09 15:36:09.000000000 +0200
@@ -413,7 +413,8 @@ static int acpi_processor_add(struct acp
 				   pr->acpi_id, device);
 	switch (result) {
 	default:
-		goto err_unlock;
+		mutex_unlock(&processor_device_mutex);
+		goto err;
 	case -EEXIST:
 		if (radix_tree_lookup(&processor_device_tree,
 				      pr->acpi_id) == device) {
@@ -469,14 +470,6 @@ static int acpi_processor_add(struct acp
 #endif
 
  err:
-#ifdef CONFIG_XEN
-	mutex_lock(&processor_device_mutex);
-	if (radix_tree_lookup(&processor_device_tree,
-			      pr->acpi_id) == device)
-		radix_tree_delete(&processor_device_tree, pr->acpi_id);
- err_unlock:
-	mutex_unlock(&processor_device_mutex);
-#endif
 	free_cpumask_var(pr->throttling.shared_cpu_map);
 	device->driver_data = NULL;
 #ifdef CONFIG_XEN
--- head.orig/drivers/gpu/drm/i915/i915_gem_gtt.c	2013-08-15 11:59:07.000000000 +0200
+++ head/drivers/gpu/drm/i915/i915_gem_gtt.c	2013-08-09 15:36:31.000000000 +0200
@@ -392,7 +392,6 @@ void i915_ppgtt_unbind_object(struct i91
 			   obj->base.size >> PAGE_SHIFT);
 }
 
-extern int intel_iommu_gfx_mapped;
 /* Certain Gen5 chipsets require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
@@ -665,7 +664,7 @@ intel_enable_ppgtt(struct drm_device *de
 	if (i915_enable_ppgtt >= 0)
 		return i915_enable_ppgtt;
 
-#ifdef CONFIG_INTEL_IOMMU
+#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_XEN)
 	/* Disable ppgtt on SNB if VT-d is on. */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
 		return false;
--- head.orig/drivers/gpu/drm/ttm/ttm_page_alloc.c	2012-04-10 17:05:48.000000000 +0200
+++ head/drivers/gpu/drm/ttm/ttm_page_alloc.c	2012-04-11 14:58:35.000000000 +0200
@@ -496,8 +496,7 @@ static int ttm_alloc_new_pages(struct li
 			r = xen_limit_pages_to_max_mfn(p, 0, 32);
 			if (r) {
 				__free_page(p);
-				printk(KERN_ERR TTM_PFX
-				       "Cannot restrict page (%d).", r);
+				pr_err("Cannot restrict page (%d)", r);
 				p = NULL;
 			} else if (gfp_flags & __GFP_ZERO)
 				clear_page(page_address(p));
@@ -749,8 +748,7 @@ static int ttm_get_pages(struct page **p
 
 				if (rc) {
 					__free_page(p);
-					printk(KERN_ERR TTM_PFX
-					       "Unable to restrict page (%d).",
+					pr_err("Unable to restrict page (%d)",
 					       rc);
 					return rc;
 				}
--- head.orig/drivers/hwmon/coretemp-xen.c	2011-11-17 16:53:49.000000000 +0100
+++ head/drivers/hwmon/coretemp-xen.c	2012-05-08 11:18:36.000000000 +0200
@@ -38,6 +38,7 @@
 #include <linux/smp.h>
 #include <linux/moduleparam.h>
 #include <asm/msr.h>
+#include <asm/cpu_device_id.h>
 #include <xen/pcpu.h>
 #include "../xen/core/domctl.h"
 
@@ -52,7 +53,7 @@ module_param_named(tjmax, force_tjmax, i
 MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 
 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES		16	/* Number of Real cores per cpu */
+#define NUM_REAL_CORES		32	/* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
 #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
@@ -759,6 +760,10 @@ static void put_core_offline(unsigned in
 		return;
 	indx = CORE_ATTR_NO(cpu_core_id);
 
+	/* The core id is too big, just return */
+	if (indx > MAX_CORE_DATA - 1)
+		return;
+
 	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
 		coretemp_remove_core(pdata, &pdev->dev, indx);
 
@@ -819,6 +824,12 @@ static struct notifier_block coretemp_cp
 	.notifier_call = coretemp_cpu_callback,
 };
 
+static const struct x86_cpu_id coretemp_ids[] = {
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
+
 static int __init coretemp_init(void)
 {
 	int err = -ENODEV;
@@ -826,9 +837,13 @@ static int __init coretemp_init(void)
 	if (!is_initial_xendomain())
 		goto exit;
 
-	/* quick check if we run Intel */
-	if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
-		goto exit;
+	/*
+	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+	 * sensors. We check this bit only, all the early CPUs
+	 * without thermal sensors will be filtered out.
+	 */
+	if (!x86_match_cpu(coretemp_ids))
+		return -ENODEV;
 
 	err = platform_driver_register(&coretemp_driver);
 	if (err)
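
The vendor test above becomes a feature-based x86_cpu_id table; besides being more precise (any CPU advertising the DTS thermal-sensor bit matches), MODULE_DEVICE_TABLE(x86cpu, ...) emits a module alias so udev can autoload the driver. The general pattern, sketched:

	#include <asm/cpu_device_id.h>

	static const struct x86_cpu_id my_ids[] = {
		{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY,
		  X86_FEATURE_DTS },	/* digital thermal sensor present */
		{}
	};
	MODULE_DEVICE_TABLE(x86cpu, my_ids);

	static int __init my_driver_init(void)
	{
		if (!x86_match_cpu(my_ids))
			return -ENODEV;
		/* probe/register as usual ... */
		return 0;
	}
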
--- head.orig/drivers/hwmon/via-cputemp-xen.c	2011-09-12 11:45:20.000000000 +0200
+++ head/drivers/hwmon/via-cputemp-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -36,6 +36,7 @@
 #include <linux/platform_device.h>
 #include <linux/cpu.h>
 #include <asm/msr.h>
+#include <asm/cpu_device_id.h>
 #include <xen/pcpu.h>
 #include "../xen/core/domctl.h"
 
@@ -331,6 +332,14 @@ static struct notifier_block via_cputemp
 	.notifier_call = via_cputemp_cpu_callback,
 };
 
+static const struct x86_cpu_id cputemp_ids[] = {
+	{ X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
+	{ X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
+	{ X86_VENDOR_CENTAUR, 6, 0xf, }, /* Nano */
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
+
 static int __init via_cputemp_init(void)
 {
 	int err;
@@ -338,11 +347,8 @@ static int __init via_cputemp_init(void)
 	if (!is_initial_xendomain())
 		return -ENODEV;
 
-	if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
-		printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
-		err = -ENODEV;
-		goto exit;
-	}
+	if (!x86_match_cpu(cputemp_ids))
+		return -ENODEV;
 
 	err = platform_driver_register(&via_cputemp_driver);
 	if (err)
--- head.orig/drivers/input/misc/Kconfig	2013-08-15 11:59:07.000000000 +0200
+++ head/drivers/input/misc/Kconfig	2012-08-20 13:22:44.000000000 +0200
@@ -626,7 +626,7 @@ config INPUT_CMA3000_I2C
 
 config INPUT_XEN_KBDDEV_FRONTEND
 	tristate "Xen virtual keyboard and mouse support"
-	depends on XEN
+	depends on PARAVIRT_XEN
 	default y
 	select XEN_XENBUS_FRONTEND
 	help
--- head.orig/drivers/xen/Kconfig	2013-05-23 17:48:16.000000000 +0200
+++ head/drivers/xen/Kconfig	2013-05-23 17:53:15.000000000 +0200
@@ -630,7 +630,7 @@ config XEN_ACPI_HOTPLUG_CPU
 
 config XEN_ACPI_PROCESSOR
 	tristate "Xen ACPI processor"
-	depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+	depends on PARAVIRT_XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
 	default m
 	help
           This ACPI processor uploads Power Management information to the Xen
--- head.orig/drivers/xen/Makefile	2012-10-04 13:11:29.000000000 +0200
+++ head/drivers/xen/Makefile	2012-10-04 13:11:56.000000000 +0200
@@ -41,7 +41,7 @@ obj-$(CONFIG_SWIOTLB_XEN)		+= swiotlb-xe
 obj-$(CONFIG_XEN_DOM0)			+= pci.o
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= xen-pciback/
 obj-$(CONFIG_XEN_PRIVCMD)		+= $(xen-privcmd_y)
-
+obj-$(CONFIG_XEN_ACPI_PROCESSOR)	+= xen-acpi-processor.o
 xen-evtchn-y				:= evtchn.o
 xen-gntdev-y				:= gntdev.o
 xen-gntalloc-y				:= gntalloc.o
--- head.orig/drivers/xen/acpi.c	2013-08-15 11:59:07.000000000 +0200
+++ head/drivers/xen/acpi.c	2013-02-13 17:01:02.000000000 +0100
@@ -35,27 +35,27 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
-				     u32 pm1a_cnt, u32 pm1b_cnt)
+int xen_acpi_notify_hypervisor_state(u8 sleep_state, u32 val_a, u32 val_b,
+				     bool extended)
 {
+	unsigned int bits = extended ? 8 : 16;
+
 	struct xen_platform_op op = {
 		.cmd = XENPF_enter_acpi_sleep,
 		.interface_version = XENPF_INTERFACE_VERSION,
-		.u = {
-			.enter_acpi_sleep = {
-				.pm1a_cnt_val = (u16)pm1a_cnt,
-				.pm1b_cnt_val = (u16)pm1b_cnt,
-				.sleep_state = sleep_state,
-			},
+		.u.enter_acpi_sleep = {
+			.val_a = (u16)val_a,
+			.val_b = (u16)val_b,
+			.sleep_state = sleep_state,
+			.flags = extended ? XENPF_ACPI_SLEEP_EXTENDED : 0,
 		},
 	};
 
-	if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
-		WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
-		     "Email xen-devel@lists.xensource.com  Thank you.\n", \
-		     pm1a_cnt, pm1b_cnt);
+	if (WARN((val_a & (~0 << bits)) || (val_b & (~0 << bits)),
+		 "Using more than %u bits of sleep control values %#x/%#x!"
+		 "Email xen-devel@lists.xen.org - Thank you.\n", \
+		 bits, val_a, val_b))
 		return -1;
-	}
 
 	HYPERVISOR_dom0_op(&op);
 	return 1;
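
The width check above encodes the two sleep flavours: legacy PM1A/PM1B control values are 16-bit, while the extended (ACPI 5.0) sleep-control values are 8-bit, so `bits` is 8 or 16 and any value with a higher bit set trips the WARN. Worked through (sketch; an unsigned mask is used for clarity):

	unsigned int bits = extended ? 8 : 16;
	u32 mask = ~0u << bits;		/* 0xffffff00 or 0xffff0000 */

	/* extended: val_a == 0x1a5 -> (val_a & mask) != 0 -> warn, fail;
	 *           val_a == 0xa5  -> fits in 8 bits      -> accepted.  */
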
--- head.orig/drivers/xen/balloon/balloon.c	2012-06-08 10:38:14.000000000 +0200
+++ head/drivers/xen/balloon/balloon.c	2012-06-08 10:38:39.000000000 +0200
@@ -353,7 +353,7 @@ static int decrease_reservation(unsigned
 
 		if (!PageHighMem(page)) {
 			v = phys_to_virt(pfn << PAGE_SHIFT);
-			scrub_pages(v, 1);
+			xen_scrub_pages(v, 1);
 #ifdef CONFIG_XEN
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)v, __pte_ma(0), 0);
@@ -363,7 +363,7 @@ static int decrease_reservation(unsigned
 #ifdef CONFIG_XEN_SCRUB_PAGES
 		else {
 			v = kmap(page);
-			scrub_pages(v, 1);
+			xen_scrub_pages(v, 1);
 			kunmap(page);
 		}
 #endif
@@ -678,7 +678,7 @@ struct page **alloc_empty_pages_and_page
 			goto err;
 
 		v = page_address(page);
-		scrub_pages(v, 1);
+		xen_scrub_pages(v, 1);
 
 		balloon_lock(flags);
 
--- head.orig/drivers/xen/core/evtchn.c	2012-10-04 13:11:41.000000000 +0200
+++ head/drivers/xen/core/evtchn.c	2012-04-12 09:56:01.000000000 +0200
@@ -38,7 +38,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/ftrace.h>
 #include <linux/atomic.h>
-#include <asm/system.h>
+#include <asm/barrier.h>
 #include <asm/ptrace.h>
 #include <xen/evtchn.h>
 #include <xen/interface/event_channel.h>
--- head.orig/drivers/xen/netfront/netfront.c	2013-07-15 12:29:48.000000000 +0200
+++ head/drivers/xen/netfront/netfront.c	2013-07-15 12:29:53.000000000 +0200
@@ -937,7 +937,7 @@ static int network_start_xmit(struct sk_
 	char *data = skb->data;
 	RING_IDX i;
 	grant_ref_t ref;
-	unsigned long mfn;
+	unsigned long mfn, flags;
 	int notify;
 	int frags = skb_shinfo(skb)->nr_frags;
 	unsigned int offset = offset_in_page(data);
@@ -967,12 +967,12 @@ static int network_start_xmit(struct sk_
 		goto drop;
 	}
 
-	spin_lock_irq(&np->tx_lock);
+	spin_lock_irqsave(&np->tx_lock, flags);
 
 	if (unlikely(!netfront_carrier_ok(np) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
-		spin_unlock_irq(&np->tx_lock);
+		spin_unlock_irqrestore(&np->tx_lock, flags);
 		goto drop;
 	}
 
@@ -1043,7 +1043,7 @@ static int network_start_xmit(struct sk_
 	if (!netfront_tx_slot_available(np))
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&np->tx_lock);
+	spin_unlock_irqrestore(&np->tx_lock, flags);
 
 	return NETDEV_TX_OK;
 
@@ -2090,6 +2090,13 @@ static int xennet_set_features(struct ne
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xennet_poll_controller(struct net_device *dev)
+{
+	netif_int(0, dev);
+}
+#endif
+
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_uninit             = netif_uninit,
 	.ndo_open               = network_open,
@@ -2100,6 +2107,9 @@ static const struct net_device_ops xenne
 	.ndo_validate_addr      = eth_validate_addr,
 	.ndo_fix_features       = xennet_fix_features,
 	.ndo_set_features       = xennet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller    = xennet_poll_controller,
+#endif
 	.ndo_change_mtu	        = xennet_change_mtu,
 	.ndo_get_stats64        = xennet_get_stats64,
 };
@@ -2111,10 +2121,8 @@ static struct net_device * __devinit cre
 	struct netfront_info *np = NULL;
 
 	netdev = alloc_etherdev(sizeof(struct netfront_info));
-	if (!netdev) {
-		pr_warning("%s: alloc_etherdev failed\n", __FUNCTION__);
+	if (!netdev)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	np                   = netdev_priv(netdev);
 	np->xbdev            = dev;
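
With ndo_poll_controller wired up, netpoll may call into the driver (including the transmit path) with interrupts already disabled, which is why the tx_lock above moves from spin_lock_irq() to spin_lock_irqsave(): the saved flags restore whatever IRQ state the caller had rather than unconditionally re-enabling interrupts. The pattern, in brief:

	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);	/* safe in hard-irq context */
	/* ... queue the skb ... */
	spin_unlock_irqrestore(&np->tx_lock, flags);
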
--- head.orig/drivers/xen/pcifront/pci_op.c	2013-01-25 15:10:03.000000000 +0100
+++ head/drivers/xen/pcifront/pci_op.c	2012-04-13 16:45:40.000000000 +0200
@@ -545,7 +545,7 @@ static void free_root_bus_devs(struct pc
 		dev = container_of(bus->devices.next, struct pci_dev,
 				   bus_list);
 		dev_dbg(&dev->dev, "removing device\n");
-		pci_remove_bus_device(dev);
+		pci_stop_and_remove_bus_device(dev);
 	}
 }
 
@@ -572,17 +572,15 @@ void pcifront_free_roots(struct pcifront
 static pci_ers_result_t pcifront_common_process( int cmd, struct pcifront_device *pdev,
 	pci_channel_state_t state)
 {
-	pci_ers_result_t result;
+	pci_ers_result_t result = PCI_ERS_RESULT_NONE;
 	struct pci_driver *pdrv;
 	int bus = pdev->sh_info->aer_op.bus;
 	int devfn = pdev->sh_info->aer_op.devfn;
 	struct pci_dev *pcidev;
-	int flag = 0;
 
 	dev_dbg(&pdev->xdev->dev, 
 		"pcifront AER process: cmd %x (bus %x devfn %x)",
 		cmd, bus, devfn);
-	result = PCI_ERS_RESULT_NONE;
 
 	pcidev = pci_get_bus_and_slot(bus, devfn);
 	if (!pcidev || !pcidev->driver) {
@@ -592,37 +590,32 @@ static pci_ers_result_t pcifront_common_
 	}
 	pdrv = pcidev->driver;
 
-	if (get_driver(&pdrv->driver)) {
-		if (pdrv->err_handler && pdrv->err_handler->error_detected) {
-			dev_dbg(&pcidev->dev,
-				"trying to call AER service\n");
-			if (pcidev) {
-				flag = 1;
-				switch(cmd) {
-				case XEN_PCI_OP_aer_detected:
-					result = pdrv->err_handler->error_detected(pcidev, state);
-					break;
-				case XEN_PCI_OP_aer_mmio:
-					result = pdrv->err_handler->mmio_enabled(pcidev);
-					break;
-				case XEN_PCI_OP_aer_slotreset:
-					result = pdrv->err_handler->slot_reset(pcidev);
-					break;
-				case XEN_PCI_OP_aer_resume:
-					pdrv->err_handler->resume(pcidev);
-					break;
-				default:
-					dev_err(&pdev->xdev->dev,
-						"bad request %x in aer recovery operation!\n",
-						cmd);
-					break;
-				}
-			}
+	if (pdrv->err_handler) {
+		dev_dbg(&pcidev->dev, "trying to call AER service\n");
+		switch(cmd) {
+		case XEN_PCI_OP_aer_detected:
+			if (pdrv->err_handler->error_detected)
+				result = pdrv->err_handler->error_detected(pcidev, state);
+			break;
+		case XEN_PCI_OP_aer_mmio:
+			if (pdrv->err_handler->mmio_enabled)
+				result = pdrv->err_handler->mmio_enabled(pcidev);
+			break;
+		case XEN_PCI_OP_aer_slotreset:
+			if (pdrv->err_handler->slot_reset)
+				result = pdrv->err_handler->slot_reset(pcidev);
+			break;
+		case XEN_PCI_OP_aer_resume:
+			if (pdrv->err_handler->resume)
+				pdrv->err_handler->resume(pcidev);
+			break;
+		default:
+			dev_err(&pdev->xdev->dev,
+				"bad request %x in aer recovery operation!\n",
+				cmd);
+			break;
 		}
-		put_driver(&pdrv->driver);
 	}
-	if (!flag)
-		result = PCI_ERS_RESULT_NONE;
 
 	return result;
 }
--- head.orig/drivers/xen/pcifront/xenbus.c	2012-03-12 13:55:45.000000000 +0100
+++ head/drivers/xen/pcifront/xenbus.c	2012-04-13 16:58:01.000000000 +0200
@@ -375,7 +375,7 @@ static int pcifront_detach_devices(struc
 				domain, bus, slot, func);
 			continue;
 		}
-		pci_remove_bus_device(pci_dev);
+		pci_stop_and_remove_bus_device(pci_dev);
 		pci_dev_put(pci_dev);
 
 		dev_dbg(&pdev->xdev->dev,
--- head.orig/drivers/xen/xenbus/xenbus_client.c	2012-02-17 09:16:09.000000000 +0100
+++ head/drivers/xen/xenbus/xenbus_client.c	2012-04-11 13:26:23.000000000 +0200
@@ -583,7 +583,7 @@ int xenbus_map_ring(struct xenbus_device
 {
 	struct gnttab_map_grant_ref op;
 
-	gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref,
+	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
 			  dev->otherend_id);
 
 	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
@@ -676,7 +676,7 @@ static int xenbus_unmap_ring_vfree_hvm(s
 			goto found;
 		}
 	}
-	node = NULL;
+	node = addr = NULL;
  found:
 	spin_unlock(&xenbus_valloc_lock);
 
@@ -712,7 +712,7 @@ int xenbus_unmap_ring(struct xenbus_devi
 {
 	struct gnttab_unmap_grant_ref op;
 
-	gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle);
+	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
 
 	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
 		BUG();
--- head.orig/include/drm/intel-gtt.h	2013-08-15 11:59:07.000000000 +0200
+++ head/include/drm/intel-gtt.h	2013-04-03 10:41:37.000000000 +0200
@@ -27,6 +27,8 @@ void intel_gtt_clear_range(unsigned int 
 
 #ifdef CONFIG_INTEL_IOMMU
 extern int intel_iommu_gfx_mapped;
+#elif defined(CONFIG_XEN)
+#define intel_iommu_gfx_mapped 1
 #endif
 
 #endif
--- head.orig/include/linux/acpi.h	2013-05-23 15:37:27.000000000 +0200
+++ head/include/linux/acpi.h	2013-08-09 15:36:40.000000000 +0200
@@ -470,11 +470,11 @@ static inline bool acpi_driver_match_dev
 #endif	/* !CONFIG_ACPI */
 
 #ifdef CONFIG_ACPI
-void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
-			       u32 pm1a_ctrl,  u32 pm1b_ctrl));
+void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, u32 val_a,
+					   u32 val_b, bool extended));
 
-acpi_status acpi_os_prepare_sleep(u8 sleep_state,
-				  u32 pm1a_control, u32 pm1b_control);
+acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 val_a, u32 val_b,
+				  bool extended);
 #ifdef CONFIG_X86
 void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
 #else
@@ -484,7 +484,7 @@ static inline void arch_reserve_mem_area
 }
 #endif /* CONFIG_X86 */
 #else
-#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
+#define acpi_os_set_prepare_sleep(func, val_a, val_b, ext) do { } while (0)
 #endif
 
 #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
--- head.orig/include/linux/timex.h	2013-08-15 11:59:07.000000000 +0200
+++ head/include/linux/timex.h	2013-05-23 17:52:27.000000000 +0200
@@ -139,6 +139,9 @@ extern unsigned long tick_nsec;		/* SHIF
 
 extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
+#ifdef CONFIG_XEN
+extern int ntp_synced(void);
+#endif
 
 int read_current_timer(unsigned long *timer_val);
 void ntp_notify_cmos_timer(void);
--- head.orig/include/xen/acpi.h	2013-08-15 11:59:07.000000000 +0200
+++ head/include/xen/acpi.h	2013-08-09 15:36:45.000000000 +0200
@@ -75,8 +75,8 @@ static inline int xen_acpi_get_pxm(acpi_
 	return -ENXIO;
 }
 
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
-				     u32 pm1a_cnt, u32 pm1b_cnd);
+int xen_acpi_notify_hypervisor_state(u8 sleep_state, u32 val_a, u32 val_b,
+				     bool extended);
 
 static inline int xen_acpi_suspend_lowlevel(void)
 {
--- head.orig/include/xen/barrier.h	2012-06-06 13:16:59.000000000 +0200
+++ head/include/xen/barrier.h	2012-06-08 11:37:53.000000000 +0200
@@ -1,7 +1,7 @@
 #ifndef __XEN_BARRIER_H__
 #define __XEN_BARRIER_H__
 
-#include <asm/system.h>
+#include <asm/barrier.h>
 
 #define xen_mb()  mb()
 #define xen_rmb() rmb()
--- head.orig/include/xen/interface/platform.h	2013-06-20 15:28:04.000000000 +0200
+++ head/include/xen/interface/platform.h	2013-06-20 15:28:51.000000000 +0200
@@ -292,7 +292,7 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_i
 #define XENPF_enter_acpi_sleep    51
 struct xenpf_enter_acpi_sleep {
 	/* IN variables */
-#if __XEN_INTERFACE_VERSION__ < 0x00040300
+#if !defined(CONFIG_PARAVIRT_XEN) && __XEN_INTERFACE_VERSION__ < 0x00040300
 	uint16_t pm1a_cnt_val;      /* PM1a control value. */
 	uint16_t pm1b_cnt_val;      /* PM1b control value. */
 #else
--- head.orig/include/xen/xenbus.h	2011-12-21 11:37:00.000000000 +0100
+++ head/include/xen/xenbus.h	2012-04-11 13:26:23.000000000 +0200
@@ -163,9 +163,9 @@ int xenbus_transaction_start(struct xenb
 int xenbus_transaction_end(struct xenbus_transaction t, int abort);
 
 /* Single read and scanf: returns -errno or num scanned if > 0. */
+__scanf(4, 5)
 int xenbus_scanf(struct xenbus_transaction t,
-		 const char *dir, const char *node, const char *fmt, ...)
-	__attribute__((format(scanf, 4, 5)));
+		 const char *dir, const char *node, const char *fmt, ...);
 
 /* Single printf and write: returns -errno or 0. */
 __printf(4, 5)
--- head.orig/kernel/time/ntp.c	2013-08-15 11:59:07.000000000 +0200
+++ head/kernel/time/ntp.c	2013-05-23 17:52:40.000000000 +0200
@@ -234,7 +234,10 @@ static inline void pps_fill_timex(struct
  * ntp_synced - Returns 1 if the NTP status is not UNSYNC
  *
  */
-static inline int ntp_synced(void)
+#ifndef CONFIG_XEN
+static
+#endif
+inline int ntp_synced(void)
 {
 	return !(time_status & STA_UNSYNC);
 }
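
ntp_synced() merely tests the STA_UNSYNC status bit; un-hiding it for CONFIG_XEN lets the Xen time code check NTP discipline, e.g. before pushing wall-clock time back to the hypervisor. A usage sketch (hypothetical caller):

	static void maybe_sync_wallclock(void)
	{
		if (!ntp_synced())
			return;		/* time_status has STA_UNSYNC set */
		/* safe to propagate time to the persistent/Xen clock ... */
	}
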
--- head.orig/lib/swiotlb-xen.c	2012-02-09 12:32:50.000000000 +0100
+++ head/lib/swiotlb-xen.c	2012-04-11 13:26:23.000000000 +0200
@@ -13,7 +13,7 @@
 
 #include <linux/cache.h>
 #include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -292,14 +292,13 @@ void swiotlb_bounce(phys_addr_t phys, ch
 			sz = min_t(size_t, PAGE_SIZE - offset, size);
 
 			local_irq_save(flags);
-			buffer = kmap_atomic(pfn_to_page(pfn),
-					     KM_BOUNCE_READ);
+			buffer = kmap_atomic(pfn_to_page(pfn));
 			if (dir == DMA_TO_DEVICE)
 				memcpy(dma_addr, buffer + offset, sz);
 			else if (__copy_to_user_inatomic(buffer + offset,
 							 dma_addr, sz))
 				/* inaccessible */;
-			kunmap_atomic(buffer, KM_BOUNCE_READ);
+			kunmap_atomic(buffer);
 			local_irq_restore(flags);
 
 			size -= sz;