From: www.kernel.org
Subject: Linux 2.6.18
Patch-mainline: 2.6.18

Automatically created from "patches.kernel.org/patch-2.6.18" by xen-port-patches.py

2.6.35/kernel/time.c change removed (no longer applicable)

Acked-by: jbeulich@novell.com

--- 12.2.orig/arch/x86/Kconfig	2012-06-20 12:12:53.000000000 +0200
+++ 12.2/arch/x86/Kconfig	2012-06-20 12:13:05.000000000 +0200
@@ -1625,7 +1625,7 @@ config KEXEC_JUMP
 	  code in physical address mode via KEXEC
 
 config PHYSICAL_START
-	hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
+	hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP || XEN)
 	default "0x1000000"
 	---help---
 	  This gives the physical address where the kernel is loaded.
--- 12.2.orig/arch/x86/kernel/Makefile	2012-04-10 15:44:48.000000000 +0200
+++ 12.2/arch/x86/kernel/Makefile	2012-04-10 16:11:14.000000000 +0200
@@ -120,5 +120,5 @@ ifeq ($(CONFIG_X86_64),y)
 	pci-dma_64-$(CONFIG_XEN)	+= pci-dma_32.o
 endif
 
-disabled-obj-$(CONFIG_XEN) := i8237.o i8259_$(BITS).o reboot.o smpboot_$(BITS).o
+disabled-obj-$(CONFIG_XEN) := i8237.o i8253.o i8259_$(BITS).o reboot.o smpboot_$(BITS).o tsc_$(BITS).o
 %/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
--- 12.2.orig/arch/x86/kernel/quirks.c	2012-06-20 12:12:06.000000000 +0200
+++ 12.2/arch/x86/kernel/quirks.c	2011-09-07 15:20:23.000000000 +0200
@@ -6,7 +6,7 @@
 
 #include <asm/hpet.h>
 
-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
 
 static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
 {
@@ -34,10 +34,21 @@ static void __devinit quirk_intel_irqbal
 	if (!(word & (1 << 13))) {
 		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
 			"disabling irq balancing and affinity\n");
+#ifndef CONFIG_XEN
 		noirqdebug_setup("");
 #ifdef CONFIG_PROC_FS
 		no_irq_affinity = 1;
 #endif
+#else
+		{
+			struct xen_platform_op op = {
+				.cmd = XENPF_platform_quirk,
+				.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING
+			};
+
+			WARN_ON(HYPERVISOR_platform_op(&op));
+		}
+#endif
 	}
 
 	/* put back the original value for config space*/
--- 12.2.orig/arch/x86/kernel/setup64-xen.c	2008-01-28 12:24:19.000000000 +0100
+++ 12.2/arch/x86/kernel/setup64-xen.c	2011-01-31 17:02:29.000000000 +0100
@@ -363,5 +363,7 @@ void __cpuinit cpu_init (void)
 
 	fpu_init(); 
 
-	raw_local_save_flags(kernel_eflags);
+	asm ("pushfq; popq %0" : "=rm" (kernel_eflags));
+	if (raw_irqs_disabled())
+		kernel_eflags &= ~X86_EFLAGS_IF;
 }
--- 12.2.orig/arch/x86/kernel/time-xen.c	2010-08-31 09:24:21.000000000 +0200
+++ 12.2/arch/x86/kernel/time-xen.c	2011-07-12 11:09:48.000000000 +0200
@@ -45,7 +45,6 @@
 #include <linux/sysdev.h>
 #include <linux/bcd.h>
 #include <linux/efi.h>
-#include <linux/mca.h>
 #include <linux/sysctl.h>
 #include <linux/percpu.h>
 #include <linux/kernel_stat.h>
@@ -76,8 +75,13 @@
 
 #if defined (__i386__)
 #include <asm/i8259.h>
+#include <asm/i8253.h>
+DEFINE_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
 #endif
 
+#define XEN_SHIFT 22
+
 int pit_latch_buggy;              /* extern */
 
 #if defined(__x86_64__)
@@ -97,10 +101,6 @@ extern unsigned long wall_jiffies;
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-extern struct init_timer_opts timer_tsc_init;
-extern struct timer_opts timer_tsc;
-#define timer_none timer_tsc
-
 /* These are peridically updated in shared_info, and then copied here. */
 struct shadow_time_info {
 	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
@@ -172,24 +172,6 @@ static int __init __permitted_clock_jitt
 }
 __setup("permitted_clock_jitter=", __permitted_clock_jitter);
 
-#if 0
-static void delay_tsc(unsigned long loops)
-{
-	unsigned long bclock, now;
-
-	rdtscl(bclock);
-	do {
-		rep_nop();
-		rdtscl(now);
-	} while ((now - bclock) < loops);
-}
-
-struct timer_opts timer_tsc = {
-	.name = "tsc",
-	.delay = delay_tsc,
-};
-#endif
-
 /*
  * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
  * yielding a 64-bit result.
@@ -226,14 +208,6 @@ static inline u64 scale_delta(u64 delta,
 	return product;
 }
 
-#if 0 /* defined (__i386__) */
-int read_current_timer(unsigned long *timer_val)
-{
-	rdtscl(*timer_val);
-	return 0;
-}
-#endif
-
 void init_cpu_khz(void)
 {
 	u64 __cpu_khz = 1000000ULL << 32;
@@ -253,6 +227,7 @@ static u64 get_nsec_offset(struct shadow
 	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
 }
 
+#ifdef CONFIG_X86_64
 static unsigned long get_usec_offset(struct shadow_time_info *shadow)
 {
 	u64 now, delta;
@@ -260,6 +235,7 @@ static unsigned long get_usec_offset(str
 	delta = now - shadow->tsc_timestamp;
 	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
 }
+#endif
 
 static void __update_wallclock(time_t sec, long nsec)
 {
@@ -374,6 +350,8 @@ void rtc_cmos_write(unsigned char val, u
 }
 EXPORT_SYMBOL(rtc_cmos_write);
 
+#ifdef CONFIG_X86_64
+
 static struct {
 	spinlock_t lock;
 	struct timeval tv;
@@ -467,16 +445,25 @@ static inline void monotonic_reset(void)
 }
 
 int do_settimeofday(struct timespec *tv)
+#elif defined(CONFIG_XEN_PRIVILEGED_GUEST)
+int xen_update_wallclock(const struct timespec *tv)
+#endif
+#if defined(CONFIG_X86_64) || defined(CONFIG_XEN_PRIVILEGED_GUEST)
 {
 	time_t sec;
 	s64 nsec;
 	unsigned int cpu;
 	struct shadow_time_info *shadow;
 	struct xen_platform_op op;
+#ifdef CONFIG_X86_64
+	int warp = 0;
 
 	if (unlikely(!tv)) {
 		monotonic_reset();
-		return 0;
+		if (!is_initial_xendomain() || independent_wallclock)
+			return 0;
+		tv = &xtime;
+		warp = 1;
 	}
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
@@ -484,11 +471,18 @@ int do_settimeofday(struct timespec *tv)
 
 	if (!is_initial_xendomain() && !independent_wallclock)
 		return -EPERM;
+#else
+	if (!is_initial_xendomain() || independent_wallclock)
+		return -EPERM;
+#endif
 
 	cpu = get_cpu();
 	shadow = &per_cpu(shadow_time, cpu);
 
-	write_seqlock_irq(&xtime_lock);
+#ifdef CONFIG_X86_64
+	if (!warp)
+		write_seqlock_irq(&xtime_lock);
+#endif
 
 	/*
 	 * Ensure we don't get blocked for a long time so that our time delta
@@ -511,24 +505,37 @@ int do_settimeofday(struct timespec *tv)
 		op.u.settime.system_time = shadow->system_timestamp;
 		WARN_ON(HYPERVISOR_platform_op(&op));
 		update_wallclock();
+#ifdef CONFIG_X86_64
+		if (warp) {
+			put_cpu();
+			return 0;
+		}
+#endif
 	} else if (independent_wallclock) {
 		nsec -= shadow->system_timestamp;
 		__normalize_time(&sec, &nsec);
 		__update_wallclock(sec, nsec);
 	}
+#ifdef CONFIG_X86_64
 	ntp_clear();
 
 	monotonic_reset();
 
 	write_sequnlock_irq(&xtime_lock);
+#endif
 
 	put_cpu();
 
+#ifdef CONFIG_X86_64
 	clock_was_set();
+#endif
 	return 0;
 }
+#endif
 
+#ifdef CONFIG_X86_64
 EXPORT_SYMBOL(do_settimeofday);
+#endif
 
 static void sync_xen_wallclock(unsigned long dummy);
 static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
@@ -581,11 +588,15 @@ static int set_rtc_mmss(unsigned long no
 	return retval;
 }
 
+#ifdef CONFIG_X86_64
 /* monotonic_clock(): returns # of nanoseconds passed since time_init()
  *		Note: This function is required to return accurate
  *		time even in the absence of multiple timer ticks.
  */
 unsigned long long monotonic_clock(void)
+#else
+unsigned long long sched_clock(void)
+#endif
 {
 	unsigned int cpu = get_cpu();
 	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
@@ -605,9 +616,9 @@ unsigned long long monotonic_clock(void)
 
 	return time;
 }
+#ifdef CONFIG_X86_64
 EXPORT_SYMBOL(monotonic_clock);
 
-#ifdef __x86_64__
 unsigned long long sched_clock(void)
 {
 	return monotonic_clock();
@@ -780,6 +791,89 @@ irqreturn_t timer_interrupt(int irq, voi
 	return IRQ_HANDLED;
 }
 
+#ifndef CONFIG_X86_64
+
+void tsc_init(void)
+{
+	init_cpu_khz();
+	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
+	       cpu_khz / 1000, cpu_khz % 1000);
+
+	use_tsc_delay();
+}
+
+#include <linux/clocksource.h>
+
+void mark_tsc_unstable(void)
+{
+#ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */
+	tsc_unstable = 1;
+#endif
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+static cycle_t xen_clocksource_read(void)
+{
+#ifdef CONFIG_SMP
+	static cycle_t last_ret;
+#ifndef CONFIG_64BIT
+	cycle_t last = cmpxchg64(&last_ret, 0, 0);
+#else
+	cycle_t last = last_ret;
+#define cmpxchg64 cmpxchg
+#endif
+	cycle_t ret = sched_clock();
+
+	if (unlikely((s64)(ret - last) < 0)) {
+		if (last - ret > permitted_clock_jitter
+		    && printk_ratelimit()) {
+			unsigned int cpu = get_cpu();
+			struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
+
+			printk(KERN_WARNING "clocksource/%u: "
+			       "Time went backwards: "
+			       "ret=%Lx delta=%Ld shadow=%Lx offset=%Lx\n",
+			       cpu, ret, ret - last, shadow->system_timestamp,
+			       get_nsec_offset(shadow));
+			put_cpu();
+		}
+		return last;
+	}
+
+	for (;;) {
+		cycle_t cur = cmpxchg64(&last_ret, last, ret);
+
+		if (cur == last || (s64)(ret - cur) < 0)
+			return ret;
+		last = cur;
+	}
+#else
+	return sched_clock();
+#endif
+}
+
+static struct clocksource clocksource_xen = {
+	.name			= "xen",
+	.rating			= 400,
+	.read			= xen_clocksource_read,
+	.mask			= CLOCKSOURCE_MASK(64),
+	.mult			= 1 << XEN_SHIFT,		/* time directly in nanoseconds */
+	.shift			= XEN_SHIFT,
+	.is_continuous		= 1,
+};
+
+static int __init init_xen_clocksource(void)
+{
+	clocksource_xen.mult = clocksource_khz2mult(cpu_khz,
+						clocksource_xen.shift);
+
+	return clocksource_register(&clocksource_xen);
+}
+
+module_init(init_xen_clocksource);
+
+#endif
+
 static void init_missing_ticks_accounting(unsigned int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
@@ -926,7 +1020,7 @@ static void setup_cpu0_timer_irq(void)
 			VIRQ_TIMER,
 			0,
 			timer_interrupt,
-			SA_INTERRUPT,
+			IRQF_DISABLED|IRQF_TIMER,
 			"timer0",
 			NULL);
 	BUG_ON(per_cpu(timer_irq, 0) < 0);
@@ -968,11 +1062,11 @@ void __init time_init(void)
 
 	update_wallclock();
 
+#ifdef CONFIG_X86_64
 	init_cpu_khz();
 	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
 	       cpu_khz / 1000, cpu_khz % 1000);
 
-#if defined(__x86_64__)
 	vxtime.mode = VXTIME_TSC;
 	vxtime.quot = (1000000L << 32) / vxtime_hz;
 	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
@@ -1147,7 +1241,7 @@ int __cpuinit local_setup_timer(unsigned
 	irq = bind_virq_to_irqhandler(VIRQ_TIMER,
 				      cpu,
 				      timer_interrupt,
-				      SA_INTERRUPT,
+				      IRQF_DISABLED|IRQF_TIMER,
 				      timer_name[cpu],
 				      NULL);
 	if (irq < 0)
--- 12.2.orig/arch/x86/pci/irq.c	2012-06-20 12:12:06.000000000 +0200
+++ 12.2/arch/x86/pci/irq.c	2011-08-09 10:33:29.000000000 +0200
@@ -94,13 +94,18 @@ static struct irq_routing_table * __init
 	u8 *addr;
 	struct irq_routing_table *rt;
 
+#ifdef CONFIG_XEN
+	if (!is_initial_xendomain())
+		return NULL;
+#endif
 	if (pirq_table_addr) {
-		rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr));
+		rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
 		if (rt)
 			return rt;
 		printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
 	}
-	for (addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) {
+	for (addr = (u8 *) isa_bus_to_virt(0xf0000);
+	     addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
 		rt = pirq_check_routing_table(addr);
 		if (rt)
 			return rt;
--- 12.2.orig/drivers/acpi/processor_perflib.c	2012-02-16 11:43:50.000000000 +0100
+++ 12.2/drivers/acpi/processor_perflib.c	2012-02-16 12:28:45.000000000 +0100
@@ -596,6 +596,8 @@ end:
 	return result;
 }
 
+#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
+
 int acpi_processor_preregister_performance(
 		struct acpi_processor_performance __percpu *performance)
 {
@@ -811,3 +813,5 @@ acpi_processor_unregister_performance(st
 }
 
 EXPORT_SYMBOL(acpi_processor_unregister_performance);
+
+#endif /* !CONFIG_PROCESSOR_EXTERNAL_CONTROL */
--- 12.2.orig/arch/x86/include/mach-xen/asm/processor_32.h	2008-01-28 12:24:19.000000000 +0100
+++ 12.2/arch/x86/include/mach-xen/asm/processor_32.h	2011-01-31 17:02:29.000000000 +0100
@@ -23,7 +23,7 @@
 #include <xen/interface/physdev.h>
 
 /* flag for disabling the tsc */
-extern int tsc_disable;
+#define tsc_disable 0
 
 struct desc_struct {
 	unsigned long a,b;
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ 12.2/arch/x86/include/mach-xen/asm/time.h	2011-07-11 10:35:24.000000000 +0200
@@ -0,0 +1,6 @@
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+struct timespec;
+int xen_update_wallclock(const struct timespec *);
+#endif
+
+#include_next <asm/time.h>
--- 12.2.orig/arch/x86/include/asm/thread_info.h	2012-06-20 12:12:06.000000000 +0200
+++ 12.2/arch/x86/include/asm/thread_info.h	2012-04-10 16:11:24.000000000 +0200
@@ -147,11 +147,15 @@ struct thread_info {
 	 _TIF_USER_RETURN_NOTIFY)
 
 /* flags to check in __switch_to() */
+#ifndef CONFIG_XEN
 #define _TIF_WORK_CTXSW							\
 	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
+#else
+#define _TIF_WORK_CTXSW _TIF_DEBUG
+#endif
 
 #define PREEMPT_ACTIVE		0x10000000
 
--- 12.2.orig/kernel/time/timekeeping.c	2012-06-20 12:12:06.000000000 +0200
+++ 12.2/kernel/time/timekeeping.c	2012-04-10 16:11:34.000000000 +0200
@@ -20,6 +20,9 @@
 #include <linux/time.h>
 #include <linux/tick.h>
 #include <linux/stop_machine.h>
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#include <asm/time.h>
+#endif
 
 /* Structure holding internal timekeeping values. */
 struct timekeeper {
@@ -384,6 +387,9 @@ int do_settimeofday(const struct timespe
 
 	timekeeper.xtime = *tv;
 	timekeeping_update(true);
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+	xen_update_wallclock(tv);
+#endif
 
 	write_sequnlock_irqrestore(&timekeeper.lock, flags);