From: Mike Galbraith <mgalbraith@suse.de>
Date: Mon, 20 Nov 2017 08:24:32 +0100
Subject: Rebase "Provide-softirq-context-hrtimers-RT-fixups" to v4.14-rt
Patch-mainline: Never, SLE15-RT specific
References: SLE Realtime Extension

SLE15-RT is based upon v4.11-rt, and both it and v4.13-rt, the only trees
containing this codebase, are going to be abandoned, leaving SLE15-RT with
no stable base for this critical subsystem.  Roll it forward to the latest
released version, v4.14-rt.  Since v4.14 is an LTS kernel, this rebase
should remain relevant for the foreseeable future.
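
For illustration, the shape of the API change being pulled in: the soft or
hard expiry context of a timer now travels in the hrtimer mode argument
instead of in pseudo clock ids (sketch only; "t" and "expires" are made-up
names, not identifiers from the tree):

    /* v4.11-rt style: expiry context encoded in the clock id */
    hrtimer_init(&t, CLOCK_MONOTONIC_SOFT, HRTIMER_MODE_REL);
    hrtimer_start(&t, expires, HRTIMER_MODE_REL);

    /* v4.14-rt style: expiry context encoded in the mode bits */
    hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
    hrtimer_start(&t, expires, HRTIMER_MODE_REL_SOFT);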

Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
 arch/x86/kvm/lapic.c                  |    4 
 drivers/net/wireless/mac80211_hwsim.c |    8 
 include/linux/hrtimer.h               |   84 +++---
 include/trace/events/timer.h          |   19 -
 kernel/events/core.c                  |    4 
 kernel/sched/core.c                   |    2 
 kernel/sched/deadline.c               |    2 
 kernel/sched/rt.c                     |    4 
 kernel/time/hrtimer.c                 |  472 +++++++++++++++++-----------------
 kernel/time/tick-broadcast-hrtimer.c  |    2 
 kernel/time/tick-internal.h           |   12 
 kernel/time/tick-sched.c              |    4 
 kernel/time/timer.c                   |  111 ++++---
 kernel/watchdog.c                     |    2 
 net/can/bcm.c                         |   24 -
 net/xfrm/xfrm_state.c                 |   13 
 16 files changed, 411 insertions(+), 356 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 66a94b1c7d86..b618eeaa5d49 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2184,8 +2184,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	}
 	apic->vcpu = vcpu;
 
-	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC_HARD,
-		     HRTIMER_MODE_ABS_PINNED);
+	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_ABS_PINNED_HARD);
 	apic->lapic_timer.timer.function = apic_timer_fn;
 
 	/*
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7f35e6672d80..0ee9e45b7d2c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1703,7 +1703,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
 
 		hrtimer_start(&data->beacon_timer,
 			      ns_to_ktime(until_tbtt * 1000),
-			      HRTIMER_MODE_REL);
+			      HRTIMER_MODE_REL_SOFT);
 	}
 
 	return 0;
@@ -1776,7 +1776,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
 			until_tbtt = bcn_int - do_div(tsf, bcn_int);
 			hrtimer_start(&data->beacon_timer,
 				      ns_to_ktime(until_tbtt * 1000),
-				      HRTIMER_MODE_REL);
+				      HRTIMER_MODE_REL_SOFT);
 		} else if (!info->enable_beacon) {
 			unsigned int count = 0;
 			ieee80211_iterate_active_interfaces_atomic(
@@ -2874,8 +2874,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 
 	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
 
-	hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC_SOFT,
-		     HRTIMER_MODE_ABS);
+	hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_ABS_SOFT);
 	data->beacon_timer.function = mac80211_hwsim_beacon;
 
 	err = ieee80211_register_hw(hw);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 4a385e58a2e9..f4b867805ff4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -24,35 +24,40 @@
 #include <linux/timerqueue.h>
 #include <linux/wait.h>
 
-/*
- * Clock ids for hrtimers which expire in softirq context. These clock ids
- * are kernel internal and never exported to user space.
- */
-#define HRTIMER_BASE_SOFT_MASK	MAX_CLOCKS
-#define HRTIMER_BASE_HARD_MASK	(MAX_CLOCKS << 1)
-
-#define CLOCK_REALTIME_SOFT	(CLOCK_REALTIME	 | HRTIMER_BASE_SOFT_MASK)
-#define CLOCK_MONOTONIC_SOFT	(CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
-#define CLOCK_BOOTTIME_SOFT	(CLOCK_BOOTTIME	 | HRTIMER_BASE_SOFT_MASK)
-#define CLOCK_TAI_SOFT		(CLOCK_TAI	 | HRTIMER_BASE_SOFT_MASK)
-
-#define CLOCK_REALTIME_HARD	(CLOCK_REALTIME	 | HRTIMER_BASE_HARD_MASK)
-#define CLOCK_MONOTONIC_HARD	(CLOCK_MONOTONIC | HRTIMER_BASE_HARD_MASK)
-#define CLOCK_BOOTTIME_HARD	(CLOCK_BOOTTIME	 | HRTIMER_BASE_HARD_MASK)
-#define CLOCK_TAI_HARD		(CLOCK_TAI	 | HRTIMER_BASE_HARD_MASK)
-
 struct hrtimer_clock_base;
 struct hrtimer_cpu_base;
 
 /*
  * Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS		- Time value is absolute
+ * HRTIMER_MODE_REL		- Time value is relative to now
+ * HRTIMER_MODE_PINNED		- Timer is bound to CPU (is only considered
+ *				  when starting the timer)
+ * HRTIMER_MODE_SOFT		- Timer callback function will be executed in
+ *				  soft irq context
  */
 enum hrtimer_mode {
-	HRTIMER_MODE_ABS = 0x0,		/* Time value is absolute */
-	HRTIMER_MODE_REL = 0x1,		/* Time value is relative to now */
-	HRTIMER_MODE_PINNED = 0x02,	/* Timer is bound to CPU */
-	HRTIMER_MODE_ABS_PINNED = 0x02,
-	HRTIMER_MODE_REL_PINNED = 0x03,
+	HRTIMER_MODE_ABS	= 0x00,
+	HRTIMER_MODE_REL	= 0x01,
+	HRTIMER_MODE_PINNED	= 0x02,
+	HRTIMER_MODE_SOFT	= 0x04,
+	HRTIMER_MODE_HARD	= 0x08,
+
+	HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+	HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+	HRTIMER_MODE_ABS_SOFT	= HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+	HRTIMER_MODE_REL_SOFT	= HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+	HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+	HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
+	HRTIMER_MODE_ABS_HARD	= HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
+	HRTIMER_MODE_REL_HARD	= HRTIMER_MODE_REL | HRTIMER_MODE_HARD,
+
+	HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
+	HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
 };
 
 /*
@@ -115,6 +120,7 @@ struct hrtimer {
 	struct hrtimer_clock_base	*base;
 	u8				state;
 	u8				is_rel;
+	u8				is_soft;
 };
 
 /**
@@ -177,13 +183,11 @@ enum  hrtimer_base_type {
  * @cpu:		cpu number
  * @active_bases:	Bitfield to mark bases with active timers
  * @clock_was_set_seq:	Sequence counter of clock was set events
- * @migration_enabled:	The migration of hrtimers to other cpus is enabled
- * @nohz_active:	The nohz functionality is enabled
- * @softirq_activated:	displays, if the softirq is raised - update of softirq
- *			related settings is not required then.
- * @in_hrtirq:		hrtimer_interrupt() is currently executing
  * @hres_active:	State of high resolution mode
+ * @in_hrtirq:		hrtimer_interrupt() is currently executing
  * @hang_detected:	The last hrtimer interrupt detected a hang
+ * @softirq_activated:	indicates whether the softirq is raised - update of
+ *			softirq related settings is not required then.
  * @nr_events:		Total number of hrtimer interrupt events
  * @nr_retries:		Total number of hrtimer interrupt retries
  * @nr_hangs:		Total number of hrtimer interrupt hangs
@@ -193,6 +197,7 @@ enum  hrtimer_base_type {
  *			and soft hrtimer are taken into account)
  * @next_timer:		Pointer to the first expiring timer
  * @softirq_expires_next: Time to check, if soft queues needs also to be expired
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
  * @clock_base:		array of clock bases for this cpu
  *
  * Note: next_timer is just an optimization for __remove_hrtimer().
@@ -204,16 +209,14 @@ struct hrtimer_cpu_base {
 	unsigned int			cpu;
 	unsigned int			active_bases;
 	unsigned int			clock_was_set_seq;
-	bool				migration_enabled;
-	bool				nohz_active;
-	bool				softirq_activated;
-	unsigned int			hres_active	: 1,
-					in_hrtirq	: 1,
-					hang_detected	: 1;
+	unsigned int			hres_active		: 1,
+					in_hrtirq		: 1,
+					hang_detected		: 1,
+					softirq_activated       : 1;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	unsigned int			nr_events;
-	unsigned int			nr_retries;
-	unsigned int			nr_hangs;
+	unsigned short			nr_retries;
+	unsigned short			nr_hangs;
 	unsigned int			max_hang_time;
 #endif
 	ktime_t				expires_next;
@@ -222,6 +225,7 @@ struct hrtimer_cpu_base {
 #ifdef CONFIG_PREEMPT_RT_BASE
 	wait_queue_head_t		wait;
 #endif
+	struct hrtimer			*softirq_next_timer;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 } ____cacheline_aligned;
 
@@ -297,7 +301,8 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
 
 static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
-	return timer->base->cpu_base->hres_active;
+	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+		timer->base->cpu_base->hres_active : 0;
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -405,11 +410,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 				   u64 range_ns, const enum hrtimer_mode mode);
 
 /**
- * hrtimer_start - (re)start an hrtimer on the current CPU
+ * hrtimer_start - (re)start an hrtimer
  * @timer:	the timer to be added
  * @tim:	expiry time
- * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
- *		relative (HRTIMER_MODE_REL)
+ * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
+ *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ *		softirq based mode is considered for debug purposes only!
  */
 static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
 				 const enum hrtimer_mode mode)
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index a3c535c87253..daf10c3a845b 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -147,7 +147,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
 		{ HRTIMER_MODE_ABS,		"ABS"		},	\
 		{ HRTIMER_MODE_REL,		"REL"		},	\
 		{ HRTIMER_MODE_ABS_PINNED,	"ABS|PINNED"	},	\
-		{ HRTIMER_MODE_REL_PINNED,	"REL|PINNED"	})
+		{ HRTIMER_MODE_REL_PINNED,	"REL|PINNED"	},	\
+		{ HRTIMER_MODE_ABS_SOFT,	"ABS|SOFT"	},	\
+		{ HRTIMER_MODE_REL_SOFT,	"REL|SOFT"	},	\
+		{ HRTIMER_MODE_ABS_PINNED_SOFT,	"ABS|PINNED|SOFT" },	\
+		{ HRTIMER_MODE_REL_PINNED_SOFT,	"REL|PINNED|SOFT" })
 
 /**
  * hrtimer_init - called when the hrtimer is initialized
@@ -185,15 +189,16 @@ TRACE_EVENT(hrtimer_init,
  */
 TRACE_EVENT(hrtimer_start,
 
-	TP_PROTO(struct hrtimer *hrtimer),
+	TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
 
-	TP_ARGS(hrtimer),
+	TP_ARGS(hrtimer, mode),
 
 	TP_STRUCT__entry(
 		__field( void *,	hrtimer		)
 		__field( void *,	function	)
 		__field( s64,		expires		)
 		__field( s64,		softexpires	)
+		__field( enum hrtimer_mode,	mode	)
 	),
 
 	TP_fast_assign(
@@ -201,12 +206,14 @@ TRACE_EVENT(hrtimer_start,
 		__entry->function	= hrtimer->function;
 		__entry->expires	= hrtimer_get_expires(hrtimer);
 		__entry->softexpires	= hrtimer_get_softexpires(hrtimer);
+		__entry->mode		= mode;
 	),
 
-	TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
-		  __entry->hrtimer, __entry->function,
+	TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
+		  "mode=%s", __entry->hrtimer, __entry->function,
 		  (unsigned long long) __entry->expires,
-		  (unsigned long long) __entry->softexpires)
+		  (unsigned long long) __entry->softexpires,
+		  decode_hrtimer_mode(__entry->mode))
 );
 
 /**
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e7067f14de64..2964d9aed108 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1084,7 +1084,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
 	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
 
 	raw_spin_lock_init(&cpuctx->hrtimer_lock);
-	hrtimer_init(timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS_PINNED);
+	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
 	timer->function = perf_mux_hrtimer_handler;
 }
 
@@ -9068,7 +9068,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
 	if (!is_sampling_event(event))
 		return;
 
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 	hwc->hrtimer.function = perf_swevent_hrtimer;
 
 	/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 96aaa3a283b1..fc3389bac48c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -344,7 +344,7 @@ static void init_rq_hrtick(struct rq *rq)
 	rq->hrtick_csd.info = rq;
 #endif
 
-	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 	rq->hrtick_timer.function = hrtick;
 }
 #else	/* CONFIG_SCHED_HRTICK */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b0d6c20a685e..2cf17b471867 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -689,7 +689,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 {
 	struct hrtimer *timer = &dl_se->dl_timer;
 
-	hrtimer_init(timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 	timer->function = dl_task_timer;
 }
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 404499b6d05e..1eeb03d1f6fb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -46,8 +46,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 
 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
-	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC_HARD,
-		     HRTIMER_MODE_REL);
+	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL_HARD);
 	rt_b->rt_period_timer.function = sched_rt_period_timer;
 }
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 2e583fc55831..3a165921766f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -65,6 +65,10 @@
 #define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
 #define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
 #define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
+/* Define for debug mode check */
+#define HRTIMER_MODECHECK	true
 
 /*
  * The timer bases:
@@ -101,53 +105,35 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 		},
 		{
 			.index = HRTIMER_BASE_MONOTONIC_SOFT,
-			.clockid = CLOCK_MONOTONIC_SOFT,
+			.clockid = CLOCK_MONOTONIC,
 			.get_time = &ktime_get,
 		},
 		{
 			.index = HRTIMER_BASE_REALTIME_SOFT,
-			.clockid = CLOCK_REALTIME_SOFT,
+			.clockid = CLOCK_REALTIME,
 			.get_time = &ktime_get_real,
 		},
 		{
 			.index = HRTIMER_BASE_BOOTTIME_SOFT,
-			.clockid = CLOCK_BOOTTIME_SOFT,
+			.clockid = CLOCK_BOOTTIME,
 			.get_time = &ktime_get_boottime,
 		},
 		{
 			.index = HRTIMER_BASE_TAI_SOFT,
-			.clockid = CLOCK_TAI_SOFT,
+			.clockid = CLOCK_TAI,
 			.get_time = &ktime_get_clocktai,
 		},
 	}
 };
 
-#define MAX_CLOCKS_HRT		(MAX_CLOCKS * 3)
-
-static const int hrtimer_clock_to_base_table[MAX_CLOCKS_HRT] = {
+static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 	/* Make sure we catch unsupported clockids */
-	[0 ... MAX_CLOCKS_HRT - 1]	= HRTIMER_MAX_CLOCK_BASES,
+	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-	[CLOCK_REALTIME]		= HRTIMER_BASE_REALTIME_SOFT,
-	[CLOCK_MONOTONIC]		= HRTIMER_BASE_MONOTONIC_SOFT,
-	[CLOCK_BOOTTIME]		= HRTIMER_BASE_BOOTTIME_SOFT,
-	[CLOCK_TAI]			= HRTIMER_BASE_TAI_SOFT,
-#else
-	[CLOCK_REALTIME]		= HRTIMER_BASE_REALTIME,
-	[CLOCK_MONOTONIC]		= HRTIMER_BASE_MONOTONIC,
-	[CLOCK_BOOTTIME]		= HRTIMER_BASE_BOOTTIME,
-	[CLOCK_TAI]			= HRTIMER_BASE_TAI,
-#endif
-	[CLOCK_REALTIME_SOFT]		= HRTIMER_BASE_REALTIME_SOFT,
-	[CLOCK_MONOTONIC_SOFT]		= HRTIMER_BASE_MONOTONIC_SOFT,
-	[CLOCK_BOOTTIME_SOFT]		= HRTIMER_BASE_BOOTTIME_SOFT,
-	[CLOCK_TAI_SOFT]		= HRTIMER_BASE_TAI_SOFT,
-
-	[CLOCK_REALTIME_HARD]		= HRTIMER_BASE_REALTIME,
-	[CLOCK_MONOTONIC_HARD]		= HRTIMER_BASE_MONOTONIC,
-	[CLOCK_BOOTTIME_HARD]		= HRTIMER_BASE_BOOTTIME,
-	[CLOCK_TAI_HARD]		= HRTIMER_BASE_TAI,
+	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
+	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
+	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
+	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
 };
 
 /*
@@ -216,23 +202,16 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 	return expires < new_base->cpu_base->expires_next;
 }
 
-#ifdef CONFIG_NO_HZ_COMMON
-static inline
-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
-					 int pinned)
-{
-	if (pinned || !base->migration_enabled)
-		return base;
-	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
-}
-#else
 static inline
 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 					 int pinned)
 {
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+	if (static_branch_unlikely(&timers_migration_enabled) && !pinned)
+		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+#endif
 	return base;
 }
-#endif
 
 /*
  * We switch the timer base to a power-optimized selected CPU target,
@@ -434,8 +413,17 @@ static inline void debug_hrtimer_init(struct hrtimer *timer)
 	debug_object_init(timer, &hrtimer_debug_descr);
 }
 
-static inline void debug_hrtimer_activate(struct hrtimer *timer)
+static inline void debug_hrtimer_activate(struct hrtimer *timer,
+					  enum hrtimer_mode mode,
+					  bool modecheck)
 {
+	/*
+	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
+	 * match, when a timer is started via __hrtimer_start_range_ns().
+	 */
+	if (modecheck)
+		WARN_ON_ONCE((mode & HRTIMER_MODE_SOFT) && !timer->is_soft);
+
 	debug_object_activate(timer, &hrtimer_debug_descr);
 }
 
@@ -467,8 +455,11 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
 EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
 
 #else
+
 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
-static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
+static inline void debug_hrtimer_activate(struct hrtimer *timer,
+					  enum hrtimer_mode mode,
+					  bool modecheck) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
@@ -480,10 +471,12 @@ debug_init(struct hrtimer *timer, clockid_t clockid,
 	trace_hrtimer_init(timer, clockid, mode);
 }
 
-static inline void debug_activate(struct hrtimer *timer)
+static inline void debug_activate(struct hrtimer *timer,
+				  enum hrtimer_mode mode,
+				  bool modecheck)
 {
-	debug_hrtimer_activate(timer);
-	trace_hrtimer_start(timer);
+	debug_hrtimer_activate(timer, mode, modecheck);
+	trace_hrtimer_start(timer, mode);
 }
 
 static inline void debug_deactivate(struct hrtimer *timer)
@@ -492,33 +485,43 @@ static inline void debug_deactivate(struct hrtimer *timer)
 	trace_hrtimer_cancel(timer);
 }
 
-static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
-					     struct hrtimer *timer)
+static struct hrtimer_clock_base *
+__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
 {
-	cpu_base->next_timer = timer;
+	struct hrtimer_clock_base *base = NULL;
+
+	if (*active) {
+		unsigned int idx = __ffs(*active);
+		*active &= ~(1U << idx);
+		base = &cpu_base->clock_base[idx];
+	}
+
+	return base;
 }
 
+#define for_each_active_base(base, cpu_base, active)	\
+	while ((base = __next_base((cpu_base), &(active))))
+
 static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 					 unsigned int active,
 					 ktime_t expires_next)
 {
+	struct hrtimer_clock_base *base;
 	ktime_t expires;
 
-	while (active) {
-		unsigned int id = __ffs(active);
-		struct hrtimer_clock_base *base;
+	for_each_active_base(base, cpu_base, active) {
 		struct timerqueue_node *next;
 		struct hrtimer *timer;
 
-		active &= ~(1U << id);
-		base = cpu_base->clock_base + id;
-
 		next = timerqueue_getnext(&base->active);
 		timer = container_of(next, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 		if (expires < expires_next) {
 			expires_next = expires;
-			hrtimer_update_next_timer(cpu_base, timer);
+			if (timer->is_soft)
+				cpu_base->softirq_next_timer = timer;
+			else
+				cpu_base->next_timer = timer;
 		}
 	}
 	/*
@@ -531,27 +534,43 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 	return expires_next;
 }
 
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ *
+ * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
+ * those timers will get run whenever the softirq gets handled; at the end of
+ * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
+ *
+ * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
+ * The !softirq values are the minima across HRTIMER_ACTIVE, unless an actual
+ * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
+ *
+ * @active_mask must be one of:
+ *  - HRTIMER_ACTIVE_ALL,
+ *  - HRTIMER_ACTIVE_SOFT, or
+ *  - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t
+__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
 {
 	unsigned int active;
+	struct hrtimer *next_timer = NULL;
 	ktime_t expires_next = KTIME_MAX;
 
-	hrtimer_update_next_timer(cpu_base, NULL);
-
-	if (!cpu_base->softirq_activated) {
+	if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
 		active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
-		expires_next = __hrtimer_next_event_base(cpu_base, active,
-							 expires_next);
-		cpu_base->softirq_expires_next = expires_next;
-	}
+		cpu_base->softirq_next_timer = NULL;
+		expires_next = __hrtimer_next_event_base(cpu_base, active, KTIME_MAX);
 
-	active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
-	expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+		next_timer = cpu_base->softirq_next_timer;
+	}
 
-	/*
-	 * cpu_base->expires_next is not updated here. It is set only
-	 * in hrtimer_reprogramming path!
-	 */
+	if (active_mask & HRTIMER_ACTIVE_HARD) {
+		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+		cpu_base->next_timer = next_timer;
+		expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+	}
 
 	return expires_next;
 }
@@ -577,7 +596,8 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
  */
 static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
 {
-	return cpu_base->hres_active;
+	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+		cpu_base->hres_active : 0;
 }
 
 static inline int hrtimer_hres_active(void)
@@ -585,34 +605,6 @@ static inline int hrtimer_hres_active(void)
 	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
 }
 
-/* High resolution timer related functions */
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * High resolution timer enabled ?
- */
-static bool hrtimer_hres_enabled __read_mostly  = true;
-unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
-EXPORT_SYMBOL_GPL(hrtimer_resolution);
-
-/*
- * Enable / Disable high resolution mode
- */
-static int __init setup_hrtimer_hres(char *str)
-{
-	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
-}
-
-__setup("highres=", setup_hrtimer_hres);
-
-/*
- * hrtimer_high_res_enabled - query, if the highres mode is enabled
- */
-static inline int hrtimer_is_hres_enabled(void)
-{
-	return hrtimer_hres_enabled;
-}
-
 /*
  * Reprogram the event source with checking both queues for the
  * next event
@@ -623,10 +615,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
 	ktime_t expires_next;
 
-	if (!cpu_base->hres_active)
-		return;
+	/*
+	 * Find the current next expiration time.
+	 */
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
 
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
+		/*
+		 * When the softirq is activated, hrtimer has to be
+		 * programmed with the first hard hrtimer because soft
+		 * timer interrupt could occur too late.
+		 */
+		if (cpu_base->softirq_activated)
+			expires_next = __hrtimer_get_next_event(cpu_base,
+								HRTIMER_ACTIVE_HARD);
+		else
+			cpu_base->softirq_expires_next = expires_next;
+	}
 
 	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
@@ -634,6 +639,9 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 	cpu_base->expires_next = expires_next;
 
 	/*
+	 * If hres is not active, hardware does not have to be
+	 * reprogrammed yet.
+	 *
 	 * If a hang was detected in the last timer interrupt then we
 	 * leave the hang delay active in the hardware. We want the
 	 * system to make progress. That also prevents the following
@@ -647,12 +655,40 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 	 * set. So we'd effectivly block all timers until the T2 event
 	 * fires.
 	 */
-	if (cpu_base->hang_detected)
+	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
 		return;
 
 	tick_program_event(cpu_base->expires_next, 1);
 }
 
+/* High resolution timer related functions */
+#ifdef CONFIG_HIGH_RES_TIMERS
+
+/*
+ * High resolution timer enabled ?
+ */
+static bool hrtimer_hres_enabled __read_mostly  = true;
+unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
+EXPORT_SYMBOL_GPL(hrtimer_resolution);
+
+/*
+ * Enable / Disable high resolution mode
+ */
+static int __init setup_hrtimer_hres(char *str)
+{
+	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
+}
+
+__setup("highres=", setup_hrtimer_hres);
+
+/*
+ * hrtimer_high_res_enabled - query, if the highres mode is enabled
+ */
+static inline int hrtimer_is_hres_enabled(void)
+{
+	return hrtimer_hres_enabled;
+}
+
 /*
  * Retrigger next event is called after clock was set
  *
@@ -662,7 +698,7 @@ static void retrigger_next_event(void *arg)
 {
 	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
-	if (!base->hres_active)
+	if (!__hrtimer_hres_active(base))
 		return;
 
 	raw_spin_lock(&base->lock);
@@ -735,8 +771,6 @@ void clock_was_set_delayed(void)
 
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline void hrtimer_switch_to_hres(void) { }
-static inline void
-hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline void retrigger_next_event(void *arg) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
@@ -748,7 +782,7 @@ static inline void retrigger_next_event(void *arg) { }
  *
  * Called with interrupts disabled and base->cpu_base.lock held
  */
-static void hrtimer_reprogram(struct hrtimer *timer)
+static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *base = timer->base;
@@ -756,6 +790,28 @@ static void hrtimer_reprogram(struct hrtimer *timer)
 
 	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 
+	/*
+	 * CLOCK_REALTIME timer might be requested with an absolute
+	 * expiry time which is less than base->offset. Set it to 0.
+	 */
+	if (expires < 0)
+		expires = 0;
+
+	if (timer->is_soft) {
+		if (cpu_base->softirq_activated)
+			return;
+
+		if (!ktime_before(expires, cpu_base->softirq_expires_next))
+			return;
+
+		cpu_base->softirq_next_timer = timer;
+		cpu_base->softirq_expires_next = expires;
+
+		if (!ktime_before(expires, cpu_base->expires_next) ||
+		    !reprogram)
+			return;
+	}
+
 	/*
 	 * If the timer is not on the current cpu, we cannot reprogram
 	 * the other cpus clock event device.
@@ -773,18 +829,11 @@ static void hrtimer_reprogram(struct hrtimer *timer)
 	if (cpu_base->in_hrtirq)
 		return;
 
-	/*
-	 * CLOCK_REALTIME timer might be requested with an absolute
-	 * expiry time which is less than base->offset. Set it to 0.
-	 */
-	if (expires < 0)
-		expires = 0;
-
 	if (expires >= cpu_base->expires_next)
 		return;
 
 	/* Update the pointer to the next expiring timer */
-	hrtimer_update_next_timer(cpu_base, timer);
+	cpu_base->next_timer = timer;
 	cpu_base->expires_next = expires;
 
 	/*
@@ -938,9 +987,11 @@ void hrtimer_wait_for_timer(const struct hrtimer *timer)
  * Returns 1 when the new timer is the leftmost timer in the tree.
  */
 static int enqueue_hrtimer(struct hrtimer *timer,
-			   struct hrtimer_clock_base *base)
+			   struct hrtimer_clock_base *base,
+			   enum hrtimer_mode mode,
+			   bool modecheck)
 {
-	debug_activate(timer);
+	debug_activate(timer, mode, modecheck);
 
 	base->cpu_base->active_bases |= 1 << base->index;
 
@@ -975,7 +1026,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
 	if (!timerqueue_del(&base->active, &timer->node))
 		cpu_base->active_bases &= ~(1 << base->index);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
 	/*
 	 * Note: If reprogram is false we do not update
 	 * cpu_base->next_timer. This happens when we remove the first
@@ -986,7 +1036,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
 	 */
 	if (reprogram && timer == cpu_base->next_timer)
 		hrtimer_force_reprogram(cpu_base, 1);
-#endif
 }
 
 /*
@@ -1036,47 +1085,29 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
 	return tim;
 }
 
-static void hrtimer_reprogram_softirq(struct hrtimer *timer)
+static void
+hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
 {
-	struct hrtimer_clock_base *base = timer->base;
-	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
 	ktime_t expires;
 
 	/*
-	 * The softirq timer is not rearmed, when the softirq was raised
-	 * and has not yet run to completion.
+	 * Find the next SOFT expiration.
 	 */
-	if (cpu_base->softirq_activated)
-		return;
-
-	expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+	expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
 
-	if (!ktime_before(expires, cpu_base->softirq_expires_next))
-		return;
-
-	cpu_base->softirq_expires_next = expires;
-
-	if (!ktime_before(expires, cpu_base->expires_next))
+	/*
+	 * reprogramming needs to be triggered, even if the next soft
+	 * hrtimer expires at the same time as the next hard
+	 * hrtimer. cpu_base->softirq_expires_next needs to be updated!
+	 */
+	if (!reprogram || expires == KTIME_MAX)
 		return;
-	hrtimer_reprogram(timer);
-}
 
-static void hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base,
-					 bool reprogram)
-{
-	ktime_t expires;
-
-	expires = __hrtimer_get_next_event(cpu_base);
-
-	if (!reprogram || !ktime_before(expires, cpu_base->expires_next))
-		return;
 	/*
-	 * next_timer can be used here, because
-	 * hrtimer_get_next_event() updated the next
-	 * timer. expires_next is only set when reprogramming function
-	 * is called.
+	 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
+	 * cpu_base->*expires_next is only set by hrtimer_reprogram()
 	 */
-	hrtimer_reprogram(cpu_base->next_timer);
+	hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
 }
 
 static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
@@ -1098,16 +1129,16 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	/* Switch the timer base, if necessary: */
 	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 
-	return enqueue_hrtimer(timer, new_base);
+	return enqueue_hrtimer(timer, new_base, mode, HRTIMER_MODECHECK);
 }
-
 /**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * hrtimer_start_range_ns - (re)start an hrtimer
  * @timer:	the timer to be added
  * @tim:	expiry time
  * @delta_ns:	"slack" range for the timer
- * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
- *		relative (HRTIMER_MODE_REL)
+ * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
+ *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ *		softirq based mode is considered for debug purposes only!
  */
 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 			    u64 delta_ns, const enum hrtimer_mode mode)
@@ -1117,12 +1148,9 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
 	base = lock_hrtimer_base(timer, &flags);
 
-	if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) {
-		if (timer->base->index < HRTIMER_BASE_MONOTONIC_SOFT)
-			hrtimer_reprogram(timer);
-		else
-			hrtimer_reprogram_softirq(timer);
-	}
+	if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
+		hrtimer_reprogram(timer, true);
+
 	unlock_hrtimer_base(timer, &flags);
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1220,7 +1248,7 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base);
+		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1230,45 +1258,44 @@ u64 hrtimer_get_next_event(void)
 
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 {
-	if (likely(clock_id < MAX_CLOCKS_HRT)) {
+	if (likely(clock_id < MAX_CLOCKS)) {
 		int base = hrtimer_clock_to_base_table[clock_id];
 
 		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
 			return base;
 	}
 	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
-#ifdef CONFIG_PREEMPT_RT_FULL
-	return HRTIMER_BASE_MONOTONIC_SOFT;
-#else
 	return HRTIMER_BASE_MONOTONIC;
-#endif
 }
 
 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 			   enum hrtimer_mode mode)
 {
-	struct hrtimer_cpu_base *cpu_base;
+	bool softtimer;
 	int base;
+	struct hrtimer_cpu_base *cpu_base;
+
+	softtimer = !!(mode & HRTIMER_MODE_SOFT);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (!softtimer && !(mode & HRTIMER_MODE_HARD))
+		softtimer = true;
+#endif
+	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
 
 	memset(timer, 0, sizeof(struct hrtimer));
 
 	cpu_base = raw_cpu_ptr(&hrtimer_bases);
 
 	/*
-	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
+	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
 	 * clock modifications, so they needs to become CLOCK_MONOTONIC to
-	 * ensure POSIX compliance.
+	 * ensure Posix compliance.
 	 */
-	if (mode & HRTIMER_MODE_ABS) {
-		if (clock_id == CLOCK_REALTIME)
-			clock_id = CLOCK_MONOTONIC;
-		else if (clock_id == CLOCK_REALTIME_SOFT)
-			clock_id = CLOCK_MONOTONIC_SOFT;
-		else if (clock_id == CLOCK_REALTIME_HARD)
-			clock_id = CLOCK_MONOTONIC_HARD;
-	}
+	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
+		clock_id = CLOCK_MONOTONIC;
 
-	base = hrtimer_clockid_to_base(clock_id);
+	base += hrtimer_clockid_to_base(clock_id);
+	timer->is_soft = softtimer;
 	timer->base = &cpu_base->clock_base[base];
 	timerqueue_init(&timer->node);
 }
@@ -1277,7 +1304,13 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
  * hrtimer_init - initialize a timer to the given clock
  * @timer:	the timer to be initialized
  * @clock_id:	the clock to be used
- * @mode:	timer mode abs/rel
+ * @mode:       The modes which are relevant for initialization:
+ *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
+ *              HRTIMER_MODE_REL_SOFT
+ *
+ *              The PINNED variants of the above can be handed in,
+ *              but the PINNED bit is ignored as pinning happens
+ *              when the hrtimer is started
  */
 void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 		  enum hrtimer_mode mode)
@@ -1335,7 +1368,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
 static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 			  struct hrtimer_clock_base *base,
 			  struct hrtimer *timer, ktime_t *now,
-			  bool hardirq)
+			  unsigned long flags)
 {
 	enum hrtimer_restart (*fn)(struct hrtimer *);
 	int restart;
@@ -1370,19 +1403,11 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 	 * protected against migration to a different CPU even if the lock
 	 * is dropped.
 	 */
-	if (hardirq)
-		raw_spin_unlock(&cpu_base->lock);
-	else
-		raw_spin_unlock_irq(&cpu_base->lock);
-
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 	trace_hrtimer_expire_entry(timer, now);
 	restart = fn(timer);
 	trace_hrtimer_expire_exit(timer);
-
-	if (hardirq)
-		raw_spin_lock(&cpu_base->lock);
-	else
-		raw_spin_lock_irq(&cpu_base->lock);
+	raw_spin_lock_irq(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the running state after enqueue_hrtimer and
@@ -1395,7 +1420,8 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 	 */
 	if (restart != HRTIMER_NORESTART &&
 	    !(timer->state & HRTIMER_STATE_ENQUEUED))
-		enqueue_hrtimer(timer, base);
+		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS,
+				!HRTIMER_MODECHECK);
 
 	/*
 	 * Separate the ->running assignment from the ->state assignment.
@@ -1411,19 +1437,15 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 }
 
 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
-				 unsigned int active_mask)
+				 unsigned long flags, unsigned int active_mask)
 {
+	struct hrtimer_clock_base *base;
 	unsigned int active = cpu_base->active_bases & active_mask;
 
-	while (active) {
-		unsigned int id = __ffs(active);
-		struct hrtimer_clock_base *base;
+	for_each_active_base(base, cpu_base, active) {
 		struct timerqueue_node *node;
 		ktime_t basenow;
 
-		active &= ~(1U << id);
-		base = cpu_base->clock_base + id;
-
 		basenow = ktime_add(now, base->offset);
 
 		while ((node = timerqueue_getnext(&base->active))) {
@@ -1446,8 +1468,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
 			if (basenow < hrtimer_get_softexpires_tv64(timer))
 				break;
 
-			__run_hrtimer(cpu_base, base, timer, &basenow,
-				      active_mask == HRTIMER_ACTIVE_HARD);
+			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
 		}
 	}
 }
@@ -1455,17 +1476,18 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
 static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+	unsigned long flags;
 	ktime_t now;
 
-	raw_spin_lock_irq(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_SOFT);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
 
 	cpu_base->softirq_activated = 0;
 	hrtimer_update_softirq_timer(cpu_base, true);
 
-	raw_spin_unlock_irq(&cpu_base->lock);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 	wake_up_timer_waiters(cpu_base);
 }
 
@@ -1479,13 +1501,14 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
+	unsigned long flags;
 	int retries = 0;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
 	dev->next_event = KTIME_MAX;
 
-	raw_spin_lock(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	entry_time = now = hrtimer_update_base(cpu_base);
 retry:
 	cpu_base->in_hrtirq = 1;
@@ -1504,17 +1527,17 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 	}
 
-	__hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 
-	/* Reevaluate the hard interrupt clock bases for the next expiry */
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	/* Reevaluate the clock bases for the next expiry */
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
 	cpu_base->in_hrtirq = 0;
-	raw_spin_unlock(&cpu_base->lock);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
 	/* Reprogramming necessary ? */
 	if (!tick_program_event(expires_next, 0)) {
@@ -1535,7 +1558,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 * Acquire base lock for updating the offsets and retrieving
 	 * the current time.
 	 */
-	raw_spin_lock(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	now = hrtimer_update_base(cpu_base);
 	cpu_base->nr_retries++;
 	if (++retries < 3)
@@ -1548,7 +1571,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->nr_hangs++;
 	cpu_base->hang_detected = 1;
-	raw_spin_unlock(&cpu_base->lock);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
 	delta = ktime_sub(now, entry_time);
 	if ((unsigned int)delta > cpu_base->max_hang_time)
 		cpu_base->max_hang_time = (unsigned int) delta;
@@ -1590,6 +1614,7 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
 void hrtimer_run_queues(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+	unsigned long flags;
 	ktime_t now;
 
 	if (__hrtimer_hres_active(cpu_base))
@@ -1607,7 +1632,7 @@ void hrtimer_run_queues(void)
 		return;
 	}
 
-	raw_spin_lock(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	now = hrtimer_update_base(cpu_base);
 
 	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
@@ -1616,8 +1641,8 @@ void hrtimer_run_queues(void)
 		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 	}
 
-	__hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
-	raw_spin_unlock(&cpu_base->lock);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 }
 
 /*
@@ -1655,11 +1680,11 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 				   struct task_struct *task)
 {
 #ifdef CONFIG_PREEMPT_RT_FULL
-	if (!(clock_id & (HRTIMER_BASE_HARD_MASK | HRTIMER_BASE_SOFT_MASK))) {
+	if (!(mode & (HRTIMER_MODE_SOFT | HRTIMER_MODE_HARD))) {
 		if (task_is_elevated(current) || system_state != SYSTEM_RUNNING)
-			clock_id |= HRTIMER_BASE_HARD_MASK;
+			mode |= HRTIMER_MODE_HARD;
 		else
-			clock_id |= HRTIMER_BASE_SOFT_MASK;
+			mode |= HRTIMER_MODE_SOFT;
 	}
 #endif
 	__hrtimer_init(&sl->timer, clock_id, mode);
@@ -1837,7 +1862,7 @@ void cpu_chill(void)
 	unsigned int freeze_flag = current->flags & PF_NOFREEZE;
 
 	current->flags |= PF_NOFREEZE;
-	__hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC_HARD,
+	__hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL_HARD, CLOCK_MONOTONIC,
 			    TASK_UNINTERRUPTIBLE);
 	if (!freeze_flag)
 		current->flags &= ~PF_NOFREEZE;
@@ -1899,7 +1924,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		 * sort out already expired timers and reprogram the
 		 * event device.
 		 */
-		enqueue_hrtimer(timer, new_base);
+		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS,
+				!HRTIMER_MODECHECK);
 	}
 }
 
@@ -1911,7 +1937,6 @@ int hrtimers_dead_cpu(unsigned int scpu)
 	BUG_ON(cpu_online(scpu));
 	tick_cancel_sched_timer(scpu);
 
-	local_bh_disable();
 	local_irq_disable();
 	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = this_cpu_ptr(&hrtimer_bases);
@@ -1939,7 +1964,6 @@ int hrtimers_dead_cpu(unsigned int scpu)
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
 	local_irq_enable();
-	local_bh_enable();
 	return 0;
 }
 
@@ -1947,12 +1971,6 @@ int hrtimers_dead_cpu(unsigned int scpu)
 
 void __init hrtimers_init(void)
 {
-	/*
-	 * It is necessary, that the soft base mask is a single
-	 * bit.
-	 */
-	BUILD_BUG_ON_NOT_POWER_OF_2(HRTIMER_BASE_SOFT_MASK);
-
 	hrtimers_prepare_cpu(smp_processor_id());
 	open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
 }
@@ -2008,7 +2026,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
  * schedule_hrtimeout_range - sleep until timeout
  * @expires:	timeout value (ktime_t)
  * @delta:	slack in expires timeout (ktime_t)
- * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ * @mode:	timer mode
  *
  * Make the current task sleep until the given expiry time has
  * elapsed. The routine will return immediately unless
@@ -2047,7 +2065,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
 /**
  * schedule_hrtimeout - sleep until timeout
  * @expires:	timeout value (ktime_t)
- * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ * @mode:	timer mode
  *
  * Make the current task sleep until the given expiry time has
  * elapsed. The routine will return immediately unless
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index e0a29ebde36d..6181e60aa9f1 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -106,7 +106,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 
 void tick_setup_hrtimer_broadcast(void)
 {
-	hrtimer_init(&bctimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS);
+	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	bctimer.function = bc_handler;
 	clockevents_register_device(&ce_broadcast_hrtimer);
 }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index be0ac01f2e12..aac76d9801fc 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -149,14 +149,14 @@ static inline void tick_nohz_init(void) { }
 
 #ifdef CONFIG_NO_HZ_COMMON
 extern unsigned long tick_nohz_active;
-#else
-#define tick_nohz_active (0)
-#endif
+extern void timers_update_nohz(void);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void timers_update_migration(bool update_nohz);
+#ifdef CONFIG_SMP
+extern struct static_key_false timers_migration_enabled;
+#endif
 #else
-static inline void timers_update_migration(bool update_nohz) { }
+static inline void timers_update_nohz(void) { }
+#define tick_nohz_active (0)
 #endif
 
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index abda4e925543..e23cccdb552a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1101,7 +1101,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
 	ts->nohz_mode = mode;
 	/* One update is enough */
 	if (!test_and_set_bit(0, &tick_nohz_active))
-		timers_update_migration(true);
+		timers_update_nohz();
 }
 
 /**
@@ -1217,7 +1217,7 @@ void tick_setup_sched_timer(void)
 	/*
 	 * Emulate tick processing via per-CPU hrtimers:
 	 */
-	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS);
+	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	ts->sched_timer.function = tick_sched_timer;
 
 	/* Get the next period (per-CPU) */
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ff0b98769be4..dee7cb4c0cf3 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -205,8 +205,6 @@ struct timer_base {
 	unsigned long		clk;
 	unsigned long		next_expiry;
 	unsigned int		cpu;
-	bool			migration_enabled;
-	bool			nohz_active;
 	bool			is_idle;
 	bool			must_forward_clk;
 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
@@ -215,45 +213,79 @@ struct timer_base {
 
 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
+
+static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+static DEFINE_MUTEX(timer_keys_mutex);
+
+static struct swork_event timer_update_swork;
+
+#ifdef CONFIG_SMP
 unsigned int sysctl_timer_migration = 1;
 
-void timers_update_migration(bool update_nohz)
+DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
+
+static void timers_update_migration(void)
 {
 	bool on = sysctl_timer_migration && tick_nohz_active;
-	unsigned int cpu;
 
-	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
-		return;
+	if (on)
+		static_branch_enable(&timers_migration_enabled);
+	else
+		static_branch_disable(&timers_migration_enabled);
+}
+#else
+static inline void timers_update_migration(void) { }
+#endif /* !CONFIG_SMP */
 
-	for_each_possible_cpu(cpu) {
-		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
-		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
-		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
-		if (!update_nohz)
-			continue;
-		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
-		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
-		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
-	}
+static void timer_update_keys(struct swork_event *event)
+{
+	mutex_lock(&timer_keys_mutex);
+	timers_update_migration();
+	static_branch_enable(&timers_nohz_active);
+	mutex_unlock(&timer_keys_mutex);
 }
 
+void timers_update_nohz(void)
+{
+	swork_queue(&timer_update_swork);
+}
+
+static __init int hrtimer_init_thread(void)
+{
+	WARN_ON(swork_get());
+	INIT_SWORK(&timer_update_swork, timer_update_keys);
+	return 0;
+}
+early_initcall(hrtimer_init_thread);
+
 int timer_migration_handler(struct ctl_table *table, int write,
 			    void __user *buffer, size_t *lenp,
 			    loff_t *ppos)
 {
-	static DEFINE_MUTEX(mutex);
 	int ret;
 
-	mutex_lock(&mutex);
+	mutex_lock(&timer_keys_mutex);
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (!ret && write)
-		timers_update_migration(false);
-	mutex_unlock(&mutex);
+		timers_update_migration();
+	mutex_unlock(&timer_keys_mutex);
 	return ret;
 }
-#endif
+
+static inline bool is_timers_nohz_active(void)
+{
+	return static_branch_unlikely(&timers_nohz_active);
+}
+
+#else
+
+static inline bool is_timers_nohz_active(void)
+{
+	return false;
+}
+
+#endif /* NO_HZ_COMMON */
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
 		bool force_up)
@@ -539,7 +571,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
 static void
 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 {
-	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+	if (!is_timers_nohz_active())
 		return;
 
 	/*
@@ -830,7 +862,7 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
 	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
 	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
+	if (is_timers_nohz_active() && (tflags & TIMER_DEFERRABLE))
 		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
 	return base;
 }
@@ -843,7 +875,7 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
 	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
 	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
+	if (is_timers_nohz_active() && (tflags & TIMER_DEFERRABLE))
 		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
 	return base;
 }
@@ -853,21 +885,20 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }
 
-#ifdef CONFIG_NO_HZ_COMMON
 static inline struct timer_base *
 get_target_base(struct timer_base *base, unsigned tflags)
 {
-#ifdef CONFIG_SMP
-	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
-		return get_timer_this_cpu_base(tflags);
-	return get_timer_cpu_base(tflags, get_nohz_timer_target());
-#else
-	return get_timer_this_cpu_base(tflags);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+	if (static_branch_unlikely(&timers_migration_enabled) &&
+	    !(tflags & TIMER_PINNED))
+		return get_timer_cpu_base(tflags, get_nohz_timer_target());
 #endif
+	return get_timer_this_cpu_base(tflags);
 }
 
 static inline void forward_timer_base(struct timer_base *base)
 {
+#ifdef CONFIG_NO_HZ_COMMON
 	unsigned long jnow;
 
 	/*
@@ -894,16 +925,8 @@ static inline void forward_timer_base(struct timer_base *base)
 			return;
 		base->clk = base->next_expiry;
 	}
-}
-#else
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-	return get_timer_this_cpu_base(tflags);
-}
-
-static inline void forward_timer_base(struct timer_base *base) { }
 #endif
+}
 
 
 /*
@@ -1728,7 +1751,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 	irq_work_tick_soft();
 
 	__run_timers(base);
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
+	if (is_timers_nohz_active())
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 }
 
@@ -1742,7 +1765,7 @@ void run_local_timers(void)
 	hrtimer_run_queues();
 	/* Raise the softirq only if required. */
 	if (time_before(jiffies, base->clk)) {
-		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
+		if (!is_timers_nohz_active())
 			return;
 		/* CPU is awake, so check the deferrable base. */
 		base++;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0ec77b4abccc..c83e621099a4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -383,7 +383,7 @@ static void watchdog_enable(unsigned int cpu)
 	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
 
 	/* kick off the timer for the hardlockup detector */
-	hrtimer_init(hrtimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 	hrtimer->function = watchdog_timer_fn;
 
 	/* Enable the perf event */
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 112ba0830d72..9d0305a14777 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -400,7 +400,7 @@ static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
 static void bcm_tx_start_timer(struct bcm_op *op)
 {
 	if (bcm_tx_set_expiry(op, &op->timer))
-		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
 }
 
 /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
@@ -497,7 +497,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
 		/* do not send the saved data - only start throttle timer */
 		hrtimer_start(&op->thrtimer,
 			      ktime_add(op->kt_lastmsg, op->kt_ival2),
-			      HRTIMER_MODE_ABS);
+			      HRTIMER_MODE_ABS_SOFT);
 		return;
 	}
 
@@ -556,7 +556,7 @@ static void bcm_rx_starttimer(struct bcm_op *op)
 		return;
 
 	if (op->kt_ival1)
-		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
+		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
 }
 
 /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
@@ -958,13 +958,13 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		op->ifindex = ifindex;
 
 		/* initialize uninitialized (kzalloc) structure */
-		hrtimer_init(&op->timer, CLOCK_MONOTONIC_SOFT,
-			     HRTIMER_MODE_REL);
+		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL_SOFT);
 		op->timer.function = bcm_tx_timeout_handler;
 
 		/* currently unused in tx_ops */
-		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC_SOFT,
-			     HRTIMER_MODE_REL);
+		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL_SOFT);
 
 		/* add this bcm_op to the list of the tx_ops */
 		list_add(&op->list, &bo->tx_ops);
@@ -1131,12 +1131,12 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		op->rx_ifindex = ifindex;
 
 		/* initialize uninitialized (kzalloc) structure */
-		hrtimer_init(&op->timer, CLOCK_MONOTONIC_SOFT,
-			     HRTIMER_MODE_REL);
+		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL_SOFT);
 		op->timer.function = bcm_rx_timeout_handler;
 
-		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC_SOFT,
-			     HRTIMER_MODE_REL);
+		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL_SOFT);
 		op->thrtimer.function = bcm_rx_thr_handler;
 
 		/* add this bcm_op to the list of the rx_ops */
@@ -1189,7 +1189,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 
 		if ((op->flags & STARTTIMER) && op->kt_ival1)
 			hrtimer_start(&op->timer, op->kt_ival1,
-				      HRTIMER_MODE_REL);
+				      HRTIMER_MODE_REL_SOFT);
 	}
 
 	/* now we can register for can_ids, if we added a new bcm_op */
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b20077e5aa76..0bb48735396e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -576,7 +576,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
 		INIT_HLIST_NODE(&x->bydst);
 		INIT_HLIST_NODE(&x->bysrc);
 		INIT_HLIST_NODE(&x->byspi);
-		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME_SOFT, HRTIMER_MODE_ABS);
+		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
 		x->mtimer.function = xfrm_timer_handler;
 		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
 				(unsigned long)x);
@@ -1035,7 +1035,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
 			hrtimer_start(&x->mtimer,
 				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
-				      HRTIMER_MODE_REL);
+				      HRTIMER_MODE_REL_SOFT);
 			net->xfrm.state_num++;
 			xfrm_hash_grow_check(net, x->bydst.next != NULL);
 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
@@ -1146,7 +1146,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
 		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
 	}
 
-	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
+	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
 	if (x->replay_maxage)
 		mod_timer(&x->rtimer, jiffies + x->replay_maxage);
 
@@ -1252,7 +1252,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
 		xfrm_state_hold(x);
 		hrtimer_start(&x->mtimer,
 			      ktime_set(net->xfrm.sysctl_acq_expires, 0),
-			      HRTIMER_MODE_REL);
+			      HRTIMER_MODE_REL_SOFT);
 		list_add(&x->km.all, &net->xfrm.state_all);
 		hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
 		h = xfrm_src_hash(net, daddr, saddr, family);
@@ -1544,7 +1544,8 @@ int xfrm_state_update(struct xfrm_state *x)
 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
 		x1->km.dying = 0;
 
-		hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
+		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
+			      HRTIMER_MODE_REL_SOFT);
 		if (x1->curlft.use_time)
 			xfrm_state_check_expire(x1);
 
@@ -1568,7 +1569,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
 	    x->curlft.packets >= x->lft.hard_packet_limit) {
 		x->km.state = XFRM_STATE_EXPIRED;
-		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
+		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
 		return -EINVAL;
 	}