From acb9e30dd1fb2c1c3694329f3261a5eacfe59a11 Mon Sep 17 00:00:00 2001
From: Jiri Slaby <jslaby@suse.cz>
Date: Mar 25 2025 07:16:15 +0000
Subject: lockdep: Don't disable interrupts on RT in
 disable_irq_nosync_lockdep.*() (git-fixes).

---

diff --git a/patches.suse/lockdep-Don-t-disable-interrupts-on-RT-in-disable_ir.patch b/patches.suse/lockdep-Don-t-disable-interrupts-on-RT-in-disable_ir.patch
new file mode 100644
index 0000000..d70885c
--- /dev/null
+++ b/patches.suse/lockdep-Don-t-disable-interrupts-on-RT-in-disable_ir.patch
@@ -0,0 +1,75 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 12 Feb 2025 11:36:18 +0100
+Subject: lockdep: Don't disable interrupts on RT in
+ disable_irq_nosync_lockdep.*()
+References: git-fixes
+Git-commit: 87886b32d669abc11c7be95ef44099215e4f5788
+Patch-mainline: v6.15-rc1
+
+disable_irq_nosync_lockdep() disables interrupts with lockdep enabled to
+avoid false positive reports by lockdep that a certain lock has not been
+acquired with disabled interrupts. The user of these macros expects that
+a lock can be acquired without disabling interrupts because the IRQ line
+triggering the interrupt is disabled.
+
+This triggers a warning on PREEMPT_RT because after
+disable_irq_nosync_lockdep.*() the following spinlock_t is now acquired
+with disabled interrupts.
+
+On PREEMPT_RT there is no difference between spin_lock() and
+spin_lock_irq(), so not disabling interrupts in this case works for the
+two remaining callers as of today.
+
+Don't disable interrupts on PREEMPT_RT in disable_irq_nosync_lockdep.*().
+
+Closes: https://lore.kernel.org/760e34f9-6034-40e0-82a5-ee9becd24438@roeck-us.net
+Fixes: e8106b941ceab ("[PATCH] lockdep: core, add enable/disable_irq_irqsave/irqrestore() APIs")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Suggested-by: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lore.kernel.org/r/20250212103619.2560503-2-bigeasy@linutronix.de
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/linux/interrupt.h |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -448,7 +448,7 @@ irq_calc_affinity_vectors(unsigned int m
+ static inline void disable_irq_nosync_lockdep(unsigned int irq)
+ {
+ 	disable_irq_nosync(irq);
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_disable();
+ #endif
+ }
+@@ -456,7 +456,7 @@ static inline void disable_irq_nosync_lo
+ static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
+ {
+ 	disable_irq_nosync(irq);
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_save(*flags);
+ #endif
+ }
+@@ -471,7 +471,7 @@ static inline void disable_irq_lockdep(u
+ 
+ static inline void enable_irq_lockdep(unsigned int irq)
+ {
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_enable();
+ #endif
+ 	enable_irq(irq);
+@@ -479,7 +479,7 @@ static inline void enable_irq_lockdep(un
+ 
+ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
+ {
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_restore(*flags);
+ #endif
+ 	enable_irq(irq);
diff --git a/series.conf b/series.conf
index 3590b18..eaee9bf 100644
--- a/series.conf
+++ b/series.conf
@@ -4550,6 +4550,7 @@
 	patches.suse/spi-Fix-reference-count-leak-in-slave_show.patch
 	patches.suse/keys-Fix-UAF-in-key_put.patch
 	patches.suse/i2c-amd-mp2-drop-free_irq-of-devm_request_irq-alloca.patch
+	patches.suse/lockdep-Don-t-disable-interrupts-on-RT-in-disable_ir.patch
 
 	# jejb/scsi for-next
 	patches.suse/scsi-lpfc-Reduce-log-message-generation-during-ELS-r.patch