Re: [PATCH RT 0/1] Linux v4.19.312-rt134-rc2

From: Daniel Wagner
Date: Mon May 06 2024 - 07:00:55 EST


Hi Sebastian,

On 06.05.24 12:46, Daniel Wagner wrote:
> Dear RT Folks,
>
> This is the RT stable review cycle of patch 4.19.312-rt134-rc2.
>
> Please scream at me if I messed something up. Please test the patches
> too.

My announce script is not attaching any conflict resolution diffs
(eventually, I'll fix this). Could you have a look and check whether I
got the kernel/time/timer.c update right? The conflict was caused by
stable including the 030dcdd197d7 ("timers: Prepare support for
PREEMPT_RT") patch.

commit 2c1a32c5e05fd75885186793bc0d26e0a65b473d
Merge: 4790d0210f19 3d86e7f5bdf3
Author: Daniel Wagner <wagi@xxxxxxxxx>
Date: Wed Apr 17 16:31:21 2024 +0200

Merge tag 'v4.19.312' into v4.19-rt-next

This is the 4.19.312 stable release

Conflicts:
include/linux/timer.h
kernel/time/timer.c

diff --git a/include/linux/timer.h b/include/linux/timer.h
remerge CONFLICT (content): Merge conflict in include/linux/timer.h
index b70c5168a346..aef40cac2add 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -173,13 +173,6 @@ extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
extern int timer_delete_sync(struct timer_list *timer);

-<<<<<<< 4790d0210f19 (Linux 4.19.307-rt133)
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- extern int del_timer_sync(struct timer_list *timer);
-#else
-# define del_timer_sync(t) del_timer(t)
-#endif
-=======
/**
* del_timer_sync - Delete a pending timer and wait for a running callback
* @timer: The timer to be deleted
@@ -192,7 +185,6 @@ static inline int del_timer_sync(struct timer_list *timer)
{
return timer_delete_sync(timer);
}
->>>>>>> 3d86e7f5bdf3 (Linux 4.19.312)

#define del_singleshot_timer_sync(t) del_timer_sync(t)

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
remerge CONFLICT (content): Merge conflict in kernel/time/timer.c
index 911191916df1..bc5ce0cf9488 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1250,6 +1250,25 @@ int del_timer(struct timer_list *timer)
}
EXPORT_SYMBOL(del_timer);

+static int __try_to_del_timer_sync(struct timer_list *timer,
+ struct timer_base **basep)
+{
+ struct timer_base *base;
+ unsigned long flags;
+ int ret = -1;
+
+ debug_assert_init(timer);
+
+ *basep = base = lock_timer_base(timer, &flags);
+
+ if (base->running_timer != timer)
+ ret = detach_if_pending(timer, base, true);
+
+ raw_spin_unlock_irqrestore(&base->lock, flags);
+
+ return ret;
+}
+
/**
* try_to_del_timer_sync - Try to deactivate a timer
* @timer: Timer to deactivate
@@ -1269,19 +1288,8 @@ EXPORT_SYMBOL(del_timer);
int try_to_del_timer_sync(struct timer_list *timer)
{
struct timer_base *base;
- unsigned long flags;
- int ret = -1;
-
- debug_assert_init(timer);
-
- base = lock_timer_base(timer, &flags);
-
- if (base->running_timer != timer)
- ret = detach_if_pending(timer, base, true);
-
- raw_spin_unlock_irqrestore(&base->lock, flags);

- return ret;
+ return __try_to_del_timer_sync(timer, &base);
}
EXPORT_SYMBOL(try_to_del_timer_sync);

@@ -1303,7 +1311,6 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)

/*
* The counterpart to del_timer_wait_running().
-<<<<<<< 4790d0210f19 (Linux 4.19.307-rt133)
*
* If there is a waiter for base->expiry_lock, then it was waiting for the
* timer callback to finish. Drop expiry_lock and reaquire it. That allows
@@ -1359,64 +1366,35 @@ static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
-/**
- * del_timer_sync - deactivate a timer and wait for the handler to finish.
- * @timer: the timer to be deactivated
-=======
->>>>>>> 3d86e7f5bdf3 (Linux 4.19.312)
- *
- * If there is a waiter for base->expiry_lock, then it was waiting for the
- * timer callback to finish. Drop expiry_lock and reaquire it. That allows
- * the waiter to acquire the lock and make progress.
- */
-static void timer_sync_wait_running(struct timer_base *base)
+static int __del_timer_sync(struct timer_list *timer)
{
- if (atomic_read(&base->timer_waiters)) {
- spin_unlock(&base->expiry_lock);
- spin_lock(&base->expiry_lock);
- }
-}
+ struct timer_base *base;
+ int ret;

-/*
- * This function is called on PREEMPT_RT kernels when the fast path
- * deletion of a timer failed because the timer callback function was
- * running.
- *
- * This prevents priority inversion, if the softirq thread on a remote CPU
- * got preempted, and it prevents a life lock when the task which tries to
- * delete a timer preempted the softirq thread running the timer callback
- * function.
- */
-static void del_timer_wait_running(struct timer_list *timer)
-{
- u32 tf;
+ /*
+ * Must be able to sleep on PREEMPT_RT because of the slowpath in
+ * del_timer_wait_running().
+ */
+#if 0
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
+ lockdep_assert_preemption_enabled();
+#endif

- tf = READ_ONCE(timer->flags);
- if (!(tf & TIMER_MIGRATING)) {
- struct timer_base *base = get_timer_base(tf);
+ for (;;) {
+ ret = __try_to_del_timer_sync(timer, &base);
+ if (ret >= 0)
+ return ret;
+
+ if (READ_ONCE(timer->flags) & TIMER_IRQSAFE)
+ continue;

/*
- * Mark the base as contended and grab the expiry lock,
- * which is held by the softirq across the timer
- * callback. Drop the lock immediately so the softirq can
- * expire the next timer. In theory the timer could already
- * be running again, but that's more than unlikely and just
- * causes another wait loop.
+ * When accessing the lock, timers of base are no longer expired
+ * and so timer is no longer running.
*/
- atomic_inc(&base->timer_waiters);
- spin_lock_bh(&base->expiry_lock);
- atomic_dec(&base->timer_waiters);
- spin_unlock_bh(&base->expiry_lock);
+ timer_sync_wait_running(base);
}
}
-#else
-static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
-static inline void timer_base_lock_expiry(struct timer_base *base) { }
-static inline void timer_base_unlock_expiry(struct timer_base *base) { }
-static inline void timer_sync_wait_running(struct timer_base *base) { }
-static inline void del_timer_wait_running(struct timer_list *timer) { }
-#endif

/**
* timer_delete_sync - Deactivate a timer and wait for the handler to finish.
@@ -1459,8 +1437,6 @@ static inline void del_timer_wait_running(struct timer_list *timer) { }
*/
int timer_delete_sync(struct timer_list *timer)
{
- int ret;
-
#ifdef CONFIG_LOCKDEP
unsigned long flags;

@@ -1478,43 +1454,14 @@ int timer_delete_sync(struct timer_list *timer)
* could lead to deadlock.
*/
WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
-<<<<<<< 4790d0210f19 (Linux 4.19.307-rt133)
-
/*
* Must be able to sleep on PREEMPT_RT because of the slowpath in
- * del_timer_wait_running().
+ * __del_timer_sync().
*/
-#if 0
if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
- lockdep_assert_preemption_enabled();
-#endif
-
- do {
- ret = try_to_del_timer_sync(timer);
+ might_sleep();

- if (unlikely(ret < 0)) {
- del_timer_wait_running(timer);
- cpu_relax();
- }
- } while (ret < 0);
-
- return ret;
-}
-EXPORT_SYMBOL(del_timer_sync);
-#endif
-=======
->>>>>>> 3d86e7f5bdf3 (Linux 4.19.312)
-
- do {
- ret = try_to_del_timer_sync(timer);
-
- if (unlikely(ret < 0)) {
- del_timer_wait_running(timer);
- cpu_relax();
- }
- } while (ret < 0);
-
- return ret;
+ return __del_timer_sync(timer);
}
EXPORT_SYMBOL(timer_delete_sync);

@@ -1585,14 +1532,11 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
if (timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
call_timer_fn(timer, fn, baseclk);
- base->running_timer = NULL;
raw_spin_lock(&base->lock);
base->running_timer = NULL;
} else {
raw_spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, baseclk);
- base->running_timer = NULL;
- timer_sync_wait_running(base);
raw_spin_lock_irq(&base->lock);
base->running_timer = NULL;
timer_sync_wait_running(base);