[PATCH 4/5] kernel/watchdog_hld: simplify the detection of the hld watchdog

From: Pingfan Liu
Date: Tue Sep 14 2021 - 23:51:48 EST


With the new interface model, the probe and enable stages can be merged,
which saves the throwaway pair of perf_event allocation and release that
the probe stage used to perform.
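
For clarity, a simplified before/after sketch of the affected flow
(abridged from the functions touched by this patch; the watchdog_cpus
bookkeeping and pr_info messages are omitted):

/* Before: watchdog_nmi_probe() called hardlockup_detector_perf_init(),
 * which allocated a perf event only to release it again immediately. */
int __init hardlockup_detector_perf_init(void)
{
	int ret = hardlockup_detector_event_create();	/* alloc */

	if (!ret) {
		perf_event_release_kernel(this_cpu_read(watchdog_ev)); /* free */
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}

/* After: the per-CPU enable path creates the event exactly once and
 * reports success or failure through its return value instead. */
int hardlockup_detector_perf_enable(void)
{
	if (hardlockup_detector_event_create())		/* single alloc */
		return -ENODEV;

	perf_event_enable(this_cpu_read(watchdog_ev));
	return 0;
}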

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Petr Mladek <pmladek@xxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Wang Qing <wangqing@xxxxxxxx>
Cc: "Peter Zijlstra (Intel)" <peterz@xxxxxxxxxxxxx>
Cc: Santosh Sivaraj <santosh@xxxxxxxxxx>
To: linux-kernel@xxxxxxxxxxxxxxx
---
include/linux/nmi.h | 8 ++------
kernel/watchdog.c | 16 ++++++++++++++--
kernel/watchdog_hld.c | 21 +++------------------
3 files changed, 19 insertions(+), 26 deletions(-)

diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 70665fa6e0a9..e41cdf3edba7 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -101,20 +101,16 @@ extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_disable(void);
-extern void hardlockup_detector_perf_enable(void);
+extern int hardlockup_detector_perf_enable(void);
extern void hardlockup_detector_perf_cleanup(void);
-extern int hardlockup_detector_perf_init(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_disable(void) { }
-static inline void hardlockup_detector_perf_enable(void) { }
+static inline int hardlockup_detector_perf_enable(void) { return -ENODEV; }
static inline void hardlockup_detector_perf_cleanup(void) { }
# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
-static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
-# else
-static inline int hardlockup_detector_perf_init(void) { return 0; }
# endif
#endif

diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 4ab71943d65f..3f5efbd5961c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -99,7 +99,17 @@ static watchdog_nmi_status_reporter status_reporter;
*/
void __weak watchdog_nmi_enable(unsigned int cpu)
{
- hardlockup_detector_perf_enable();
+ struct watchdog_nmi_status data;
+ int ret;
+
+ ret = hardlockup_detector_perf_enable();
+ /* No concurrency risk: the boot CPU runs this before smp_init() */
+ if ((watchdog_enabled & NMI_WATCHDOG_UNDETERMINED) &&
+     status_reporter) {
+ data.cpu = cpu;
+ data.status = ret;
+ (*status_reporter)(&data);
+ }
}

void __weak watchdog_nmi_disable(unsigned int cpu)
@@ -130,7 +140,9 @@ static void watchdog_nmi_report_capability(struct watchdog_nmi_status *data)
*/
int __weak __init watchdog_nmi_probe(watchdog_nmi_status_reporter notifier)
{
- return hardlockup_detector_perf_init();
+ status_reporter = notifier;
+
+ return -EBUSY;
}

/**
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 6876e796dbf5..2894778fbc6d 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -190,16 +190,17 @@ static int hardlockup_detector_event_create(void)
/**
* hardlockup_detector_perf_enable - Enable the local event
*/
-void hardlockup_detector_perf_enable(void)
+int hardlockup_detector_perf_enable(void)
{
if (hardlockup_detector_event_create())
- return;
+ return -ENODEV;

/* use original value for check */
if (!atomic_fetch_inc(&watchdog_cpus))
pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

perf_event_enable(this_cpu_read(watchdog_ev));
+ return 0;
}

/**
@@ -281,19 +282,3 @@ void __init hardlockup_detector_perf_restart(void)
perf_event_enable(event);
}
}
-
-/**
- * hardlockup_detector_perf_init - Probe whether NMI event is available at all
- */
-int __init hardlockup_detector_perf_init(void)
-{
- int ret = hardlockup_detector_event_create();
-
- if (ret) {
- pr_info("Perf NMI watchdog permanently disabled\n");
- } else {
- perf_event_release_kernel(this_cpu_read(watchdog_ev));
- this_cpu_write(watchdog_ev, NULL);
- }
- return ret;
-}
--
2.31.1