powerpc/watchdog: Fix missed watchdog reset due to memory ordering race
[ Upstream commit 5dad4ba68a2483fc80d70b9dc90bbe16e1f27263 ]

It is possible for all CPUs to miss the pending cpumask becoming clear,
and then nobody resetting it, which will cause the lockup detector to
stop working. It will eventually expire, but watchdog_smp_panic will
avoid doing anything if the pending mask is clear and it will never be
reset.

Order the cpumask clear vs the subsequent test to close this race.

Add an extra check for an empty pending mask when the watchdog fires and
finds its bit still clear, to try to catch any other possible races or
bugs here and keep the watchdog working. The extra test in
arch_touch_nmi_watchdog is required to prevent the new warning from
firing off.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Debugged-by: Laurent Dufour <ldufour@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211110025056.2084347-2-npiggin@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 5a3cda54ff
commit aea9d36848
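The race the patch closes is a store-buffering pattern: each of the last two CPUs clears its own pending bit and then checks whether the whole mask is empty, and without a full barrier between the clear and the check both can read a stale, still non-empty mask, so neither resets it. Below is a minimal userspace sketch of that pattern and of the role the added smp_mb() plays; the two-word pending array, the thread names, and the use of C11 atomics are illustrative assumptions rather than the kernel code.

/* sb_demo.c: illustrative only, not kernel code. Build: gcc -O2 -pthread sb_demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Two-word "pending mask", one bit per simulated CPU (names are made up). */
static atomic_ulong pending[2];

static void clear_and_check(int cpu)
{
	/* Clear our own pending bit (a plain store, like clearing a cpumask bit). */
	atomic_store_explicit(&pending[cpu], 0, memory_order_relaxed);

	/*
	 * Stand-in for the added smp_mb(): order the store above against the
	 * loads below. Without this fence both threads can still see the
	 * other word as non-zero, neither takes the "mask is empty" path,
	 * and the mask is never reset, which is the missed-reset race.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load_explicit(&pending[0], memory_order_relaxed) == 0 &&
	    atomic_load_explicit(&pending[1], memory_order_relaxed) == 0)
		printf("cpu%d: pending mask empty, reset the watchdog\n", cpu);
}

static void *cpu_thread(void *arg)
{
	clear_and_check((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	atomic_store(&pending[0], 1);
	atomic_store(&pending[1], 1);
	pthread_create(&t[0], NULL, cpu_thread, (void *)0L);
	pthread_create(&t[1], NULL, cpu_thread, (void *)1L);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}

With the fence present, at least one of the two threads is guaranteed to observe both words as zero, which is the property wd_smp_clear_cpu_pending() relies on so that some CPU always performs the reset.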
arch/powerpc/kernel/watchdog.c
@@ -132,6 +132,10 @@ static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
 {
 	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
 	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
+	/*
+	 * See wd_smp_clear_cpu_pending()
+	 */
+	smp_mb();
 	if (cpumask_empty(&wd_smp_cpus_pending)) {
 		wd_smp_last_reset_tb = tb;
 		cpumask_andnot(&wd_smp_cpus_pending,
@@ -217,13 +221,44 @@ static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
 
 			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
 			wd_smp_unlock(&flags);
+		} else {
+			/*
+			 * The last CPU to clear pending should have reset the
+			 * watchdog so we generally should not find it empty
+			 * here if our CPU was clear. However it could happen
+			 * due to a rare race with another CPU taking the
+			 * last CPU out of the mask concurrently.
+			 *
+			 * We can't add a warning for it. But just in case
+			 * there is a problem with the watchdog that is causing
+			 * the mask to not be reset, try to kick it along here.
+			 */
+			if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
+				goto none_pending;
 		}
 		return;
 	}
+
 	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+
+	/*
+	 * Order the store to clear pending with the load(s) to check all
+	 * words in the pending mask to check they are all empty. This orders
+	 * with the same barrier on another CPU. This prevents two CPUs
+	 * clearing the last 2 pending bits, but neither seeing the other's
+	 * store when checking if the mask is empty, and missing an empty
+	 * mask, which ends with a false positive.
+	 */
+	smp_mb();
 	if (cpumask_empty(&wd_smp_cpus_pending)) {
 		unsigned long flags;
 
+none_pending:
+		/*
+		 * Double check under lock because more than one CPU could see
+		 * a clear mask with the lockless check after clearing their
+		 * pending bits.
+		 */
 		wd_smp_lock(&flags);
 		if (cpumask_empty(&wd_smp_cpus_pending)) {
 			wd_smp_last_reset_tb = tb;
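The none_pending path above pairs the lockless check with a double check under the lock: after the ordering fix, more than one CPU may see an empty mask after clearing its bit, but only one of them should update wd_smp_last_reset_tb and re-arm the mask. Here is a minimal userspace sketch of that idiom; the mutex, the single-word mask, and the function name (wd_lock, pending_mask, maybe_reset_mask) are hypothetical stand-ins for wd_smp_lock() and wd_smp_cpus_pending, not the kernel implementation.

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t wd_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for wd_smp_lock() */
static atomic_ulong pending_mask;				/* one bit per simulated CPU */
static unsigned long last_reset_tb;				/* written only under wd_lock */

/* Called after a CPU has cleared its own bit and issued its full barrier. */
static void maybe_reset_mask(unsigned long tb, unsigned long all_cpus_mask)
{
	/* Lockless fast path: someone is still pending, nothing to do. */
	if (atomic_load(&pending_mask) != 0)
		return;

	/*
	 * Several CPUs may reach this point after clearing the last bits,
	 * so re-check and perform the reset under the lock; only the first
	 * one through re-arms the mask and records the reset time.
	 */
	pthread_mutex_lock(&wd_lock);
	if (atomic_load(&pending_mask) == 0) {
		last_reset_tb = tb;
		atomic_store(&pending_mask, all_cpus_mask);
	}
	pthread_mutex_unlock(&wd_lock);
}

int main(void)
{
	/* Example: CPUs 0 and 1 were pending and both bits have been cleared. */
	atomic_store(&pending_mask, 0);
	maybe_reset_mask(12345, 0x3UL);		/* re-arms the bits for two CPUs */
	return atomic_load(&pending_mask) == 0x3UL ? 0 : 1;
}

The goto none_pending in the earlier branch jumps to the same locked path, so a CPU that finds its own bit already clear and the whole mask empty can still kick the reset along.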
@@ -314,8 +349,12 @@ void arch_touch_nmi_watchdog(void)
 {
 	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
 	int cpu = smp_processor_id();
-	u64 tb = get_tb();
+	u64 tb;
 
+	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+		return;
+
+	tb = get_tb();
 	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
 		per_cpu(wd_timer_tb, cpu) = tb;
 		wd_smp_clear_cpu_pending(cpu, tb);