Skip to content

Commit 6ba5571

Browse files
committed
powerpc/nmi: Add an API for sending "safe" NMIs
Currently the options we have for sending NMIs are not necessarily safe, that is they can potentially interrupt a CPU in a non-recoverable region of code, meaning the kernel must then panic(). But we'd like to use smp_send_nmi_ipi() to do cross-CPU calls in situations where we don't want to risk a panic(), because it doesn't have the requirement that interrupts must be enabled like smp_call_function(). So add an API for the caller to indicate that it wants to use the NMI infrastructure, but doesn't want to do anything "unsafe". Currently that is implemented by not actually calling cause_nmi_ipi(), instead falling back to an IPI. In future we can pass the safe parameter down to cause_nmi_ipi() and the individual backends can potentially take it into account before deciding what to do. Signed-off-by: Michael Ellerman <[email protected]> Reviewed-by: Nicholas Piggin <[email protected]>
1 parent 7b08729 commit 6ba5571

File tree

2 files changed

+16
-5
lines changed

2 files changed

+16
-5
lines changed

arch/powerpc/include/asm/smp.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -58,6 +58,7 @@ struct smp_ops_t {
5858

5959
extern void smp_flush_nmi_ipi(u64 delay_us);
6060
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
61+
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
6162
extern void smp_send_debugger_break(void);
6263
extern void start_secondary_resume(void);
6364
extern void smp_generic_give_timebase(void);

arch/powerpc/kernel/smp.c

Lines changed: 15 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -430,9 +430,9 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
430430
return ret;
431431
}
432432

433-
static void do_smp_send_nmi_ipi(int cpu)
433+
static void do_smp_send_nmi_ipi(int cpu, bool safe)
434434
{
435-
if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
435+
if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
436436
return;
437437

438438
if (cpu >= 0) {
@@ -472,7 +472,7 @@ void smp_flush_nmi_ipi(u64 delay_us)
472472
* - delay_us > 0 is the delay before giving up waiting for targets to
473473
* enter the handler, == 0 specifies indefinite delay.
474474
*/
475-
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
475+
int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
476476
{
477477
unsigned long flags;
478478
int me = raw_smp_processor_id();
@@ -505,7 +505,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
505505
nmi_ipi_busy_count++;
506506
nmi_ipi_unlock();
507507

508-
do_smp_send_nmi_ipi(cpu);
508+
do_smp_send_nmi_ipi(cpu, safe);
509509

510510
while (!cpumask_empty(&nmi_ipi_pending_mask)) {
511511
udelay(1);
@@ -527,6 +527,16 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
527527

528528
return ret;
529529
}
530+
531+
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
532+
{
533+
return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
534+
}
535+
536+
int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
537+
{
538+
return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
539+
}
530540
#endif /* CONFIG_NMI_IPI */
531541

532542
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -570,7 +580,7 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
570580
* entire NMI dance and waiting for
571581
* cpus to clear pending mask, etc.
572582
*/
573-
do_smp_send_nmi_ipi(cpu);
583+
do_smp_send_nmi_ipi(cpu, false);
574584
}
575585
}
576586
}

0 commit comments

Comments (0)