From fddf9055a60dfcc97bda5ef03c8fa4108ed555c5 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 Aug 2020 09:13:30 +0200
Subject: [PATCH 01/12] lockdep: Use raw_cpu_*() for per-cpu variables

Sven reported that commit a21ee6055c30 ("lockdep: Change
hardirq{s_enabled,_context} to per-cpu variables") caused trouble on
s390 because their this_cpu_*() primitives disable preemption, which
then lands us back in tracing.

On the one hand, per-cpu ops should use preempt_*able_notrace() and
raw_local_irq_*(); on the other hand, we can trivially use raw_cpu_*()
ops for this.

Fixes: a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")
Reported-by: Sven Schnelle
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20200821085348.192346882@infradead.org
---
 include/linux/irqflags.h |  6 +++---
 include/linux/lockdep.h  | 18 +++++++++++++-----
 kernel/locking/lockdep.c |  4 ++--
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index bd5c55755447c5..d7e50a215ea98a 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -53,13 +53,13 @@ DECLARE_PER_CPU(int, hardirq_context);
   extern void trace_hardirqs_off_finish(void);
   extern void trace_hardirqs_on(void);
   extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))
+# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)	((p)->softirq_context)
 # define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
 # define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
 # define lockdep_hardirq_enter()			\
 do {							\
-	if (this_cpu_inc_return(hardirq_context) == 1)	\
+	if (__this_cpu_inc_return(hardirq_context) == 1)\
 		current->hardirq_threaded = 0;		\
 } while (0)
 # define lockdep_hardirq_threaded()		\
 do {						\
@@ -68,7 +68,7 @@ do {						\
 } while (0)
 # define lockdep_hardirq_exit()			\
 do {						\
-	this_cpu_dec(hardirq_context);		\
+	__this_cpu_dec(hardirq_context);	\
 } while (0)
 # define lockdep_softirq_enter()		\
 do {						\
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 62a382d1845bd5..6a584b3e5c74f5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -535,19 +535,27 @@ do {									\
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
 #define lockdep_assert_irqs_enabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_irqs_disabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_in_irq()						\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));	\
 } while (0)
 
 #define lockdep_assert_preemption_enabled()				\
@@ -555,7 +563,7 @@ do {									\
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() != 0		||		\
-		      !this_cpu_read(hardirqs_enabled)));		\
+		      !raw_cpu_read(hardirqs_enabled)));		\
 } while (0)
 
 #define lockdep_assert_preemption_disabled()				\
@@ -563,7 +571,7 @@ do {									\
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() == 0		&&		\
-		      this_cpu_read(hardirqs_enabled)));		\
+		      raw_cpu_read(hardirqs_enabled)));			\
 } while (0)
 
 #else
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2fad21d345b0e4..c872e95e6e4d14 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3756,7 +3756,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 skip_checks:
 	/* we'll do an OFF -> ON transition: */
-	this_cpu_write(hardirqs_enabled, 1);
+	__this_cpu_write(hardirqs_enabled, 1);
 	trace->hardirq_enable_ip = ip;
 	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
@@ -3795,7 +3795,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	/*
 	 * We have done an ON -> OFF transition:
 	 */
-	this_cpu_write(hardirqs_enabled, 0);
+	__this_cpu_write(hardirqs_enabled, 0);
 	trace->hardirq_disable_ip = ip;
 	trace->hardirq_disable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_off_events);
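For illustration only (a simplified sketch, not part of the patch and
not the actual s390 implementation): on an architecture whose
this_cpu_*() ops fall back to a preempt-safe generic version,
this_cpu_read() behaves roughly like the macro below, and the
instrumented preempt_disable()/preempt_enable() pair is what recurses
back into tracing:

	/* Hypothetical sketch of a preempt-safe per-cpu read: */
	#define sketch_this_cpu_read(pcp)				\
	({								\
		typeof(pcp) __val;					\
		preempt_disable();	/* instrumented: may trace */	\
		__val = raw_cpu_read(pcp);				\
		preempt_enable();	/* instrumented: may trace */	\
		__val;							\
	})

raw_cpu_read() is the same access without the preemption bookkeeping,
which is fine for the assertions above: with IRQs enabled, accidentally
reading the previous CPU's value is harmless.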
From 49d9c5936314e44d314c605c39cce0fd947f9c3a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 Aug 2020 16:47:24 +0200
Subject: [PATCH 02/12] cpuidle: Fixup IRQ state

Match the pattern elsewhere in this file.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.251340558@infradead.org
---
 drivers/cpuidle/cpuidle.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 87197319ab0694..2fe4f3cdf54d70 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -153,7 +153,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 	 */
 	stop_critical_timings();
 	drv->states[index].enter_s2idle(dev, drv, index);
-	WARN_ON(!irqs_disabled());
+	if (WARN_ON_ONCE(!irqs_disabled()))
+		local_irq_disable();
 	/*
 	 * timekeeping_resume() that will be called by tick_unfreeze() for the
 	 * first CPU executing it calls functions containing RCU read-side
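The point of the new form, beyond warning only once, is that it repairs
the broken state: the code that follows (tick_unfreeze(), timekeeping
resume) relies on IRQs being disabled. A hedged sketch of the idiom,
with made-up names:

	buggy_driver_callback();		/* must return with IRQs off   */
	if (WARN_ON_ONCE(!irqs_disabled()))	/* report the bad callback...  */
		local_irq_disable();		/* ...and restore the invariant */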
From 1098582a0f6c4e8fd28da0a6305f9233d02c9c1d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 7 Aug 2020 20:50:19 +0200
Subject: [PATCH 03/12] sched,idle,rcu: Push rcu_idle deeper into the idle path

Lots of things take locks; due to a wee bug, rcu_lockdep didn't notice
that the locking tracepoints were using RCU.

Push rcu_idle_{enter,exit}() as deep as possible into the idle paths.
This also resolves a lot of _rcuidle()/RCU_NONIDLE() usage.

Specifically, sched_clock_idle_wakeup_event() will use ktime, which
will use seqlocks, which will tickle lockdep, and
stop_critical_timings() takes a lock.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.310943801@infradead.org
---
 drivers/cpuidle/cpuidle.c | 12 ++++++++----
 kernel/sched/idle.c       | 22 ++++++++--------------
 2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2fe4f3cdf54d70..9bcda4153d3bb5 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -145,13 +145,14 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 	 * executing it contains RCU usage regarded as invalid in the idle
 	 * context, so tell RCU about that.
 	 */
-	RCU_NONIDLE(tick_freeze());
+	tick_freeze();
 	/*
 	 * The state used here cannot be a "coupled" one, because the "coupled"
 	 * cpuidle mechanism enables interrupts and doing that with timekeeping
 	 * suspended is generally unsafe.
 	 */
 	stop_critical_timings();
+	rcu_idle_enter();
 	drv->states[index].enter_s2idle(dev, drv, index);
 	if (WARN_ON_ONCE(!irqs_disabled()))
 		local_irq_disable();
@@ -160,7 +161,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 	 * first CPU executing it calls functions containing RCU read-side
 	 * critical sections, so tell RCU about that.
 	 */
-	RCU_NONIDLE(tick_unfreeze());
+	rcu_idle_exit();
+	tick_unfreeze();
 	start_critical_timings();
 
 	time_end = ns_to_ktime(local_clock());
@@ -229,16 +231,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	/* Take note of the planned idle state. */
 	sched_idle_set_state(target_state);
 
-	trace_cpu_idle_rcuidle(index, dev->cpu);
+	trace_cpu_idle(index, dev->cpu);
 	time_start = ns_to_ktime(local_clock());
 
 	stop_critical_timings();
+	rcu_idle_enter();
 	entered_state = target_state->enter(dev, drv, index);
+	rcu_idle_exit();
 	start_critical_timings();
 
 	sched_clock_idle_wakeup_event();
 	time_end = ns_to_ktime(local_clock());
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
 	sched_idle_set_state(NULL);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6bf34986f45ce9..ea3a0989dc4cf7 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -54,17 +54,18 @@ __setup("hlt", cpu_idle_nopoll_setup);
 
 static noinline int __cpuidle cpu_idle_poll(void)
 {
+	trace_cpu_idle(0, smp_processor_id());
+	stop_critical_timings();
 	rcu_idle_enter();
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
-	stop_critical_timings();
 
 	while (!tif_need_resched() &&
-		(cpu_idle_force_poll || tick_check_broadcast_expired()))
+	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
-	start_critical_timings();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+
 	rcu_idle_exit();
+	start_critical_timings();
+	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 
 	return 1;
 }
@@ -91,7 +92,9 @@ void __cpuidle default_idle_call(void)
 		local_irq_enable();
 	} else {
 		stop_critical_timings();
+		rcu_idle_enter();
 		arch_cpu_idle();
+		rcu_idle_exit();
 		start_critical_timings();
 	}
 }
@@ -158,7 +161,6 @@ static void cpuidle_idle_call(void)
 
 	if (cpuidle_not_available(drv, dev)) {
 		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();
 
 		default_idle_call();
 		goto exit_idle;
@@ -178,21 +180,17 @@ static void cpuidle_idle_call(void)
 		u64 max_latency_ns;
 
 		if (idle_should_enter_s2idle()) {
-			rcu_idle_enter();
 
 			entered_state = call_cpuidle_s2idle(drv, dev);
 			if (entered_state > 0)
 				goto exit_idle;
 
-			rcu_idle_exit();
-
 			max_latency_ns = U64_MAX;
 		} else {
 			max_latency_ns = dev->forced_idle_latency_limit_ns;
 		}
 
 		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();
 
 		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
 		call_cpuidle(drv, dev, next_state);
@@ -209,8 +207,6 @@ static void cpuidle_idle_call(void)
 		else
 			tick_nohz_idle_retain_tick();
 
-		rcu_idle_enter();
-
 		entered_state = call_cpuidle(drv, dev, next_state);
 		/*
 		 * Give the governor an opportunity to reflect on the outcome
@@ -226,8 +222,6 @@ static void cpuidle_idle_call(void)
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
-
-	rcu_idle_exit();
 }
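For reference, the RCU_NONIDLE() wrappers this patch removes were
forcing RCU back into the watching state around individual statements;
roughly (simplified from include/linux/rcupdate.h):

	#define RCU_NONIDLE(a)				\
	do {						\
		rcu_irq_enter_irqson();	/* RCU watches again */	\
		do { a; } while (0);			\
		rcu_irq_exit_irqson();	/* back to idle */	\
	} while (0)

Once rcu_idle_enter() is the very last step before the low-level idle
routine, tick_freeze()/tick_unfreeze(), the trace_cpu_idle() events and
sched_clock_idle_wakeup_event() all run while RCU is naturally
watching, and the wrappers can simply go.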
From bf9282dc26e7fe2a0736edc568762f0f05d12416 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 12 Aug 2020 12:22:17 +0200
Subject: [PATCH 04/12] cpuidle: Make CPUIDLE_FLAG_TLB_FLUSHED generic

This allows moving the leave_mm() call into generic code before
rcu_idle_enter(). Gets rid of more trace_*_rcuidle() users.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.369441600@infradead.org
---
 arch/x86/include/asm/mmu.h  |  1 +
 arch/x86/mm/tlb.c           | 13 ++-----------
 drivers/cpuidle/cpuidle.c   |  4 ++++
 drivers/idle/intel_idle.c   | 16 ----------------
 include/linux/cpuidle.h     | 13 +++++++------
 include/linux/mmu_context.h |  5 +++++
 6 files changed, 19 insertions(+), 33 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 0a301ad0b02f00..9257667d13c5e3 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -59,5 +59,6 @@ typedef struct {
 }
 
 void leave_mm(int cpu);
+#define leave_mm leave_mm
 
 #endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1a3569b43aa5bd..0951b47e64c10b 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -555,21 +555,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		load_new_mm_cr3(next->pgd, new_asid, true);
 
-		/*
-		 * NB: This gets called via leave_mm() in the idle path
-		 * where RCU functions differently. Tracing normally
-		 * uses RCU, so we need to use the _rcuidle variant.
-		 *
-		 * (There is no good reason for this. The idle code should
-		 *  be rearranged to call this before rcu_idle_enter().)
-		 */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
 		load_new_mm_cr3(next->pgd, new_asid, false);
 
-		/* See above wrt _rcuidle. */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
 	/* Make sure we write CR3 before loaded_mm. */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 9bcda4153d3bb5..04becd70cc41f5 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/tick.h>
+#include <linux/mmu_context.h>
 #include <trace/events/power.h>
 
 #include "cpuidle.h"
@@ -228,6 +229,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 		broadcast = false;
 	}
 
+	if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+		leave_mm(dev->cpu);
+
 	/* Take note of the planned idle state. */
 	sched_idle_set_state(target_state);
 
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8e0fb1a5bdbd1e..9a810e4a794603 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -89,14 +89,6 @@ static unsigned int mwait_substates __initdata;
  */
 #define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)
 
-/*
- * Set this flag for states where the HW flushes the TLB for us
- * and so we don't need cross-calls to keep it consistent.
- * If this flag is set, SW flushes the TLB, so even if the
- * HW doesn't do the flushing, this flag is safe to use.
- */
-#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(16)
-
 /*
  * MWAIT takes an 8-bit "hint" in EAX "suggesting"
  * the C-state (top nibble) and sub-state (bottom nibble)
@@ -131,14 +123,6 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned long ecx = 1; /* break on interrupt flag */
 	bool tick;
-	int cpu = smp_processor_id();
-
-	/*
-	 * leave_mm() to avoid costly and often unnecessary wakeups
-	 * for flushing the user TLB's associated with the active mm.
-	 */
-	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
-		leave_mm(cpu);
 
 	if (!static_cpu_has(X86_FEATURE_ARAT)) {
 		/*
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index b65909ae4e2011..75895e6363b89a 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -75,12 +75,13 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_NONE       (0x00)
-#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
-#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
-#define CPUIDLE_FLAG_UNUSABLE	BIT(3) /* avoid using this state */
-#define CPUIDLE_FLAG_OFF	BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_NONE		(0x00)
+#define CPUIDLE_FLAG_POLLING		BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED		BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP		BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index c51a84132d7c08..03dee12d2b61ce 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -3,10 +3,15 @@
 #define _LINUX_MMU_CONTEXT_H
 
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 
 /* Architectures that care about IRQ state in switch_mm can override this. */
 #ifndef switch_mm_irqs_off
 # define switch_mm_irqs_off switch_mm
 #endif
 
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
 #endif
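With the flag handled by the cpuidle core, a driver only declares it in
its state table and the core calls leave_mm() before entering the
state. An illustrative, made-up state entry (the names and numbers
below are invented for the example, not taken from any real table):

	static struct cpuidle_state example_cstates[] = {
		{
			.name			= "C6",	/* illustrative */
			.desc			= "example deep C-state",
			.flags			= CPUIDLE_FLAG_TIMER_STOP |
						  CPUIDLE_FLAG_TLB_FLUSHED,
			.exit_latency		= 104,
			.target_residency	= 248,
			.enter			= &intel_idle,
		},
	};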
From 9864f5b5943ab0f1f835f21dc3f9f068d06f5b52 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 12 Aug 2020 12:27:10 +0200
Subject: [PATCH 05/12] cpuidle: Move trace_cpu_idle() into generic code

Remove trace_cpu_idle() from the arch_cpu_idle() implementations and
put it in the generic code, right before disabling RCU. Gets rid of
more trace_*_rcuidle() users.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.428433395@infradead.org
---
 arch/arm/mach-omap2/pm34xx.c | 4 ----
 arch/arm64/kernel/process.c  | 2 --
 arch/s390/kernel/idle.c      | 3 +--
 arch/x86/kernel/process.c    | 4 ----
 kernel/sched/idle.c          | 3 +++
 5 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 6df395fff971b0..f5dfddf492e210 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		return;
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
-
 	omap_sram_idle();
-
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index b63ce4c54cfe9c..f1804496b93508 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
 	 * This should do all the clock switching and wait for interrupt
 	 * tricks
 	 */
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	cpu_do_idle();
 	local_irq_enable();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 88bb42ca500847..c73f50649e7e41 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -33,14 +33,13 @@ void enabled_wait(void)
 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	clear_cpu_flag(CIF_NOHZ_DELAY);
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	local_irq_save(flags);
 	/* Call the assembler magic in entry.S */
 	psw_idle(idle, psw_mask);
 	local_irq_restore(flags);
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
+	/* XXX seqcount has tracepoints that require RCU */
 	write_seqcount_begin(&idle->seqcount);
 	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
 	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 994d8393f2f7b0..13ce616cc7afb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -684,9 +684,7 @@ void arch_cpu_idle(void)
  */
 void __cpuidle default_idle(void)
 {
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	safe_halt();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
 EXPORT_SYMBOL(default_idle);
@@ -792,7 +790,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static __cpuidle void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
@@ -804,7 +801,6 @@ static __cpuidle void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ea3a0989dc4cf7..f324dc36fc43de 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -91,11 +91,14 @@ void __cpuidle default_idle_call(void)
 	if (current_clr_polling_and_test()) {
 		local_irq_enable();
 	} else {
+
+		trace_cpu_idle(1, smp_processor_id());
 		stop_critical_timings();
 		rcu_idle_enter();
 		arch_cpu_idle();
 		rcu_idle_exit();
 		start_critical_timings();
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	}
 }
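The reason trace_cpu_idle() has to sit before rcu_idle_enter() at all
is that tracepoints dereference their handler list under RCU. A rough
sketch of the core dispatch (simplified from the __DO_TRACE()
machinery, not the literal macro):

	struct tracepoint_func *funcs;

	rcu_read_lock_sched_notrace();
	funcs = rcu_dereference_sched(tp->funcs);  /* only legal while RCU watches */
	if (funcs)
		((void (*)(void *))funcs->func)(funcs->data);
	rcu_read_unlock_sched_notrace();

Hence the rule this patch enforces: tracepoints live in the generic
idle path, the arch code stays tracepoint-free, and the s390 XXX
comment marks the remaining seqcount user still needing attention.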
From 7da93f379330f2be1122ca7f54ab1eb44ef9aa59 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 12 Aug 2020 19:28:07 +0200
Subject: [PATCH 06/12] x86/entry: Remove unused THUNKs

Unused remnants.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.487040689@infradead.org
---
 arch/x86/entry/thunk_32.S | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index 3a07ce3ec70baf..f1f96d4d8cd607 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -29,11 +29,6 @@ SYM_CODE_START_NOALIGN(\name)
 SYM_CODE_END(\name)
 .endm
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
-	THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
 #ifdef CONFIG_PREEMPTION
 	THUNK preempt_schedule_thunk, preempt_schedule
 	THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
From 00b0ed2d4997af6d0a93edef820386951fd66d94 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 12 Aug 2020 19:28:06 +0200
Subject: [PATCH 07/12] locking/lockdep: Cleanup

Whitespace cleanups and a more readable layout for the local_irq_*()
macros; no functional change.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.546087214@infradead.org
---
 include/linux/irqflags.h | 54 ++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 24 deletions(-)

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index d7e50a215ea98a..00d553d7791133 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -49,10 +49,11 @@ struct irqtrace_events {
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
-  extern void trace_hardirqs_on_prepare(void);
-  extern void trace_hardirqs_off_finish(void);
-  extern void trace_hardirqs_on(void);
-  extern void trace_hardirqs_off(void);
+extern void trace_hardirqs_on_prepare(void);
+extern void trace_hardirqs_off_finish(void);
+extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_off(void);
+
 # define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)	((p)->softirq_context)
 # define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
@@ -120,17 +121,17 @@ do {						\
 #else
 # define trace_hardirqs_on_prepare()		do { } while (0)
 # define trace_hardirqs_off_finish()		do { } while (0)
-# define trace_hardirqs_on()		do { } while (0)
-# define trace_hardirqs_off()		do { } while (0)
-# define lockdep_hardirq_context()	0
-# define lockdep_softirq_context(p)	0
-# define lockdep_hardirqs_enabled()	0
-# define lockdep_softirqs_enabled(p)	0
-# define lockdep_hardirq_enter()	do { } while (0)
-# define lockdep_hardirq_threaded()	do { } while (0)
-# define lockdep_hardirq_exit()		do { } while (0)
-# define lockdep_softirq_enter()	do { } while (0)
-# define lockdep_softirq_exit()		do { } while (0)
+# define trace_hardirqs_on()			do { } while (0)
+# define trace_hardirqs_off()			do { } while (0)
+# define lockdep_hardirq_context()		0
+# define lockdep_softirq_context(p)		0
+# define lockdep_hardirqs_enabled()		0
+# define lockdep_softirqs_enabled(p)		0
+# define lockdep_hardirq_enter()		do { } while (0)
+# define lockdep_hardirq_threaded()		do { } while (0)
+# define lockdep_hardirq_exit()			do { } while (0)
+# define lockdep_softirq_enter()		do { } while (0)
+# define lockdep_softirq_exit()			do { } while (0)
 # define lockdep_hrtimer_enter(__hrtimer)	false
 # define lockdep_hrtimer_exit(__context)	do { } while (0)
 # define lockdep_posixtimer_enter()		do { } while (0)
@@ -181,17 +182,25 @@ do {						\
  * if !TRACE_IRQFLAGS.
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
-	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
-	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+
+#define local_irq_enable()				\
+	do {						\
+		trace_hardirqs_on();			\
+		raw_local_irq_enable();			\
+	} while (0)
+
+#define local_irq_disable()				\
+	do {						\
+		raw_local_irq_disable();		\
+		trace_hardirqs_off();			\
+	} while (0)
+
 #define local_irq_save(flags)				\
 	do {						\
 		raw_local_irq_save(flags);		\
 		trace_hardirqs_off();			\
 	} while (0)
-
 #define local_irq_restore(flags)			\
 	do {						\
 		if (raw_irqs_disabled_flags(flags)) {	\
@@ -214,10 +223,7 @@ do {						\
 
 #define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
 #define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags)				\
-	do {						\
-		raw_local_irq_save(flags);		\
-	} while (0)
+#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
 #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
 #define safe_halt()		do { raw_safe_halt(); } while (0)
From 36206b588bc815e5f64e8da72d7ab79e00b76281 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 Aug 2020 09:27:52 +0200
Subject: [PATCH 08/12] nds32: Implement arch_irqs_disabled()

Cc: Nick Hu
Cc: Greentime Hu
Cc: Vincent Chen
Reported-by: kernel test robot
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.604899379@infradead.org
---
 arch/nds32/include/asm/irqflags.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h
index fb45ec46bb1b2e..51ef800bb3018b 100644
--- a/arch/nds32/include/asm/irqflags.h
+++ b/arch/nds32/include/asm/irqflags.h
@@ -34,3 +34,8 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !flags;
 }
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
From 021c109330ebc1f54b546c63a078ea3c31356ecb Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 21 Aug 2020 10:40:49 +0200
Subject: [PATCH 09/12] arm64: Implement arch_irqs_disabled()

Cc: Catalin Marinas
Cc: Will Deacon
Reported-by: kernel test robot
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Link: https://lkml.kernel.org/r/20200821085348.664425120@infradead.org
---
 arch/arm64/include/asm/irqflags.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa4b6521ef1441..ff328e5bbb7572 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return res;
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
From 99dc56feb7932020502d40107a712fa302b32082 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Sat, 22 Aug 2020 18:04:15 +0200
Subject: [PATCH 10/12] mips: Implement arch_irqs_disabled()

Cc: Thomas Bogendoerfer
Cc: Paul Burton
Reported-by: kernel test robot
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20200826101653.GE1362448@hirez.programming.kicks-ass.net
---
 arch/mips/include/asm/irqflags.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 47a8ffc0b413ed..f5b8300f45735c 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -137,6 +137,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return !(flags & 1);
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 #endif /* #ifndef __ASSEMBLY__ */
 
 /*
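These three arch patches all plug the same hole: the next patch makes
the generic local_irq_*() macros consult the current hardware IRQ
state, which they reach through roughly the following plumbing (from
include/linux/irqflags.h):

	#define raw_irqs_disabled_flags(flags)			\
		({						\
			typecheck(unsigned long, flags);	\
			arch_irqs_disabled_flags(flags);	\
		})
	#define raw_irqs_disabled()	(arch_irqs_disabled())

so every architecture now needs arch_irqs_disabled() as a C-level
definition; nds32, arm64 and mips only provided
arch_irqs_disabled_flags(), and the kernel test robot caught them.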
From 044d0d6de9f50192f9697583504a382347ee95ca Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Thu, 23 Jul 2020 20:56:14 +1000
Subject: [PATCH 11/12] lockdep: Only trace IRQ edges

Problem:

	raw_local_irq_save(); // software state on
	local_irq_save(); // software state off
	...
	local_irq_restore(); // software state still off, because we don't enable IRQs
	raw_local_irq_restore(); // software state still off, *whoopsie*

existing instances:

 - lock_acquire()
     raw_local_irq_save()
     __lock_acquire()
       arch_spin_lock(&graph_lock)
         pv_wait() := kvm_wait() (same or worse for Xen/HyperV)
           local_irq_save()

 - trace_clock_global()
     raw_local_irq_save()
     arch_spin_lock()
       pv_wait() := kvm_wait()
         local_irq_save()

 - apic_retrigger_irq()
     raw_local_irq_save()
     apic->send_IPI() := default_send_IPI_single_phys()
       local_irq_save()

Possible solutions:

 A) make it work by enabling the tracing inside raw_*()
 B) make it work by keeping tracing disabled inside raw_*()
 C) call it broken and clean it up now

Now, given that the only reason to use the raw_* variant is that you
don't want tracing, A) seems like a weird option (although it can be
done). C) is tempting, but OTOH it ends up converting a _lot_ of code
to raw just because there is one raw user; this strips the
validation/tracing off for all the other users.

So we pick B) and declare any code that ends up doing:

	raw_local_irq_save()
	local_irq_save()
	lockdep_assert_irqs_disabled();

broken. AFAICT this problem has existed forever, the only reason it
came up is that commit:

  859d069ee1dd ("lockdep: Prepare for NMI IRQ state tracking")

changed IRQ tracing vs lockdep recursion; the first instance is fairly
common, the other cases hardly ever happen.

Signed-off-by: Nicholas Piggin
[rewrote changelog]
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200723105615.1268126-1-npiggin@gmail.com
---
 arch/powerpc/include/asm/hw_irq.h | 11 ++++-------
 include/linux/irqflags.h          | 15 +++++++--------
 2 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 3a0db7b0b46efc..35060be090732d 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void)
 #define powerpc_local_irq_pmu_save(flags)			\
 	 do {							\
 		raw_local_irq_pmu_save(flags);			\
-		trace_hardirqs_off();				\
+		if (!raw_irqs_disabled_flags(flags))		\
+			trace_hardirqs_off();			\
 	} while(0)
 #define powerpc_local_irq_pmu_restore(flags)			\
 	do {							\
-		if (raw_irqs_disabled_flags(flags)) {		\
-			raw_local_irq_pmu_restore(flags);	\
-			trace_hardirqs_off();			\
-		} else {					\
+		if (!raw_irqs_disabled_flags(flags))		\
 			trace_hardirqs_on();			\
-			raw_local_irq_pmu_restore(flags);	\
-		}						\
+		raw_local_irq_pmu_restore(flags);		\
 	} while(0)
 #else
 #define powerpc_local_irq_pmu_save(flags)			\
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 00d553d7791133..3ed4e8771b64e7 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -191,25 +191,24 @@ do {						\
 
 #define local_irq_disable()				\
 	do {						\
+		bool was_disabled = raw_irqs_disabled();\
 		raw_local_irq_disable();		\
-		trace_hardirqs_off();			\
+		if (!was_disabled)			\
+			trace_hardirqs_off();		\
 	} while (0)
 
 #define local_irq_save(flags)				\
 	do {						\
 		raw_local_irq_save(flags);		\
-		trace_hardirqs_off();			\
+		if (!raw_irqs_disabled_flags(flags))	\
+			trace_hardirqs_off();		\
 	} while (0)
 
 #define local_irq_restore(flags)			\
 	do {						\
-		if (raw_irqs_disabled_flags(flags)) {	\
-			raw_local_irq_restore(flags);	\
-			trace_hardirqs_off();		\
-		} else {				\
+		if (!raw_irqs_disabled_flags(flags))	\
 			trace_hardirqs_on();		\
-			raw_local_irq_restore(flags);	\
-		}					\
+		raw_local_irq_restore(flags);		\
 	} while (0)
 
 #define safe_halt()					\
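A hedged walk-through of the resulting semantics (illustrative, not
code from the patch): with edge-only tracing, a local_irq_save()/
restore() pair nested inside a raw-disabled region no longer corrupts
the software state:

	unsigned long flags, flags2;

	raw_local_irq_save(flags);	/* HW off; lockdep state untouched ("on") */

	local_irq_save(flags2);		/* flags2 says "was already off"
					 * -> no trace_hardirqs_off()		*/
	local_irq_restore(flags2);	/* restoring an off state
					 * -> no trace_hardirqs_on()		*/

	raw_local_irq_restore(flags);	/* HW on; lockdep state still "on"	*/

Before this patch, the nested pair left the software state "off" after
the outer raw restore -- the *whoopsie* above. Note that asserting
lockdep_assert_irqs_disabled() inside such a region is still declared
broken, per the changelog.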
From eb1f00237aca2e368b93db79303f8433d1976d10 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 7 Aug 2020 20:53:16 +0200
Subject: [PATCH 12/12] lockdep,trace: Expose tracepoints

The lockdep tracepoints are under the lockdep recursion counter, which
has a bunch of nasty side effects:

 - TRACE_IRQFLAGS doesn't work across the entire tracepoint

 - RCU-lockdep doesn't see the tracepoints either, hiding numerous
   "suspicious RCU usage" warnings.

Pull the trace_lock_*() tracepoints completely out from under the
lockdep recursion handling and rely entirely on the trace-level
recursion handling -- also, tracing *SHOULD* not be taking locks in
any case.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Steven Rostedt (VMware)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Marco Elver
Link: https://lkml.kernel.org/r/20200821085348.782688941@infradead.org
---
 kernel/locking/lockdep.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c872e95e6e4d14..54b74fabf40c7b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4977,6 +4977,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion)) {
 		/* XXX allow trylock from NMI ?!? */
 		if (lockdep_nmi() && !trylock) {
@@ -5001,7 +5003,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	check_flags(flags);
 
 	current->lockdep_recursion++;
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
 	lockdep_recursion_finish();
@@ -5013,13 +5014,15 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_release(lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
+
 	current->lockdep_recursion++;
-	trace_lock_release(lock, ip);
 	if (__lock_release(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
@@ -5205,8 +5208,6 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 		hlock->holdtime_stamp = now;
 	}
 
-	trace_lock_acquired(lock, ip);
-
 	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
 		if (hlock->read)
@@ -5225,6 +5226,8 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat || !debug_locks))
 		return;
@@ -5234,7 +5237,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion++;
-	trace_lock_contended(lock, ip);
 	__lock_contended(lock, ip);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5245,6 +5247,8 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat || !debug_locks))
 		return;
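The net effect, sketched as a before/after fragment (abridged shape,
not a verbatim quote of the final functions):

	/* Before: events silently dropped whenever lockdep was recursing */
	if (unlikely(current->lockdep_recursion))
		return;				/* tracepoint never fired */
	...
	current->lockdep_recursion++;
	trace_lock_acquire(...);		/* hidden from TRACE_IRQFLAGS
						 * and RCU-lockdep */

	/* After: the tracepoint fires first, guarded only by the tracer's
	 * own per-context recursion protection */
	trace_lock_acquire(...);
	if (unlikely(current->lockdep_recursion))
		return;				/* event already emitted */

Note: the hunks above place trace_lock_contended() at the top of
lock_contended() and trace_lock_acquired() at the top of
lock_acquired(), matching the tracepoints each function removes from
under the recursion counter.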