Merge branch 'core-rcu-2021.07.04' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull RCU updates from Paul McKenney:

 - Bitmap parsing support for "all" as an alias for all bits

 - Documentation updates

 - Miscellaneous fixes, including some that overlap into mm and lockdep

 - kvfree_rcu() updates

 - mem_dump_obj() updates, with acks from one of the slab-allocator
   maintainers

 - RCU NOCB CPU updates, including limited deoffloading

 - SRCU updates

 - Tasks-RCU updates

 - Torture-test updates

* 'core-rcu-2021.07.04' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (78 commits)
  tasks-rcu: Make show_rcu_tasks_gp_kthreads() be static inline
  rcu-tasks: Make ksoftirqd provide RCU Tasks quiescent states
  rcu: Add missing __releases() annotation
  rcu: Remove obsolete rcu_read_unlock() deadlock commentary
  rcu: Improve comments describing RCU read-side critical sections
  rcu: Create an unrcu_pointer() to remove __rcu from a pointer
  srcu: Early test SRCU polling start
  rcu: Fix various typos in comments
  rcu/nocb: Unify timers
  rcu/nocb: Prepare for fine-grained deferred wakeup
  rcu/nocb: Only cancel nocb timer if not polling
  rcu/nocb: Delete bypass_timer upon nocb_gp wakeup
  rcu/nocb: Cancel nocb_timer upon nocb_gp wakeup
  rcu/nocb: Allow de-offloading rdp leader
  rcu/nocb: Directly call __wake_nocb_gp() from bypass timer
  rcu: Don't penalize priority boosting when there is nothing to boost
  rcu: Point to documentation of ordering guarantees
  rcu: Make rcu_gp_cleanup() be noinline for tracing
  rcu: Restrict RCU_STRICT_GRACE_PERIOD to at most four CPUs
  rcu: Make show_rcu_gp_kthreads() dump rcu_node structures blocking GP
  ...
torvalds committed Jul 4, 2021
2 parents da803f8 + 641faf1 commit 28e92f9
Showing 49 changed files with 1,252 additions and 577 deletions.
@@ -21,7 +21,7 @@ Any code that happens after the end of a given RCU grace period is guaranteed
to see the effects of all accesses prior to the beginning of that grace
period that are within RCU read-side critical sections.
Similarly, any code that happens before the beginning of a given RCU grace
-period is guaranteed to see the effects of all accesses following the end
+period is guaranteed to not see the effects of all accesses following the end
of that grace period that are within RCU read-side critical sections.

Note well that RCU-sched read-side critical sections include any region
@@ -339,14 +339,14 @@ The diagram below shows the path of ordering if the leftmost
leftmost ``rcu_node`` structure offlines its last CPU and if the next
``rcu_node`` structure has no online CPUs).

-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-2.svg

The final ``rcu_gp_init()`` pass through the ``rcu_node`` tree traverses
breadth-first, setting each ``rcu_node`` structure's ``->gp_seq`` field
to the newly advanced value from the ``rcu_state`` structure, as shown
in the following diagram.

-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-3.svg

This change will also cause each CPU's next call to
``__note_gp_changes()`` to notice that a new grace period has started,
5 changes: 5 additions & 0 deletions Documentation/admin-guide/kernel-parameters.rst
@@ -76,6 +76,11 @@ to change, such as less cores in the CPU list, then N and any ranges using N
will also change. Use the same on a small 4 core system, and "16-N" becomes
"16-3" and now the same boot input will be flagged as invalid (start > end).

+The special case-tolerant group name "all" selects all CPUs, so that
+"nohz_full=all" is equivalent to "nohz_full=0-N".
+
+The semantics of "N" and "all" are supported at the bitmap level and hold
+for all users of bitmap_parse().

This document may not be entirely up to date and comprehensive. The command
"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
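The "all" alias is implemented at the bitmap level, so any bitmap_parse() user gets it. A minimal in-kernel sketch of the behavior (demo_bitmap_all() and the 16-bit map are illustrative, not part of this commit):

    #include <linux/bitmap.h>
    #include <linux/printk.h>

    static void demo_bitmap_all(void)
    {
            DECLARE_BITMAP(mask, 16);
            int err;

            /* "all" now parses the same as "0-15" would for a 16-bit map. */
            err = bitmap_parse("all", 3, mask, 16);
            if (err)
                    pr_err("bitmap_parse(\"all\") failed: %d\n", err);
            else
                    pr_info("bits set: %u\n", bitmap_weight(mask, 16));
    }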
5 changes: 5 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -4354,6 +4354,11 @@
whole algorithm to behave better in low memory
condition.

+	rcutree.rcu_delay_page_cache_fill_msec= [KNL]
+			Set the page-cache refill delay (in milliseconds)
+			in response to low-memory conditions. The permitted
+			values are in the range 0:100000.
+
rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
first attempt to force quiescent states.
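For example, booting with "rcutree.rcu_delay_page_cache_fill_msec=1000" would hold off the refill for a full second after a low-memory episode; the value here is only an illustration, not a recommendation.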
72 changes: 36 additions & 36 deletions include/linux/rcupdate.h
@@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void)
#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(".data.unlikely") __warned; \
-if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
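With the operands reordered as above, the common case of a false condition now short-circuits before consulting lockdep state at all. A typical call site looks like this sketch (my_table and my_table_find() are hypothetical):

    #include <linux/rcupdate.h>

    struct my_table;
    void *my_table_find(struct my_table *t, int key);   /* hypothetical helper */

    static void *my_lookup(struct my_table *t, int key)
    {
            RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                             "my_lookup() called outside RCU read-side critical section");
            return my_table_find(t, key);
    }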
@@ -373,7 +373,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#define unrcu_pointer(p) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \
-rcu_check_sparse(p, __rcu); \
+rcu_check_sparse(p, __rcu); \
((typeof(*p) __force __kernel *)(_________p1)); \
})

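A minimal usage sketch for the new macro (struct node and detach_next_locked() are illustrative): unlike rcu_dereference_protected(), unrcu_pointer() implies no READ_ONCE() or lockdep checking, so it suits pointer values the caller already owns outright, for example under the update-side lock:

    #include <linux/rcupdate.h>

    struct node {
            struct node __rcu *next;
            int key;
    };

    /* Caller holds the update-side lock, so n->next cannot change. */
    static struct node *detach_next_locked(struct node *n)
    {
            struct node *next = unrcu_pointer(n->next);

            RCU_INIT_POINTER(n->next, NULL);
            return next;
    }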
@@ -532,7 +532,12 @@ do { \
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu(), and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
*/
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
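A sketch of the dual-protection idiom this check expresses (struct foo, gp, and my_lock are illustrative): the dereference is legal either inside rcu_read_lock_bh() or with my_lock held, and lockdep verifies exactly that disjunction:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct foo {
            int val;
    };

    static struct foo __rcu *gp;
    static DEFINE_SPINLOCK(my_lock);

    static int read_val(void)
    {
            struct foo *p;
            int ret;

            rcu_read_lock_bh();
            p = rcu_dereference_bh_check(gp, lockdep_is_held(&my_lock));
            ret = p ? p->val : -1;
            rcu_read_unlock_bh();
            return ret;
    }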
@@ -543,6 +548,11 @@ do { \
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu(), and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
*/
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
@@ -634,6 +644,12 @@ do { \
* sections, invocation of the corresponding RCU callback is deferred
until after all the other CPUs exit their critical sections.
*
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() is guaranteed to be waited for.
+ *
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -686,33 +702,12 @@ static __always_inline void rcu_read_lock(void)
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
@@ -728,9 +723,11 @@ static inline void rcu_read_unlock(void)
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -763,9 +760,12 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
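Taken together, the comment updates above describe the post-v5.0 consolidated behavior: a softirq- or preemption-disabled region is also a vanilla RCU reader, so a single synchronize_rcu() covers them all. A sketch of that guarantee (struct cfg and both functions are illustrative, not from this commit):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct cfg {
            int threshold;
    };

    static struct cfg __rcu *active_cfg;

    static int reader_in_bh(void)
    {
            struct cfg *c;
            int t;

            rcu_read_lock_bh();     /* also a vanilla RCU reader since v5.0 */
            c = rcu_dereference_bh(active_cfg);
            t = c ? c->threshold : -1;
            rcu_read_unlock_bh();
            return t;
    }

    static void install_cfg(struct cfg *newc)
    {
            struct cfg *oldc = rcu_replace_pointer(active_cfg, newc, true);

            synchronize_rcu();      /* waits for reader_in_bh(), too */
            kfree(oldc);
    }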
1 change: 0 additions & 1 deletion include/linux/rcutiny.h
@@ -86,7 +86,6 @@ static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \
(is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
1 change: 0 additions & 1 deletion include/linux/rcutree.h
@@ -49,7 +49,6 @@ void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
-void rcu_irq_exit_preempt(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
bool rcu_is_idle_cpu(int cpu);
6 changes: 6 additions & 0 deletions include/linux/srcu.h
@@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);

+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
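For reference, the polling interfaces declared in the context above can be used as in this sketch (my_srcu and poll_example() are illustrative):

    #include <linux/srcu.h>

    DEFINE_SRCU(my_srcu);

    static void poll_example(void)
    {
            unsigned long cookie = start_poll_synchronize_srcu(&my_srcu);

            /* ... do other work while the grace period progresses ... */

            if (!poll_state_synchronize_srcu(&my_srcu, cookie))
                    synchronize_srcu(&my_srcu);     /* not yet elapsed; block */
    }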
2 changes: 0 additions & 2 deletions include/linux/srcutree.h
@@ -82,9 +82,7 @@ struct srcu_struct {
/* callback for the barrier */
/* operation. */
struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};

/* Values for state variable (bottom bits of ->srcu_gp_seq). */
2 changes: 0 additions & 2 deletions include/linux/timer.h
@@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);

#define del_singleshot_timer_sync(t) del_timer_sync(t)

-extern bool timer_curr_running(struct timer_list *timer);
-
extern void init_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
1 change: 1 addition & 0 deletions include/trace/events/rcu.h
@@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
* "WakeNot": Don't wake rcuo kthread.
* "WakeNotPoll": Don't wake rcuo kthread because it is polling.
* "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
* "WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
* "WokeEmpty": rcuo CB kthread woke to find empty list.
*/
TRACE_EVENT_RCU(rcu_nocb_wake,
2 changes: 2 additions & 0 deletions init/main.c
@@ -42,6 +42,7 @@
#include <linux/profile.h>
#include <linux/kfence.h>
#include <linux/rcupdate.h>
+#include <linux/srcu.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
#include <linux/writeback.h>
@@ -1008,6 +1009,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
tick_init();
rcu_init_nohz();
init_timers();
+srcu_init();
hrtimers_init();
softirq_init();
timekeeping_init();
6 changes: 4 additions & 2 deletions kernel/locking/lockdep.c
@@ -6506,6 +6506,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
struct task_struct *curr = current;
+int dl = READ_ONCE(debug_locks);

/* Note: the following can be executed concurrently, so be careful. */
pr_warn("\n");
@@ -6515,11 +6516,12 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
pr_warn("-----------------------------\n");
pr_warn("%s:%d %s!\n", file, line, s);
pr_warn("\nother info that might help us debug this:\n\n");
pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s",
!rcu_lockdep_current_cpu_online()
? "RCU used illegally from offline CPU!\n"
: "",
-rcu_scheduler_active, debug_locks);
+rcu_scheduler_active, dl,
+dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n");

/*
* If a CPU is in the RCU-free window in idle (ie: in the section
2 changes: 1 addition & 1 deletion kernel/rcu/Kconfig.debug
@@ -116,7 +116,7 @@

config RCU_STRICT_GRACE_PERIOD
bool "Provide debug RCU implementation with short grace periods"
-depends on DEBUG_KERNEL && RCU_EXPERT
+depends on DEBUG_KERNEL && RCU_EXPERT && NR_CPUS <= 4
default n
select PREEMPT_COUNT if PREEMPT=n
help
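With the tightened dependency, enabling this debug option requires a small-CPU-count build; a hypothetical .config fragment satisfying it would be:

    CONFIG_NR_CPUS=4
    CONFIG_DEBUG_KERNEL=y
    CONFIG_RCU_EXPERT=y
    CONFIG_RCU_STRICT_GRACE_PERIOD=y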
14 changes: 8 additions & 6 deletions kernel/rcu/rcu.h
@@ -308,6 +308,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
}
}

+extern void rcu_init_geometry(void);
+
/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

@@ -422,12 +424,6 @@ do { \

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

-#ifdef CONFIG_SRCU
-void srcu_init(void);
-#else /* #ifdef CONFIG_SRCU */
-static inline void srcu_init(void) { }
-#endif /* #else #ifdef CONFIG_SRCU */
-
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
@@ -441,7 +437,11 @@ bool rcu_gp_is_expedited(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
+#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+static inline void show_rcu_tasks_gp_kthreads(void) {}
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

@@ -519,6 +519,7 @@ static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
+static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
@@ -527,6 +528,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
+bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);