Merge branches 'doc.2024.06.06a', 'fixes.2024.07.04a', 'mb.2024.06.28a', 'nocb.2024.06.03a', 'rcu-tasks.2024.06.06a', 'rcutorture.2024.06.06a' and 'srcu.2024.06.18a' into HEAD

doc.2024.06.06a: Documentation updates.
fixes.2024.07.04a: Miscellaneous fixes.
mb.2024.06.28a: Grace-period memory-barrier redundancy removal.
nocb.2024.06.03a: No-CB CPU updates.
rcu-tasks.2024.06.06a: RCU-Tasks updates.
rcutorture.2024.06.06a: Torture-test updates.
srcu.2024.06.18a: SRCU polled-grace-period updates.
paulmckrcu committed Jul 4, 2024
7 parents a3fbf86 + 55d4669 + 677ab23 + e4f7805 + 399ced9 + 0ac55d0 + e206f33 commit 02219ca
Showing 22 changed files with 404 additions and 267 deletions.
@@ -149,9 +149,9 @@ This case is handled by calls to the strongly ordered
``atomic_add_return()`` read-modify-write atomic operation that
is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry
time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time.
The grace-period kthread invokes ``rcu_dynticks_snap()`` and
``rcu_dynticks_in_eqs_since()`` (both of which invoke
an ``atomic_add_return()`` of zero) to detect idle CPUs.
The grace-period kthread invokes first ``ct_dynticks_cpu_acquire()``
(preceded by a full memory barrier) and ``rcu_dynticks_in_eqs_since()``
(both of which rely on acquire semantics) to detect idle CPUs.
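For intuition, the ordering pattern described here can be sketched in user-space C with a simplified even/odd counter. This is an illustration only, with invented names and a simplified counter protocol, not the kernel's implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Even value: CPU is non-idle; odd value: CPU is idle (in an EQS). */
    static _Atomic unsigned long eqs_ctr;

    /* Idle entry/exit: value-returning RMW atomics are fully ordered,
     * mirroring the atomic_add_return() calls mentioned above. */
    void eqs_enter(void) { atomic_fetch_add(&eqs_ctr, 1); } /* now odd  */
    void eqs_exit(void)  { atomic_fetch_add(&eqs_ctr, 1); } /* now even */

    /* Grace-period side: an acquire load orders everything after the
     * sample, standing in for the acquire semantics described above. */
    unsigned long eqs_snap(void)
    {
        return atomic_load_explicit(&eqs_ctr, memory_order_acquire);
    }

    /* Was the CPU idle at the snapshot, or has it passed through
     * idle (counter changed) since then? */
    bool eqs_in_eqs_since(unsigned long snap)
    {
        return (snap & 1) || eqs_snap() != snap;
    }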

+-----------------------------------------------------------------------+
| **Quick Quiz**: |
8 changes: 8 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -5018,6 +5018,14 @@
the ->nocb_bypass queue. The definition of "too
many" is supplied by this kernel boot parameter.

rcutree.nohz_full_patience_delay= [KNL]
On callback-offloaded (rcu_nocbs) CPUs, avoid
disturbing RCU unless the grace period has
reached the specified age in milliseconds.
Defaults to zero. Large values will be capped
at five seconds. All values will be rounded down
to the nearest value representable by jiffies.
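For illustration, the clamping described above might look as follows in C. The helper name, the treatment of negative values, and the CONFIG_HZ value are assumptions for this sketch, not kernel code:

    #define HZ 250            /* assumption: CONFIG_HZ=250, i.e. 4 ms per jiffy */
    #define MSEC_PER_SEC 1000

    /* Hypothetical helper mirroring the documented behavior: values are
     * capped at five seconds, and integer truncation rounds the result
     * down to a whole number of jiffies. */
    static int patience_ms_to_jiffies(int ms)
    {
        if (ms < 0)
            ms = 0;                      /* assumption: treat as default */
        if (ms > 5 * MSEC_PER_SEC)
            ms = 5 * MSEC_PER_SEC;       /* large values capped at 5 s */
        return ms * HZ / MSEC_PER_SEC;   /* truncation rounds down */
    }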

rcutree.qhimark= [KNL]
Set threshold of queued RCU callbacks beyond which
batch limiting is disabled.
1 change: 1 addition & 0 deletions MAINTAINERS
@@ -18868,6 +18868,7 @@ M: Neeraj Upadhyay <[email protected]> (kernel/rcu/tasks.h)
M: Joel Fernandes <[email protected]>
M: Josh Triplett <[email protected]>
M: Boqun Feng <[email protected]>
M: Uladzislau Rezki <[email protected]>
R: Steven Rostedt <[email protected]>
R: Mathieu Desnoyers <[email protected]>
R: Lai Jiangshan <[email protected]>
88 changes: 38 additions & 50 deletions include/linux/rcu_segcblist.h
@@ -80,36 +80,35 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
* | allowing nocb_timer to be armed. |
* | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads. |
* ----------------------------------------------------------------------------
* |
* v
* -----------------------------------
* | |
* v v
* --------------------------------------- ----------------------------------|
* | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
* | | | |
* | | | |
* | CB kthread woke up and | | GP kthread woke up and |
* | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
* | Processes callbacks concurrently | | |
* | with rcu_core(), holding | | |
* | nocb_lock. | | |
* --------------------------------------- -----------------------------------
* | |
* -----------------------------------
* ----------------------------------------------------------------------------
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
* | + unparked CB kthread |
* | |
* | CB kthread got unparked and processes callbacks concurrently with |
* | rcu_core(), holding nocb_lock. |
* ---------------------------------------------------------------------------
* |
* v
* ---------------------------------------------------------------------------|
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP |
* | + unparked CB kthread |
* | |
* | GP kthread woke up and acknowledged nocb_lock. |
* ---------------------------------------- -----------------------------------
* |
* v
* |--------------------------------------------------------------------------|
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP | |
* | SEGCBLIST_KTHREAD_CB |
* | + unparked CB kthread |
* | |
* | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
* | handling callbacks. Enable bypass queueing. |
@@ -125,8 +124,8 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
* | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
* | ignores callbacks. Bypass enqueue is enabled. |
Expand All @@ -137,11 +136,11 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
* | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
* | handles callbacks concurrently. Bypass enqueue is enabled. |
* | handles callbacks concurrently. Bypass enqueue is disabled. |
* | Invoke RCU core so we make sure not to preempt it in the middle with |
* | leaving some urgent work unattended within a jiffy. |
* ----------------------------------------------------------------------------
@@ -150,42 +149,31 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
* | + unparked CB kthread |
* | |
* | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
* | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
* | bypass enqueue. |
* | holding nocb_lock. Wake up GP kthread if necessary. |
* ----------------------------------------------------------------------------
* |
* v
* -----------------------------------
* | |
* v v
* ---------------------------------------------------------------------------|
* | | |
* | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
* | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
* | | |
* | GP kthread woke up and | CB kthread woke up and |
* | acknowledged the fact that | acknowledged the fact that |
* | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
* | | The CB kthread goes to sleep |
* | The callbacks from the target CPU | until it ever gets re-offloaded. |
* | will be ignored from the GP kthread | |
* | loop. | |
* |--------------------------------------------------------------------------|
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | + unparked CB kthread |
* | |
* | GP kthread woke up and acknowledged the fact that SEGCBLIST_OFFLOADED |
* | got cleared. The callbacks from the target CPU will be ignored from the|
* | GP kthread loop. |
* ----------------------------------------------------------------------------
* | |
* -----------------------------------
* |
* v
* ----------------------------------------------------------------------------
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
* | + parked CB kthread |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
* | Flush pending nocb_timer. Flush nocb bypass callbacks. |
* | CB kthread is parked. Callbacks processed by rcu_core() from softirqs or |
* | local rcuc kthread, while holding nocb_lock. |
* ----------------------------------------------------------------------------
* |
* v
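As a rough illustration of how such state flags could gate callback handling, here is a hedged C sketch; the flag values and helper names are invented for this sketch and are not the kernel's actual definitions:

    /* Flag values below are invented for this sketch. */
    #define SEGCBLIST_RCU_CORE   0x01
    #define SEGCBLIST_LOCKING    0x02
    #define SEGCBLIST_KTHREAD_GP 0x04
    #define SEGCBLIST_OFFLOADED  0x08

    /* Does local rcu_core() still process this CPU's callbacks? */
    static inline int sketch_rcu_core_handles_cbs(unsigned long flags)
    {
        return !!(flags & SEGCBLIST_RCU_CORE);
    }

    /* Must callback-list accesses be protected by nocb_lock? */
    static inline int sketch_needs_nocb_lock(unsigned long flags)
    {
        return !!(flags & SEGCBLIST_LOCKING);
    }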
62 changes: 60 additions & 2 deletions include/linux/rcupdate.h
@@ -209,7 +209,6 @@ void synchronize_rcu_tasks_rude(void);

#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
@@ -218,7 +217,6 @@ void exit_tasks_rcu_finish(void);
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

@@ -421,11 +419,71 @@ static inline void rcu_preempt_sleep_check(void) { }
"Illegal context switch in RCU-sched read-side critical section"); \
} while (0)

// See RCU_LOCKDEP_WARN() for an explanation of the double call to
// debug_lockdep_rcu_enabled().
static inline bool lockdep_assert_rcu_helper(bool c)
{
return debug_lockdep_rcu_enabled() &&
(c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
debug_lockdep_rcu_enabled();
}

/**
* lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock()
*
* Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
*/
#define lockdep_assert_in_rcu_read_lock() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))

/**
* lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
*
* Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect.
* Note that local_bh_disable() and friends do not suffice here, instead an
* actual rcu_read_lock_bh() is required.
*/
#define lockdep_assert_in_rcu_read_lock_bh() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))

/**
* lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
*
* Splats if lockdep is enabled and there is no rcu_read_lock_sched()
* in effect. Note that preempt_disable() and friends do not suffice here,
* instead an actual rcu_read_lock_sched() is required.
*/
#define lockdep_assert_in_rcu_read_lock_sched() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))

/**
* lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
*
* Splats if lockdep is enabled and there is no RCU reader of any
* type in effect. Note that regions of code protected by things like
* preempt_disable, local_bh_disable(), and local_irq_disable() all qualify
* as RCU readers.
*
* Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY
* kernels that are not also built with PREEMPT_COUNT. But if you have
* lockdep enabled, you might as well also enable PREEMPT_COUNT.
*/
#define lockdep_assert_in_rcu_reader() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
!lock_is_held(&rcu_bh_lock_map) && \
!lock_is_held(&rcu_sched_lock_map) && \
preemptible()))

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)

#define lockdep_assert_in_rcu_read_lock() do { } while (0)
#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
#define lockdep_assert_in_rcu_reader() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */
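A hypothetical caller-side sketch of one of these new assertions; the structure and lookup function are invented for illustration, while lockdep_assert_in_rcu_read_lock() and hlist_for_each_entry_rcu() are the real APIs:

    #include <linux/rcupdate.h>
    #include <linux/rculist.h>

    struct foo {
        int key;
        struct hlist_node node;
    };

    /* Callers must be inside an RCU read-side critical section; the new
     * assertion turns that silent requirement into a lockdep splat. */
    static struct foo *foo_lookup(struct hlist_head *head, int key)
    {
        struct foo *fp;

        lockdep_assert_in_rcu_read_lock();
        hlist_for_each_entry_rcu(fp, head, node)
            if (fp->key == key)
                return fp;
        return NULL;
    }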

/*
35 changes: 35 additions & 0 deletions include/linux/srcu.h
@@ -57,10 +57,45 @@ void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);

#define SRCU_GET_STATE_COMPLETED 0x1

/**
* get_completed_synchronize_srcu - Return a pre-completed polled state cookie
*
* Returns a value that poll_state_synchronize_srcu() will always treat
* as a cookie whose grace period has already completed.
*/
static inline unsigned long get_completed_synchronize_srcu(void)
{
return SRCU_GET_STATE_COMPLETED;
}

unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);

// Maximum number of unsigned long values corresponding to
// not-yet-completed SRCU grace periods.
#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2

/**
* same_state_synchronize_srcu - Are two old-state values identical?
* @oldstate1: First old-state value.
* @oldstate2: Second old-state value.
*
* The two old-state values must have been obtained from either
* get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
* get_completed_synchronize_srcu(). Returns @true if the two values are
* identical and @false otherwise. This allows structures whose lifetimes
* are tracked by old-state values to push these values to a list header,
* allowing those structures to be slightly smaller.
*/
static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
{
return oldstate1 == oldstate2;
}

#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
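A hypothetical usage sketch of the polled grace-period API above; the foo structure and helpers are invented, while the SRCU functions are the real ones declared in this header:

    #include <linux/srcu.h>

    struct foo {
        unsigned long srcu_gp_state;
        /* ... payload ... */
    };

    /* Newly created objects start "already safe": the pre-completed cookie
     * makes poll_state_synchronize_srcu() return true immediately. */
    static void foo_init(struct foo *fp)
    {
        fp->srcu_gp_state = get_completed_synchronize_srcu();
    }

    /* Retire: record a cookie for a grace period that has yet to elapse. */
    static void foo_retire(struct srcu_struct *ssp, struct foo *fp)
    {
        fp->srcu_gp_state = start_poll_synchronize_srcu(ssp);
    }

    /* Reclaim is safe once the recorded grace period has completed. */
    static bool foo_can_reclaim(struct srcu_struct *ssp, struct foo *fp)
    {
        return poll_state_synchronize_srcu(ssp, fp->srcu_gp_state);
    }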
17 changes: 0 additions & 17 deletions kernel/pid_namespace.c
@@ -248,24 +248,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
set_current_state(TASK_INTERRUPTIBLE);
if (pid_ns->pid_allocated == init_pids)
break;
/*
* Release tasks_rcu_exit_srcu to avoid following deadlock:
*
* 1) TASK A unshare(CLONE_NEWPID)
* 2) TASK A fork() twice -> TASK B (child reaper for new ns)
* and TASK C
* 3) TASK B exits, kills TASK C, waits for TASK A to reap it
* 4) TASK A calls synchronize_rcu_tasks()
* -> synchronize_srcu(tasks_rcu_exit_srcu)
* 5) *DEADLOCK*
*
* It is considered safe to release tasks_rcu_exit_srcu here
* because we assume the current task can not be concurrently
* reaped at this point.
*/
exit_tasks_rcu_stop();
schedule();
exit_tasks_rcu_start();
}
__set_current_state(TASK_RUNNING);

1 change: 1 addition & 0 deletions kernel/rcu/rcuscale.c
@@ -42,6 +42,7 @@

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based scalability-test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <[email protected]>");
