sched/core: Introduce sched_asym_cpucap_active()
Create an inline helper for conditional code that should only be executed on
asymmetric CPU capacity systems. This makes these conditions (currently ~10,
plus any future ones) much more readable.

Signed-off-by: Dietmar Eggemann <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
deggeman authored and Ingo Molnar committed Aug 2, 2022
1 parent 9de1f9c commit 740cf8a
Showing 5 changed files with 14 additions and 9 deletions.
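
In short, the patch adds the helper below to kernel/sched/sched.h and switches every open-coded test of the static key over to it. This is only a condensed view of the hunks that follow (the call-site lines are the ones from kernel/sched/cpudeadline.c), not extra code from the commit:

static __always_inline bool sched_asym_cpucap_active(void)
{
	return static_branch_unlikely(&sched_asym_cpucapacity);
}

/* Call sites change from the open-coded static branch test ... */
if (!static_branch_unlikely(&sched_asym_cpucapacity))
	return 1;

/* ... to the named helper, with identical behaviour: */
if (!sched_asym_cpucap_active())
	return 1;
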
2 changes: 1 addition & 1 deletion kernel/sched/cpudeadline.c
@@ -123,7 +123,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 		unsigned long cap, max_cap = 0;
 		int cpu, max_cpu = -1;

-		if (!static_branch_unlikely(&sched_asym_cpucapacity))
+		if (!sched_asym_cpucap_active())
 			return 1;

 		/* Ensure the capacity of the CPUs fits the task. */
4 changes: 2 additions & 2 deletions kernel/sched/deadline.c
@@ -144,7 +144,7 @@ static inline unsigned long __dl_bw_capacity(int i)
  */
 static inline unsigned long dl_bw_capacity(int i)
 {
-	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
+	if (!sched_asym_cpucap_active() &&
 	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
 		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
 	} else {
@@ -1849,7 +1849,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags)
 	 * Take the capacity of the CPU into account to
 	 * ensure it fits the requirement of the task.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity))
+	if (sched_asym_cpucap_active())
 		select_rq |= !dl_task_fits_capacity(p, cpu);

 	if (select_rq) {
8 changes: 4 additions & 4 deletions kernel/sched/fair.c
@@ -4262,7 +4262,7 @@ static inline int task_fits_capacity(struct task_struct *p,

 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 {
-	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+	if (!sched_asym_cpucap_active())
 		return;

 	if (!p || p->nr_cpus_allowed == 1) {
@@ -6506,7 +6506,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)

 static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
 {
-	if (static_branch_unlikely(&sched_asym_cpucapacity))
+	if (sched_asym_cpucap_active())
 		return fits_capacity(task_util, capacity_of(cpu));

 	return true;
@@ -6526,7 +6526,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 * On asymmetric system, update task utilization because we will check
 	 * that the task fits with cpu's capacity.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 		sync_entity_load_avg(&p->se);
 		task_util = uclamp_task_util(p);
 	}
@@ -6580,7 +6580,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 * For asymmetric CPU capacity systems, our domain of interest is
 	 * sd_asym_cpucapacity rather than sd_llc.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 		sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
 		/*
 		 * On an asymmetric CPU capacity system where an exclusive
4 changes: 2 additions & 2 deletions kernel/sched/rt.c
@@ -509,7 +509,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 	unsigned int cpu_cap;

 	/* Only heterogeneous systems can benefit from this check */
-	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+	if (!sched_asym_cpucap_active())
 		return true;

 	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
@@ -1897,7 +1897,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * If we're on asym system ensure we consider the different capacities
 	 * of the CPUs when searching for the lowest_mask.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {

 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
 					  task, lowest_mask,
5 changes: 5 additions & 0 deletions kernel/sched/sched.h
@@ -1813,6 +1813,11 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 extern struct static_key_false sched_asym_cpucapacity;

+static __always_inline bool sched_asym_cpucap_active(void)
+{
+	return static_branch_unlikely(&sched_asym_cpucapacity);
+}
+
 struct sched_group_capacity {
 	atomic_t ref;
 	/*
