sched/fair: Restructure wake_affine*() to return a CPU id
This is a preparation patch that has wake_affine*() return a CPU ID instead of
a boolean. The intent is to allow the wake_affine() helpers to be avoided
if a decision is already made. This patch has no functional change.

Signed-off-by: Mel Gorman <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matt Fleming <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Mel Gorman authored and Ingo Molnar committed on Feb 6, 2018
1 parent 89a55f5 commit 3b76c4a
Showing 1 changed file with 17 additions and 18 deletions.
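Before reading the diff below, here is a minimal, self-contained C sketch of the pattern the patch adopts. It is not the kernel code: NO_TARGET, pick_idle(), pick_by_weight() and choose_cpu() are hypothetical stand-ins for nr_cpumask_bits, wake_affine_idle(), wake_affine_weight() and wake_affine(). Each helper returns either a concrete CPU id or a sentinel meaning "no recommendation", so the caller can chain helpers and fall back to prev_cpu without carrying a separate boolean.

/*
 * Illustrative sketch only -- not kernel code. NO_TARGET stands in for
 * nr_cpumask_bits, and pick_idle()/pick_by_weight()/choose_cpu() stand in
 * for wake_affine_idle()/wake_affine_weight()/wake_affine().
 */
#include <stdio.h>

#define NO_TARGET (-1)  /* sentinel: helper has no recommendation */

/* Prefer this_cpu only when it is idle (cf. the WA_IDLE step). */
static int pick_idle(int this_cpu, int this_cpu_is_idle)
{
        return this_cpu_is_idle ? this_cpu : NO_TARGET;
}

/* Prefer this_cpu only when its load is no higher (cf. the WA_WEIGHT step). */
static int pick_by_weight(int this_cpu, unsigned long this_load,
                          unsigned long prev_load)
{
        return this_load <= prev_load ? this_cpu : NO_TARGET;
}

/* Chain the helpers; fall back to prev_cpu when neither recommends a CPU. */
static int choose_cpu(int this_cpu, int prev_cpu, int this_cpu_is_idle,
                      unsigned long this_load, unsigned long prev_load)
{
        int target = pick_idle(this_cpu, this_cpu_is_idle);

        if (target == NO_TARGET)
                target = pick_by_weight(this_cpu, this_load, prev_load);

        return target == NO_TARGET ? prev_cpu : target;
}

int main(void)
{
        printf("%d\n", choose_cpu(0, 3, 0, 10, 5)); /* 3: fall back to prev_cpu */
        printf("%d\n", choose_cpu(0, 3, 1, 10, 5)); /* 0: this_cpu is idle */
        return 0;
}

In the patch itself the sentinel is nr_cpumask_bits, which is never a valid CPU number, so wake_affine() simply falls back to prev_cpu when neither helper makes a recommendation.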
35 changes: 17 additions & 18 deletions kernel/sched/fair.c
@@ -5692,7 +5692,7 @@ static int wake_wide(struct task_struct *p)
  * scheduling latency of the CPUs. This seems to work
  * for the overloaded case.
  */
-static bool
+static int
 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 {
         /*
@@ -5702,15 +5702,15 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync)
          * node depending on the IO topology or IRQ affinity settings.
          */
         if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
-                return true;
+                return this_cpu;
 
         if (sync && cpu_rq(this_cpu)->nr_running == 1)
-                return true;
+                return this_cpu;
 
-        return false;
+        return nr_cpumask_bits;
 }
 
-static bool
+static int
 wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
                    int this_cpu, int prev_cpu, int sync)
 {
@@ -5724,7 +5724,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
                 unsigned long current_load = task_h_load(current);
 
                 if (current_load > this_eff_load)
-                        return true;
+                        return this_cpu;
 
                 this_eff_load -= current_load;
         }
@@ -5741,28 +5741,28 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
         prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
         prev_eff_load *= capacity_of(this_cpu);
 
-        return this_eff_load <= prev_eff_load;
+        return this_eff_load <= prev_eff_load ? this_cpu : nr_cpumask_bits;
 }
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
                        int prev_cpu, int sync)
 {
         int this_cpu = smp_processor_id();
-        bool affine = false;
+        int target = nr_cpumask_bits;
 
         if (sched_feat(WA_IDLE))
-                affine = wake_affine_idle(this_cpu, prev_cpu, sync);
+                target = wake_affine_idle(this_cpu, prev_cpu, sync);
 
-        if (sched_feat(WA_WEIGHT) && !affine)
-                affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
+        if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
+                target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
 
         schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
-        if (affine) {
-                schedstat_inc(sd->ttwu_move_affine);
-                schedstat_inc(p->se.statistics.nr_wakeups_affine);
-        }
+        if (target == nr_cpumask_bits)
+                return prev_cpu;
 
-        return affine;
+        schedstat_inc(sd->ttwu_move_affine);
+        schedstat_inc(p->se.statistics.nr_wakeups_affine);
+        return target;
 }
 
 static inline unsigned long task_util(struct task_struct *p);
@@ -6355,8 +6355,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                 if (cpu == prev_cpu)
                         goto pick_cpu;
 
-                if (wake_affine(affine_sd, p, prev_cpu, sync))
-                        new_cpu = cpu;
+                new_cpu = wake_affine(affine_sd, p, prev_cpu, sync);
         }
 
         if (sd && !(sd_flag & SD_BALANCE_FORK)) {
