sched: Fix a race between __kthread_bind() and sched_setaffinity()
Because sched_setaffinity() checks p->flags & PF_NO_SETAFFINITY
without locks, a caller might observe an old value and race with the
set_cpus_allowed_ptr() call from __kthread_bind() and effectively undo
it:

	__kthread_bind()
	  do_set_cpus_allowed()
						<SYSCALL>
						  sched_setaffinity()
						    if (p->flags & PF_NO_SETAFFINITY)
						    set_cpus_allowed_ptr()
	  p->flags |= PF_NO_SETAFFINITY

Fix the bug by putting everything under the regular scheduler locks.

This also closes a hole in the serialization of task_struct::{nr_,}cpus_allowed.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Peter Zijlstra authored and Ingo Molnar committed Aug 12, 2015
1 parent 7855a35 commit 25834c7
Showing 5 changed files with 52 additions and 18 deletions.
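From userspace, the observable effect of closing the race is that changing the affinity of a bound kthread now fails deterministically instead of occasionally slipping through. A minimal sketch (illustrative only, not part of this patch; it assumes the PID of a kthread bound via kthread_bind() or kthread_bind_mask() is passed on the command line):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	cpu_set_t set;
	pid_t pid;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <kthread-pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atol(argv[1]);

	CPU_ZERO(&set);
	CPU_SET(0, &set);

	/*
	 * The kthread sets PF_NO_SETAFFINITY under its pi_lock and
	 * sched_setaffinity() re-checks the flag under the scheduler
	 * locks, so this now reliably fails with EINVAL.
	 */
	if (sched_setaffinity(pid, sizeof(set), &set) < 0)
		perror("sched_setaffinity");	/* expected: Invalid argument */

	return 0;
}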
1 change: 1 addition & 0 deletions include/linux/kthread.h
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
})

void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
7 changes: 0 additions & 7 deletions include/linux/sched.h
@@ -2203,13 +2203,6 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
return set_cpus_allowed_ptr(p, &new_mask);
}
#endif

/*
* Do not use outside of architecture code which knows its limitations.
*
20 changes: 17 additions & 3 deletions kernel/kthread.c
@@ -325,16 +325,30 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
/* Must have done schedule() in kthread() before we set_task_cpu */
unsigned long flags;

if (!wait_task_inactive(p, state)) {
WARN_ON(1);
return;
}

/* It's safe because the task is inactive. */
do_set_cpus_allowed(p, cpumask_of(cpu));
raw_spin_lock_irqsave(&p->pi_lock, flags);
do_set_cpus_allowed(p, mask);
p->flags |= PF_NO_SETAFFINITY;
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
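For reference, a minimal usage sketch of the new helper (illustrative only; example_spawn() is hypothetical, but the create_worker() conversion in kernel/workqueue.c below follows the same create/bind/wake pattern):

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *example_spawn(int (*threadfn)(void *data),
					 void *data,
					 const struct cpumask *mask)
{
	struct task_struct *tsk;

	tsk = kthread_create(threadfn, data, "example");
	if (IS_ERR(tsk))
		return tsk;

	/* Sets the allowed mask and PF_NO_SETAFFINITY under ->pi_lock. */
	kthread_bind_mask(tsk, mask);
	wake_up_process(tsk);

	return tsk;
}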
36 changes: 32 additions & 4 deletions kernel/sched/core.c
@@ -1153,6 +1153,8 @@ static int migration_cpu_stop(void *data)

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
lockdep_assert_held(&p->pi_lock);

if (p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);

@@ -1169,7 +1171,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
static int __set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask, bool check)
{
unsigned long flags;
struct rq *rq;
@@ -1178,6 +1181,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)

rq = task_rq_lock(p, &flags);

/*
* Must re-check here, to close a race against __kthread_bind(),
* sched_setaffinity() is not guaranteed to observe the flag.
*/
if (check && (p->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
goto out;
}

if (cpumask_equal(&p->cpus_allowed, new_mask))
goto out;

@@ -1214,6 +1226,11 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)

return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -1595,6 +1612,15 @@ static void update_avg(u64 *avg, u64 sample)
s64 diff = sample - *avg;
*avg += diff >> 3;
}

#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask, bool check)
{
return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
@@ -4340,7 +4366,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
}
#endif
again:
retval = set_cpus_allowed_ptr(p, new_mask);
retval = __set_cpus_allowed_ptr(p, new_mask, true);

if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
@@ -4865,7 +4891,8 @@ void init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;

raw_spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);

__sched_fork(0, idle);
idle->state = TASK_RUNNING;
Expand All @@ -4891,7 +4918,8 @@ void init_idle(struct task_struct *idle, int cpu)
#if defined(CONFIG_SMP)
idle->on_cpu = 1;
#endif
raw_spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
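The init_idle() hunk above switches to the same lock ordering task_rq_lock() uses, p->pi_lock first and then rq->lock, which is what allows do_set_cpus_allowed() to assert lockdep_assert_held(&p->pi_lock). A condensed sketch of that ordering (illustrative only; example_update_affinity() is hypothetical, and real callers go through __set_cpus_allowed_ptr() or kthread_bind_mask() rather than calling do_set_cpus_allowed() directly):

static void example_update_affinity(struct task_struct *p, struct rq *rq,
				    const struct cpumask *new_mask)
{
	unsigned long flags;

	/* Same ordering as task_rq_lock(): p->pi_lock, then rq->lock. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	raw_spin_lock(&rq->lock);

	/* The lockdep_assert_held(&p->pi_lock) above is satisfied here. */
	do_set_cpus_allowed(p, new_mask);

	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}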
6 changes: 2 additions & 4 deletions kernel/workqueue.c
@@ -1714,9 +1714,7 @@ static struct worker *create_worker(struct worker_pool *pool)
goto fail;

set_user_nice(worker->task, pool->attrs->nice);

/* prevent userland from meddling with cpumask of workqueue workers */
worker->task->flags |= PF_NO_SETAFFINITY;
kthread_bind_mask(worker->task, pool->attrs->cpumask);

/* successful, attach the worker to the pool */
worker_attach_to_pool(worker, pool);
@@ -3856,7 +3854,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
}

wq->rescuer = rescuer;
rescuer->task->flags |= PF_NO_SETAFFINITY;
kthread_bind_mask(rescuer->task, cpu_possible_mask);
wake_up_process(rescuer->task);
}

