Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: group scheduling corner case fix, two deadline scheduler
  fixes, effective_load() overflow fix, nested sleep fix, 6144 CPUs
  system fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix RCU stall upon -ENOMEM in sched_create_group()
  sched/deadline: Avoid double-accounting in case of missed deadlines
  sched/deadline: Fix migration of SCHED_DEADLINE tasks
  sched: Fix odd values in effective_load() calculations
  sched, fanotify: Deal with nested sleeps
  sched: Fix KMALLOC_MAX_SIZE overflow during cpumask allocation
torvalds committed Jan 11, 2015
2 parents ddb321a + 7f1a169 commit 5ab551d
Showing 4 changed files with 19 additions and 35 deletions.
10 changes: 5 additions & 5 deletions fs/notify/fanotify/fanotify_user.c
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
struct fsnotify_event *kevent;
char __user *start;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);

start = buf;
group = file->private_data;

pr_debug("%s: group=%p\n", __func__, group);

+ add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,

if (start != buf)
break;
- schedule();
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
continue;
}

@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
buf += ret;
count -= ret;
}
+ remove_wait_queue(&group->notification_waitq, &wait);

- finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
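Background on the fanotify change above: the old loop called prepare_to_wait() (which puts the task into TASK_INTERRUPTIBLE) and then took group->notification_mutex, so it could sleep inside mutex_lock() while already half-way into another sleep; that is the nested sleep the scheduler's debugging checks warn about, and it can silently reset the task state. The DEFINE_WAIT_FUNC()/wait_woken() pattern keeps the task runnable while it does the blocking work and only sleeps inside wait_woken(), with woken_wake_function() recording any wakeup that races with the event check. A minimal sketch of that pattern against a hypothetical driver (my_dev, my_event and my_dev_fetch_event() are made up for illustration; this is not the fanotify code):

/* Sketch of the wait_woken() idiom; hypothetical device, error paths trimmed. */
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/sched.h>

struct my_event;

struct my_dev {
    wait_queue_head_t waitq;    /* woken by the producer side */
    struct mutex lock;          /* protects the event queue   */
};

struct my_event *my_dev_fetch_event(struct my_dev *dev);  /* assumed helper */

static long my_dev_wait_for_event(struct my_dev *dev, struct my_event **ev)
{
    DEFINE_WAIT_FUNC(wait, woken_wake_function);
    long ret = 0;

    add_wait_queue(&dev->waitq, &wait);
    for (;;) {
        mutex_lock(&dev->lock);        /* may sleep: fine, we are still TASK_RUNNING */
        *ev = my_dev_fetch_event(dev);
        mutex_unlock(&dev->lock);

        if (*ev)
            break;
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
        /* Commit to sleeping only here; a wakeup that arrived while we were
         * checking above was already noted by woken_wake_function(). */
        wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
    }
    remove_wait_queue(&dev->waitq, &wait);
    return ret;
}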
13 changes: 5 additions & 8 deletions kernel/sched/core.c
@@ -7112,9 +7112,6 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
- #endif
- #ifdef CONFIG_CPUMASK_OFFSTACK
- alloc_size += num_possible_cpus() * cpumask_size();
#endif
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
+ }
#ifdef CONFIG_CPUMASK_OFFSTACK
- for_each_possible_cpu(i) {
- per_cpu(load_balance_mask, i) = (void *)ptr;
- ptr += cpumask_size();
- }
- #endif /* CONFIG_CPUMASK_OFFSTACK */
- }
+ for_each_possible_cpu(i) {
+ per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+ cpumask_size(), GFP_KERNEL, cpu_to_node(i));
+ }
+ #endif /* CONFIG_CPUMASK_OFFSTACK */

init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
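Rough arithmetic on why the removed kzalloc() sizing stops working on very large machines (an estimate assuming CONFIG_CPUMASK_OFFSTACK, roughly 6144 possible CPUs as in the merge message, 4 KiB pages and the usual kmalloc limits; exact values depend on the configuration):

/*
 * per-mask size:   cpumask_size()  ~= 6144 bits / 8              =     768 bytes
 * old request:     6144 masks * 768 bytes                        = 4718592 bytes (4.5 MiB)
 *                  ... plus the group-scheduling pointer arrays
 * kmalloc ceiling: KMALLOC_MAX_SIZE (4 KiB pages, MAX_ORDER 11)  = 4194304 bytes (4 MiB)
 *
 * The single combined allocation therefore exceeds what kmalloc() can hand
 * out, while the per-CPU kzalloc_node() calls in the new code request only
 * ~768 bytes each and place each mask on its CPU's NUMA node.
 */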
25 changes: 4 additions & 21 deletions kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
- int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
- int rorun = dl_se->runtime <= 0;
-
- if (!rorun && !dmiss)
- return 0;
-
- /*
- * If we are beyond our current deadline and we are still
- * executing, then we have already used some of the runtime of
- * the next instance. Thus, if we do not account that, we are
- * stealing bandwidth from the system at each deadline miss!
- */
- if (dmiss) {
- dl_se->runtime = rorun ? dl_se->runtime : 0;
- dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
- }
-
- return 1;
+ return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
*/
- if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
- replenish_dl_entity(dl_se, pi_se);
- else
+ if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
update_dl_entity(dl_se, pi_se);
+ else if (flags & ENQUEUE_REPLENISH)
+ replenish_dl_entity(dl_se, pi_se);

__enqueue_dl_entity(dl_se);
}
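On the first deadline.c hunk: dl_runtime_exceeded() is called from update_curr_dl() after the time the task just ran has already been subtracted from dl_se->runtime, so additionally subtracting the amount by which the deadline was overrun charged the same time twice and ate into the next instance's budget. With the fix, the function only reports whether the budget is exhausted. A simplified sketch of the calling context (not a verbatim excerpt; the real update_curr_dl() also handles clock bookkeeping, yield and bandwidth accounting):

/* Simplified sketch of the call site of dl_runtime_exceeded(). */
static void update_curr_dl_sketch(struct rq *rq, struct sched_dl_entity *dl_se,
                                  u64 delta_exec)
{
    dl_se->runtime -= delta_exec;           /* execution time is charged once, here */

    if (dl_runtime_exceeded(rq, dl_se)) {   /* now simply: runtime <= 0 ?           */
        dl_se->dl_throttled = 1;            /* throttle until the replenishment     */
        resched_curr(rq);                   /* timer refills runtime; no second     */
    }                                       /* charge for the missed deadline       */
}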
6 changes: 5 additions & 1 deletion kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)

static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
+ /* init_cfs_bandwidth() was not called */
+ if (!cfs_b->throttled_cfs_rq.next)
+ return;
+
hrtimer_cancel(&cfs_b->period_timer);
hrtimer_cancel(&cfs_b->slack_timer);
}
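For context on the guard added above: init_cfs_bandwidth() is what initializes the throttled_cfs_rq list head and the two hrtimers, and when sched_create_group() hits -ENOMEM before that point the error path still ends up in destroy_cfs_bandwidth(), where hrtimer_cancel() on a never-initialized timer can spin and trigger the RCU stall. Because the task_group is allocated with kzalloc(), a NULL throttled_cfs_rq.next reliably means init_cfs_bandwidth() never ran. A minimal sketch of that relationship (the real init_cfs_bandwidth() also sets quota, period and the lock; the timer modes here are illustrative):

/* Sketch only: why "throttled_cfs_rq.next == NULL" means the timers are untouched. */
static void init_cfs_bandwidth_sketch(struct cfs_bandwidth *cfs_b)
{
    INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);   /* ->next becomes non-NULL here */
    hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    /* If this never runs, throttled_cfs_rq.next stays NULL (kzalloc'd task_group)
     * and destroy_cfs_bandwidth() must not touch the timers. */
}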
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
* wl = S * s'_i; see (2)
*/
if (W > 0 && w < W)
- wl = (w * tg->shares) / W;
+ wl = (w * (long)tg->shares) / W;
else
wl = tg->shares;

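On the effective_load() hunk: w can be negative at this point while tg->shares is an unsigned long, so in w * tg->shares the negative w is converted to unsigned long and the product becomes an enormous positive number instead of a negative one; casting shares to long keeps the arithmetic signed. A standalone userspace illustration of that promotion (the numbers are made up, not scheduler values):

#include <stdio.h>

int main(void)
{
    long w = -1024;               /* a negative weight delta, as effective_load() can see */
    unsigned long shares = 2048;  /* tg->shares is an unsigned long                       */
    long W = 4096;                /* total weight                                          */

    long without_cast = (w * shares) / W;        /* w promoted to unsigned long */
    long with_cast    = (w * (long)shares) / W;  /* stays signed, as in the fix */

    printf("without cast: %ld\n", without_cast); /* huge positive value */
    printf("with cast:    %ld\n", with_cast);    /* -512, as intended   */
    return 0;
}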
