Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  debug: turn ignore_loglevel into an early param
  sched: remove unused params
  sched: let +nice tasks have smaller impact
  sched: fix high wake up latencies with FAIR_USER_SCHED
  RCU: add help text for "RCU implementation type"
torvalds committed Jan 31, 2008
2 parents e1a9c98 + c4772d9 commit fbdde7b
Showing 4 changed files with 21 additions and 9 deletions.
8 changes: 8 additions & 0 deletions init/Kconfig
@@ -775,6 +775,14 @@ config PREEMPT_NOTIFIERS
 choice
 	prompt "RCU implementation type:"
 	default CLASSIC_RCU
+	help
+	  This allows you to choose either the classic RCU implementation
+	  that is designed for best read-side performance on non-realtime
+	  systems, or the preemptible RCU implementation for best latency
+	  on realtime systems. Note that some kernel preemption modes
+	  will restrict your choice.
+
+	  Select the default if you are unsure.
 
 config CLASSIC_RCU
 	bool "Classic RCU"
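
The "read-side performance" in the help text refers to the cost of entering and leaving an RCU read-side critical section; with Classic RCU on a non-preemptible kernel those primitives are essentially free. A minimal kernel-style sketch of such a read side (illustration only, not part of this commit; the struct, list and function names are hypothetical):

#include <linux/rcupdate.h>
#include <linux/list.h>

/* Hypothetical RCU-protected lookup table. */
struct example_entry {
	int key;
	int value;
	struct list_head node;
};

static LIST_HEAD(example_list);

/* Read side: the path whose cost the help text is talking about. */
static int example_lookup(int key)
{
	struct example_entry *e;
	int value = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &example_list, node) {
		if (e->key == key) {
			value = e->value;
			break;
		}
	}
	rcu_read_unlock();

	return value;
}
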
4 changes: 2 additions & 2 deletions kernel/printk.c
@@ -455,10 +455,10 @@ static int __init ignore_loglevel_setup(char *str)
 	ignore_loglevel = 1;
 	printk(KERN_INFO "debug: ignoring loglevel setting.\n");
 
-	return 1;
+	return 0;
 }
 
-__setup("ignore_loglevel", ignore_loglevel_setup);
+early_param("ignore_loglevel", ignore_loglevel_setup);
 
 /*
  * Write out chars from start to end - 1 inclusive
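
For context (not part of the diff): an early_param() handler is invoked by parse_early_param(), earlier in start_kernel() than ordinary __setup() handlers, so the option already applies to the earliest boot messages (e.g. with early_printk). Early-param handlers also return 0 on success, whereas __setup() handlers return 1 to mark the option as consumed, which is why the return value flips along with the registration macro. A hypothetical parameter handled the same way (the example_debug name is made up):

#include <linux/init.h>
#include <linux/kernel.h>

static int example_debug;

/* Parsed by parse_early_param(), before most of start_kernel() runs. */
static int __init example_debug_setup(char *str)
{
	example_debug = 1;
	printk(KERN_INFO "example: early debug mode enabled\n");
	return 0;	/* 0 = success; a __setup() handler would return 1 */
}
early_param("example_debug", example_debug_setup);
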
10 changes: 5 additions & 5 deletions kernel/sched.c
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);

 #define sched_class_highest (&rt_sched_class)
 
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
 	rq->nr_running--;
 }
@@ -1354,7 +1354,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 	rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
-	inc_nr_running(p, rq);
+	inc_nr_running(rq);
 }
 
 /*
@@ -1366,7 +1366,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 	rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
-	dec_nr_running(p, rq);
+	dec_nr_running(rq);
 }
 
 /**
@@ -2006,7 +2006,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq);
+		inc_nr_running(rq);
 	}
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
8 changes: 6 additions & 2 deletions kernel/sched_fair.c
@@ -520,7 +520,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)

 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
+		if (sched_feat(NEW_FAIR_SLEEPERS))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
@@ -1106,7 +1106,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	}
 
 	gran = sysctl_sched_wakeup_granularity;
-	if (unlikely(se->load.weight != NICE_0_LOAD))
+	/*
+	 * More easily preempt - nice tasks, while not making
+	 * it harder for + nice tasks.
+	 */
+	if (unlikely(se->load.weight > NICE_0_LOAD))
 		gran = calc_delta_fair(gran, &se->load);
 
 	if (pse->vruntime + gran < se->vruntime)
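
The effect of the changed test: with the old != condition a +nice (lighter than nice-0) current task inflated the wakeup granularity, since calc_delta_fair() roughly computes delta * NICE_0_LOAD / weight; with the new > condition the scaling is applied only to heavier (negative-nice) tasks, so the granularity can only shrink. A rough userspace-only illustration of those numbers (not kernel code; the 10 ms granularity is just an example figure, and 3121/1024/335 are the kernel's load weights for nice -5, 0 and +5):

/*
 * Illustration only: compare the old (!=) and new (>) weight tests.
 * calc_delta_fair() is approximated as delta * NICE_0_LOAD / weight.
 */
#include <stdio.h>

#define NICE_0_LOAD 1024ULL

static unsigned long long scale(unsigned long long delta, unsigned long long weight)
{
	return delta * NICE_0_LOAD / weight;
}

int main(void)
{
	unsigned long long gran = 10000000ULL;			/* 10 ms, in ns */
	unsigned long long weights[] = { 3121, 1024, 335 };	/* nice -5, 0, +5 */

	for (int i = 0; i < 3; i++) {
		unsigned long long w = weights[i];
		/* old test: scale whenever the weight differs from nice 0 */
		unsigned long long old_gran = (w != NICE_0_LOAD) ? scale(gran, w) : gran;
		/* new test: scale only for heavier (negative-nice) tasks */
		unsigned long long new_gran = (w > NICE_0_LOAD) ? scale(gran, w) : gran;

		printf("weight %4llu: old gran %8llu ns, new gran %8llu ns\n",
		       w, old_gran, new_gran);
	}
	return 0;
}

A smaller granularity makes the running task easier to preempt, so with the new test a negative-nice current task is preempted more readily while a +nice one is no longer harder to preempt than a nice-0 task, matching the comment added in the hunk above.
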
