Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  sched: Fix SCHED_MC regression caused by change in sched cpu_power
  sched: Don't use possibly stale sched_class
  kthread, sched: Remove reference to kthread_create_on_cpu
  sched: cpuacct: Use bigger percpu counter batch values for stats counters
  percpu_counter: Make __percpu_counter_add an inline function on UP
  sched: Remove member rt_se from struct rt_rq
  sched: Change usage of rt_rq->rt_se to rt_rq->tg->rt_se[cpu]
  sched: Remove unused update_shares_locked()
  sched: Use for_each_bit
  sched: Queue a deboosted task to the head of the RT prio queue
  sched: Implement head queueing for sched_rt
  sched: Extend enqueue_task to allow head queueing
  sched: Remove USER_SCHED
  sched: Fix the place where group powers are updated
  sched: Assume *balance is valid
  sched: Remove load_balance_newidle()
  sched: Unify load_balance{,_newidle}()
  sched: Add a lock break for PREEMPT=y
  sched: Remove from fwd decls
  sched: Remove rq_iterator from move_one_task
  ...

Fix up trivial conflicts in kernel/sched.c
torvalds committed Feb 28, 2010
2 parents 2531216 + dd5feea commit f66ffde
Showing 14 changed files with 1,976 additions and 2,682 deletions.
15 changes: 0 additions & 15 deletions Documentation/feature-removal-schedule.txt
@@ -6,21 +6,6 @@ be removed from this file.

---------------------------

What: USER_SCHED
When: 2.6.34

Why: USER_SCHED was implemented as a proof of concept for group scheduling.
The effect of USER_SCHED can already be achieved from userspace with
the help of libcgroup. The removal of USER_SCHED will also simplify
the scheduler code with the removal of one major ifdef. There are also
issues USER_SCHED has with USER_NS. A decision was taken not to fix
those and instead remove USER_SCHED. Also new group scheduling
features will not be implemented for USER_SCHED.

Who: Dhaval Giani <[email protected]>

---------------------------

What: PRISM54
When: 2.6.34

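The USER_SCHED removal entry above points at userspace cgroups as the replacement. As a rough illustration of that idea (not taken from libcgroup), the sketch below creates a per-user group through the raw cgroup filesystem; the /cgroup mount point, the uid_<n> group name, and the minimal error handling are assumptions made for the example.

/* Sketch: approximate USER_SCHED from userspace by putting a task into a
 * per-user cpu cgroup.  Assumes the cpu controller is mounted at /cgroup
 * (e.g. mount -t cgroup -o cpu none /cgroup). */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static int move_pid_to_user_group(uid_t uid, pid_t pid)
{
	char dir[64], path[96];
	FILE *f;

	snprintf(dir, sizeof(dir), "/cgroup/uid_%u", uid);
	if (mkdir(dir, 0755) && errno != EEXIST)
		return -1;

	/* give the group the default weight of CPU bandwidth */
	snprintf(path, sizeof(path), "%s/cpu.shares", dir);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "1024\n");
	fclose(f);

	/* attach the task to the group */
	snprintf(path, sizeof(path), "%s/tasks", dir);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", (int)pid);
	fclose(f);
	return 0;
}
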
5 changes: 3 additions & 2 deletions include/linux/kernel.h
@@ -124,7 +124,7 @@ extern int _cond_resched(void);
#endif

#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line, int preempt_offset);
void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
@@ -138,7 +138,8 @@ extern int _cond_resched(void);
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
static inline void __might_sleep(char *file, int line, int preempt_offset) { }
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
#endif

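For reference, might_sleep() (whose debug helper gets a const-correct prototype above) is meant to sit at the top of any function that may block, so CONFIG_DEBUG_SPINLOCK_SLEEP can catch callers in atomic context. A tiny illustrative user; struct my_device and its fields are made up for the example.

/* Illustrative might_sleep() annotation; struct my_device is hypothetical. */
static int my_device_set_state(struct my_device *dev, int state)
{
	might_sleep();			/* we may block on the mutex below */

	mutex_lock(&dev->lock);
	dev->state = state;
	mutex_unlock(&dev->lock);
	return 0;
}
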
9 changes: 6 additions & 3 deletions include/linux/percpu_counter.h
@@ -98,9 +98,6 @@ static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
fbc->count = amount;
}

#define __percpu_counter_add(fbc, amount, batch) \
percpu_counter_add(fbc, amount)

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -109,6 +106,12 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
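
Turning the UP stub above into a real three-argument inline keeps __percpu_counter_add(fbc, amount, batch) usable on !SMP builds, which is what the "sched: cpuacct: Use bigger percpu counter batch values for stats counters" change in this merge relies on. A hedged sketch of such a caller; the counter name and batch value are illustrative, not the actual cpuacct ones.

/* Sketch: fold deltas into a shared counter with a larger batch, so the
 * global count (and its lock, on SMP) is updated less often. */
static struct percpu_counter my_usage;

static void charge_usage(s64 delta)
{
	/* a batch of 32 is illustrative; larger batches trade read accuracy
	 * for fewer updates of the shared count */
	__percpu_counter_add(&my_usage, delta, 32);
}
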
25 changes: 3 additions & 22 deletions include/linux/sched.h
@@ -740,14 +740,6 @@ struct user_struct {
uid_t uid;
struct user_namespace *user_ns;

#ifdef CONFIG_USER_SCHED
struct task_group *tg;
#ifdef CONFIG_SYSFS
struct kobject kobj;
struct delayed_work work;
#endif
#endif

#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
@@ -1087,7 +1079,8 @@ struct sched_domain;
struct sched_class {
const struct sched_class *next;

void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
bool head);
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
void (*yield_task) (struct rq *rq);

@@ -1099,14 +1092,6 @@ struct sched_class {
#ifdef CONFIG_SMP
int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
struct rq *busiest, unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio);

int (*move_one_task) (struct rq *this_rq, int this_cpu,
struct rq *busiest, struct sched_domain *sd,
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -2520,13 +2505,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_GROUP_SCHED
#ifdef CONFIG_CGROUP_SCHED

extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
extern void set_tg_uid(struct user_struct *user);
#endif

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
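
Following the sched_class change above, enqueue_task() implementations now receive a bool head and can place a task at the front of its runqueue list instead of the tail, which is how a deboosted RT task keeps its position per the shortlog ("sched: Queue a deboosted task to the head of the RT prio queue"). A simplified, non-kernel sketch; the entity type and the two helpers are hypothetical, not the actual sched_rt code.

/* Sketch of an enqueue hook honouring the new head flag.
 * my_entity, task_to_entity() and prio_queue_of() are hypothetical. */
static void enqueue_task_example(struct rq *rq, struct task_struct *p,
				 int wakeup, bool head)
{
	struct my_entity *se = task_to_entity(p);
	struct list_head *queue = prio_queue_of(rq, se);

	if (head)
		list_add(&se->run_list, queue);		/* front: runs next among equals */
	else
		list_add_tail(&se->run_list, queue);	/* tail: normal FIFO order */
}
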
81 changes: 30 additions & 51 deletions init/Kconfig
@@ -461,57 +461,6 @@ config LOG_BUF_SHIFT
config HAVE_UNSTABLE_SCHED_CLOCK
bool

config GROUP_SCHED
bool "Group CPU scheduler"
depends on EXPERIMENTAL
default n
help
This feature lets CPU scheduler recognize task groups and control CPU
bandwidth allocation to such task groups.
In order to create a group from arbitrary set of processes, use
CONFIG_CGROUPS. (See Control Group support.)

config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on GROUP_SCHED
default GROUP_SCHED

config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on EXPERIMENTAL
depends on GROUP_SCHED
default n
help
This feature lets you explicitly allocate real CPU bandwidth
to users or control groups (depending on the "Basis for grouping tasks"
setting below. If enabled, it will also make it impossible to
schedule realtime tasks for non-root users until you allocate
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.txt for more information.

choice
depends on GROUP_SCHED
prompt "Basis for grouping tasks"
default USER_SCHED

config USER_SCHED
bool "user id"
help
This option will choose userid as the basis for grouping
tasks, thus providing equal CPU bandwidth to each user.

config CGROUP_SCHED
bool "Control groups"
depends on CGROUPS
help
This option allows you to create arbitrary task groups
using the "cgroup" pseudo filesystem and control
the cpu bandwidth allocated to each such task group.
Refer to Documentation/cgroups/cgroups.txt for more
information on "cgroup" pseudo filesystem.

endchoice

menuconfig CGROUPS
boolean "Control Group support"
help
@@ -632,6 +581,36 @@ config CGROUP_MEM_RES_CTLR_SWAP
Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
size is 4096bytes, 512k per 1Gbytes of swap.

menuconfig CGROUP_SCHED
bool "Group CPU scheduler"
depends on EXPERIMENTAL && CGROUPS
default n
help
This feature lets CPU scheduler recognize task groups and control CPU
bandwidth allocation to such task groups. It uses cgroups to group
tasks.

if CGROUP_SCHED
config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED
default CGROUP_SCHED

config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on EXPERIMENTAL
depends on CGROUP_SCHED
default n
help
This feature lets you explicitly allocate real CPU bandwidth
to users or control groups (depending on the "Basis for grouping tasks"
setting below. If enabled, it will also make it impossible to
schedule realtime tasks for non-root users until you allocate
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.txt for more information.

endif #CGROUP_SCHED

endif # CGROUPS

config MM_OWNER
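
With the old "Basis for grouping tasks" choice gone, group scheduling is selected purely through the cgroup-based options above. An illustrative .config fragment (not from any shipped defconfig) enabling fair group scheduling without RT group scheduling:

# Group scheduling is now cgroup-based (illustrative fragment)
CONFIG_EXPERIMENTAL=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_FAIR_GROUP_SCHED=y
# CONFIG_RT_GROUP_SCHED is not set
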
8 changes: 0 additions & 8 deletions kernel/ksysfs.c
@@ -197,16 +197,8 @@ static int __init ksysfs_init(void)
goto group_exit;
}

/* create the /sys/kernel/uids/ directory */
error = uids_sysfs_init();
if (error)
goto notes_exit;

return 0;

notes_exit:
if (notes_size > 0)
sysfs_remove_bin_file(kernel_kobj, &notes_attr);
group_exit:
sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
2 changes: 1 addition & 1 deletion kernel/kthread.c
@@ -101,7 +101,7 @@ static void create_kthread(struct kthread_create_info *create)
*
* Description: This helper function creates and names a kernel
* thread. The thread will be stopped: use wake_up_process() to start
* it. See also kthread_run(), kthread_create_on_cpu().
* it. See also kthread_run().
*
* When woken, the thread will run @threadfn() with @data as its
* argument. @threadfn() can either call do_exit() directly if it is a
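
The comment fix above drops the stale kthread_create_on_cpu() reference; the documented create-then-wake pattern itself is unchanged, and kthread_run() simply bundles the two steps. A small illustrative caller (the thread function and its name are made up):

/* Illustrative kthread_create() usage: the thread is created stopped and
 * only starts running after wake_up_process(), as documented above. */
static int my_worker(void *unused)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* placeholder for real work */
	return 0;
}

static struct task_struct *start_my_worker(void)
{
	struct task_struct *tsk;

	tsk = kthread_create(my_worker, NULL, "my_worker");
	if (!IS_ERR(tsk))
		wake_up_process(tsk);	/* same effect as using kthread_run() */
	return tsk;
}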