Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 72
1 file changed, 46 insertions(+), 26 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a3a04085e794..799d23b4e35d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -169,7 +169,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
#define TASK_PREEMPTS_CURR(p, rq) \
- (((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active))
+ ((p)->prio < (rq)->curr->prio)
#define SCALE_PRIO(x, prio) \
max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
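With the array test gone, TASK_PREEMPTS_CURR is a pure priority comparison; the "is this task currently running here?" question moves out to the callers (see the rt_mutex_setprio, set_user_nice and sched_setscheduler hunks below). A minimal userspace model of the simplified test, with illustrative struct names that are not the kernel's:

    #include <stdio.h>

    /* Illustrative stand-ins for the scheduler structures; lower numeric
     * prio means higher priority, as in the kernel. */
    struct fake_task { int prio; };
    struct fake_rq   { struct fake_task *curr; };

    /* Mirrors the simplified TASK_PREEMPTS_CURR: a pure priority compare,
     * with no reference to the active/expired arrays. */
    static int task_preempts_curr(const struct fake_task *p, const struct fake_rq *rq)
    {
        return p->prio < rq->curr->prio;
    }

    int main(void)
    {
        struct fake_task curr = { .prio = 120 }, waker = { .prio = 100 };
        struct fake_rq rq = { .curr = &curr };

        printf("preempts current: %d\n", task_preempts_curr(&waker, &rq));
        return 0;
    }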
@@ -305,6 +305,7 @@ struct rq {
};
static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_MUTEX(sched_hotcpu_mutex);
static inline int cpu_of(struct rq *rq)
{
@@ -4076,13 +4077,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
struct prio_array *array;
unsigned long flags;
struct rq *rq;
- int delta;
+ int oldprio;
BUG_ON(prio < 0 || prio > MAX_PRIO);
rq = task_rq_lock(p, &flags);
- delta = prio - p->prio;
+ oldprio = p->prio;
array = p->array;
if (array)
dequeue_task(p, array);
@@ -4098,11 +4099,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
enqueue_task(p, array);
/*
* Reschedule if we are currently running on this runqueue and
- * our priority decreased, or if our priority became higher
- * than the current's.
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
*/
- if (TASK_PREEMPTS_CURR(p, rq) ||
- (delta > 0 && task_running(rq, p)))
+ if (task_running(rq, p)) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else if (TASK_PREEMPTS_CURR(p, rq))
resched_task(rq->curr);
}
task_rq_unlock(rq, &flags);
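The rewritten check separates the two cases that the old "delta > 0 && task_running()" test mixed together: if p is currently running on this runqueue, only a priority drop (numerically larger prio) requires kicking the CPU so a better-priority task can take over; if p is not running, rq->curr only needs a kick when p now preempts it. A minimal, self-contained C model of that decision (function and variable names are illustrative, not kernel API):

    #include <assert.h>
    #include <stdbool.h>

    /* Model of the reschedule decision in rt_mutex_setprio() after this
     * patch. Lower numeric prio = higher priority. Purely illustrative. */
    static bool needs_resched(int new_prio, int old_prio, int curr_prio, bool running)
    {
        if (running)
            return new_prio > old_prio;  /* our priority dropped */
        return new_prio < curr_prio;     /* we now preempt the current task */
    }

    int main(void)
    {
        /* Running task boosted (120 -> 100): no reschedule needed. */
        assert(!needs_resched(100, 120, 140, true));
        /* Running task deboosted (100 -> 120): reschedule so a better task can run. */
        assert(needs_resched(120, 100, 140, true));
        /* Queued (not running) task boosted above the current task: kick it. */
        assert(needs_resched(100, 120, 110, false));
        return 0;
    }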
@@ -4150,12 +4153,10 @@ void set_user_nice(struct task_struct *p, long nice)
enqueue_task(p, array);
inc_raw_weighted_load(rq, p);
/*
- * Reschedule if we are currently running on this runqueue and
- * our priority decreased, or if our priority became higher
- * than the current's.
+ * If the task increased its priority or is running and
+ * lowered its priority, then reschedule its CPU:
*/
- if (TASK_PREEMPTS_CURR(p, rq) ||
- (delta > 0 && task_running(rq, p)))
+ if (delta < 0 || (delta > 0 && task_running(rq, p)))
resched_task(rq->curr);
}
out_unlock:
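In set_user_nice(), delta is the new priority value minus the old one, so delta < 0 means the task's priority improved and delta > 0 means it got nicer (lower priority). The new condition reschedules the CPU when the priority improved, or when it got worse while the task is running. A small illustrative model of just that condition (not kernel code):

    #include <assert.h>
    #include <stdbool.h>

    /* Model of the set_user_nice() condition after this patch: delta is
     * new prio minus old prio (positive = priority lowered). */
    static bool nice_needs_resched(int delta, bool running)
    {
        return delta < 0 || (delta > 0 && running);
    }

    int main(void)
    {
        assert(nice_needs_resched(-5, false));  /* priority raised: always reschedule */
        assert(nice_needs_resched(5, true));    /* running task reniced down: reschedule */
        assert(!nice_needs_resched(5, false));  /* queued task reniced down: no reschedule */
        return 0;
    }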
@@ -4382,11 +4383,13 @@ recheck:
__activate_task(p, rq);
/*
* Reschedule if we are currently running on this runqueue and
- * our priority decreased, or our priority became higher
- * than the current's.
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
*/
- if (TASK_PREEMPTS_CURR(p, rq) ||
- (task_running(rq, p) && p->prio > oldprio))
+ if (task_running(rq, p)) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else if (TASK_PREEMPTS_CURR(p, rq))
resched_task(rq->curr);
}
__task_rq_unlock(rq);
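The decision here mirrors the rt_mutex_setprio() hunk above. From userspace this path is normally reached through the sched_setscheduler(2) system call; a short usage sketch of that entry point (standard glibc wrapper, typically needs root or CAP_SYS_NICE for a realtime policy):

    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Switch the calling process to SCHED_FIFO priority 10; this exercises
     * the kernel sched_setscheduler() path patched above. */
    int main(void)
    {
        struct sched_param sp = { .sched_priority = 10 };

        if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
            fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
            return 1;
        }
        printf("now SCHED_FIFO, priority %d\n", sp.sched_priority);
        return 0;
    }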
@@ -4518,13 +4521,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
struct task_struct *p;
int retval;
- lock_cpu_hotplug();
+ mutex_lock(&sched_hotcpu_mutex);
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (!p) {
read_unlock(&tasklist_lock);
- unlock_cpu_hotplug();
+ mutex_unlock(&sched_hotcpu_mutex);
return -ESRCH;
}
@@ -4551,7 +4554,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
out_unlock:
put_task_struct(p);
- unlock_cpu_hotplug();
+ mutex_unlock(&sched_hotcpu_mutex);
return retval;
}
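The kernel-side sched_setaffinity() now serializes against CPU hotplug with the scheduler-local sched_hotcpu_mutex instead of the global lock_cpu_hotplug(); note that both the early -ESRCH return and the out_unlock path drop the mutex. The userspace entry point is sched_setaffinity(2); a short usage sketch (glibc wrapper, _GNU_SOURCE for the CPU_* macros):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Pin the calling process to CPU 0; this goes through the kernel
     * sched_setaffinity() shown above, now guarded by sched_hotcpu_mutex. */
    int main(void)
    {
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);

        if (sched_setaffinity(0, sizeof(mask), &mask) == -1) {
            fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));
            return 1;
        }
        printf("pinned to CPU 0\n");
        return 0;
    }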
@@ -4608,7 +4611,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
struct task_struct *p;
int retval;
- lock_cpu_hotplug();
+ mutex_lock(&sched_hotcpu_mutex);
read_lock(&tasklist_lock);
retval = -ESRCH;
@@ -4624,7 +4627,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
out_unlock:
read_unlock(&tasklist_lock);
- unlock_cpu_hotplug();
+ mutex_unlock(&sched_hotcpu_mutex);
if (retval)
return retval;
@@ -5386,7 +5389,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
struct rq *rq;
switch (action) {
+ case CPU_LOCK_ACQUIRE:
+ mutex_lock(&sched_hotcpu_mutex);
+ break;
+
case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
if (IS_ERR(p))
return NOTIFY_BAD;
@@ -5400,12 +5408,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
/* Strictly unnecessary, as first user will wake it. */
wake_up_process(cpu_rq(cpu)->migration_thread);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
if (!cpu_rq(cpu)->migration_thread)
break;
/* Unbind it from offline cpu so it can run. Fall thru. */
@@ -5416,6 +5426,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
migrate_live_tasks(cpu);
rq = cpu_rq(cpu);
kthread_stop(rq->migration_thread);
@@ -5431,7 +5442,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
BUG_ON(rq->nr_running != 0);
/* No need to migrate the tasks: it was best-effort if
- * they didn't do lock_cpu_hotplug(). Just wake up
+ * they didn't take sched_hotcpu_mutex. Just wake up
* the requestors. */
spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
@@ -5445,6 +5456,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
spin_unlock_irq(&rq->lock);
break;
#endif
+ case CPU_LOCK_RELEASE:
+ mutex_unlock(&sched_hotcpu_mutex);
+ break;
}
return NOTIFY_OK;
}
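With lock_cpu_hotplug() gone, the notifier itself brackets a hotplug operation: CPU_LOCK_ACQUIRE takes sched_hotcpu_mutex before the per-CPU events are delivered and CPU_LOCK_RELEASE drops it afterwards, while each new *_FROZEN event (sent on the suspend/resume path) is handled exactly like its regular counterpart. A rough, self-contained C model of that switch shape; the event names, values and the pthread mutex are stand-ins for the kernel's notifier machinery, not its API:

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the hotplug notifier events; the values
     * are arbitrary and do not match the kernel's constants. */
    enum hp_event {
        EV_LOCK_ACQUIRE, EV_UP_PREPARE, EV_UP_PREPARE_FROZEN,
        EV_ONLINE, EV_ONLINE_FROZEN, EV_LOCK_RELEASE,
    };

    static pthread_mutex_t hotcpu_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Model of migration_call(): take the subsystem mutex on LOCK_ACQUIRE,
     * drop it on LOCK_RELEASE, and let frozen events fall through to the
     * same code as their regular counterparts. */
    static void hotplug_cb(enum hp_event ev, int cpu)
    {
        switch (ev) {
        case EV_LOCK_ACQUIRE:
            pthread_mutex_lock(&hotcpu_mutex);
            break;
        case EV_UP_PREPARE:
        case EV_UP_PREPARE_FROZEN:
            printf("prepare cpu %d\n", cpu);
            break;
        case EV_ONLINE:
        case EV_ONLINE_FROZEN:
            printf("cpu %d online\n", cpu);
            break;
        case EV_LOCK_RELEASE:
            pthread_mutex_unlock(&hotcpu_mutex);
            break;
        }
    }

    int main(void)
    {
        enum hp_event seq[] = { EV_LOCK_ACQUIRE, EV_UP_PREPARE, EV_ONLINE, EV_LOCK_RELEASE };
        for (unsigned i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
            hotplug_cb(seq[i], 1);
        return 0;
    }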
@@ -6820,10 +6834,10 @@ int arch_reinit_sched_domains(void)
{
int err;
- lock_cpu_hotplug();
+ mutex_lock(&sched_hotcpu_mutex);
detach_destroy_domains(&cpu_online_map);
err = arch_init_sched_domains(&cpu_online_map);
- unlock_cpu_hotplug();
+ mutex_unlock(&sched_hotcpu_mutex);
return err;
}
@@ -6902,14 +6916,20 @@ static int update_sched_domains(struct notifier_block *nfb,
{
switch (action) {
case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
detach_destroy_domains(&cpu_online_map);
return NOTIFY_OK;
case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
/*
* Fall through and re-initialise the domains.
*/
@@ -6928,12 +6948,12 @@ void __init sched_init_smp(void)
{
cpumask_t non_isolated_cpus;
- lock_cpu_hotplug();
+ mutex_lock(&sched_hotcpu_mutex);
arch_init_sched_domains(&cpu_online_map);
cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
if (cpus_empty(non_isolated_cpus))
cpu_set(smp_processor_id(), non_isolated_cpus);
- unlock_cpu_hotplug();
+ mutex_unlock(&sched_hotcpu_mutex);
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);