[amd64] Update rt featureset to 3.0.7-rt20

svn path=/dists/sid/linux-2.6/; revision=18172
Uwe Kleine-König 2011-10-19 19:05:00 +00:00
parent d8a35d2c97
commit 7c7ec14458
3 changed files with 370 additions and 299 deletions

debian/changelog

@@ -1,7 +1,7 @@
linux-2.6 (3.0.0-6) UNRELEASED; urgency=low
[ Uwe Kleine-König ]
* [amd64] Update rt featureset to 3.0.6-rt18
* [amd64] Update rt featureset to 3.0.7-rt20
[ Bastian Blank ]
* Add stable 3.0.7, including:
@@ -9,7 +9,7 @@ linux-2.6 (3.0.0-6) UNRELEASED; urgency=low
calculation (Closes: #585130)
- ipv6: fix NULL dereference in udp6_ufo_fragment() (Closes: #643817)
For the complete list of changes, see:
http://www.kernel.org/pub/linux/kernel/v3.0/ChangeLog-3.0.7
[ Ben Hutchings ]
* cputimer: Cure lock inversion


@@ -1,6 +1,6 @@
[bwh: Dropped fix to kernel/taskstats.c applied separately in
bugfix/all/Make-TASKSTATS-require-root-access.patch]
[bwh: Updated raw spinlock changes in kernel/posix-cpu-timers.c to apply after
[bwh/ukl: Updated raw spinlock changes in kernel/posix-cpu-timers.c to apply after
bugfix/all/cputime-Cure-lock-inversion.patch]
Index: linux-2.6/mm/memory.c
@@ -1234,6 +1234,278 @@ Index: linux-2.6/arch/x86/kernel/irq.c
inc_irq_stat(x86_platform_ipis);
if (x86_platform_ipi_callback)
Index: linux-2.6/kernel/posix-cpu-timers.c
===================================================================
--- linux-2.6.orig/kernel/posix-cpu-timers.c
+++ linux-2.6/kernel/posix-cpu-timers.c
@@ -282,13 +282,13 @@ void thread_group_cputimer(struct task_s
* it.
*/
thread_group_cputime(tsk, &sum);
- spin_lock_irqsave(&cputimer->lock, flags);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 1;
update_gt_cputime(&cputimer->cputime, &sum);
} else
- spin_lock_irqsave(&cputimer->lock, flags);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
*times = cputimer->cputime;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
@@ -700,7 +701,7 @@ static int posix_cpu_timer_set(struct k_
/*
* Disarm any old timer after extracting its expiry time.
*/
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -998,9 +999,9 @@ static void stop_process_timers(struct s
struct thread_group_cputimer *cputimer = &sig->cputimer;
unsigned long flags;
- spin_lock_irqsave(&cputimer->lock, flags);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 0;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
static u32 onecputick;
@@ -1222,7 +1223,7 @@ void posix_cpu_timer_schedule(struct k_i
/*
* Now re-arm for the new expiry time.
*/
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
arm_timer(timer);
spin_unlock(&p->sighand->siglock);
@@ -1289,10 +1290,11 @@ static inline int fastpath_timer_check(s
sig = tsk->signal;
if (sig->cputimer.running) {
struct task_cputime group_sample;
+ unsigned long flags;
- spin_lock(&sig->cputimer.lock);
+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
group_sample = sig->cputimer.cputime;
- spin_unlock(&sig->cputimer.lock);
+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
@@ -1306,13 +1308,13 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
-void run_posix_cpu_timers(struct task_struct *tsk)
+static void __run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
/*
* The fast path checks that there are no expired thread or thread
@@ -1370,6 +1372,190 @@ void run_posix_cpu_timers(struct task_st
}
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+
+static int posix_cpu_timers_thread(void *data)
+{
+ int cpu = (long)data;
+
+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
+
+ while (!kthread_should_stop()) {
+ struct task_struct *tsk = NULL;
+ struct task_struct *next = NULL;
+
+ if (cpu_is_offline(cpu))
+ goto wait_to_die;
+
+ /* grab task list */
+ raw_local_irq_disable();
+ tsk = per_cpu(posix_timer_tasklist, cpu);
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+ raw_local_irq_enable();
+
+ /* it's possible the list is empty, just return */
+ if (!tsk) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ continue;
+ }
+
+ /* Process task list */
+ while (1) {
+ /* save next */
+ next = tsk->posix_timer_list;
+
+ /* run the task timers, clear its ptr and
+ * unreference it
+ */
+ __run_posix_cpu_timers(tsk);
+ tsk->posix_timer_list = NULL;
+ put_task_struct(tsk);
+
+ /* check if this is the last on the list */
+ if (next == tsk)
+ break;
+ tsk = next;
+ }
+ }
+ return 0;
+
+wait_to_die:
+ /* Wait for kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
+ /* tsk == current, ensure it is safe to use ->signal/sighand */
+ if (unlikely(tsk->exit_state))
+ return 0;
+
+ if (!task_cputime_zero(&tsk->cputime_expires))
+ return 1;
+
+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
+ return 1;
+
+ return 0;
+}
+
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ unsigned long cpu = smp_processor_id();
+ struct task_struct *tasklist;
+
+ BUG_ON(!irqs_disabled());
+ if(!per_cpu(posix_timer_task, cpu))
+ return;
+ /* get per-cpu references */
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+ /* check to see if we're already queued */
+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+ get_task_struct(tsk);
+ if (tasklist) {
+ tsk->posix_timer_list = tasklist;
+ } else {
+ /*
+ * The list is terminated by a self-pointing
+ * task_struct
+ */
+ tsk->posix_timer_list = tsk;
+ }
+ per_cpu(posix_timer_tasklist, cpu) = tsk;
+
+ wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+}
+
+/*
+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
+ * Here we can start up the necessary migration thread for the new CPU.
+ */
+static int posix_cpu_thread_call(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+ struct task_struct *p;
+ struct sched_param param;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ p = kthread_create(posix_cpu_timers_thread, hcpu,
+ "posixcputmr/%d",cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
+ p->flags |= PF_NOFREEZE;
+ kthread_bind(p, cpu);
+ /* Must be high prio to avoid getting starved */
+ param.sched_priority = MAX_RT_PRIO-1;
+ sched_setscheduler(p, SCHED_FIFO, &param);
+ per_cpu(posix_timer_task,cpu) = p;
+ break;
+ case CPU_ONLINE:
+ /* Strictly unnecessary, as first user will wake it. */
+ wake_up_process(per_cpu(posix_timer_task,cpu));
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ /* Unbind it from offline cpu so it can run. Fall thru. */
+ kthread_bind(per_cpu(posix_timer_task,cpu),
+ any_online_cpu(cpu_online_map));
+ kthread_stop(per_cpu(posix_timer_task,cpu));
+ per_cpu(posix_timer_task,cpu) = NULL;
+ break;
+ case CPU_DEAD:
+ kthread_stop(per_cpu(posix_timer_task,cpu));
+ per_cpu(posix_timer_task,cpu) = NULL;
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+/* Register at highest priority so that task migration (migrate_all_tasks)
+ * happens before everything else.
+ */
+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
+ .notifier_call = posix_cpu_thread_call,
+ .priority = 10
+};
+
+static int __init posix_cpu_thread_init(void)
+{
+ void *hcpu = (void *)(long)smp_processor_id();
+ /* Start one for boot CPU. */
+ unsigned long cpu;
+
+ /* init the per-cpu posix_timer_tasklets */
+ for_each_cpu_mask(cpu, cpu_possible_map)
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
+ register_cpu_notifier(&posix_cpu_thread_notifier);
+ return 0;
+}
+early_initcall(posix_cpu_thread_init);
+#else /* CONFIG_PREEMPT_RT_BASE */
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ __run_posix_cpu_timers(tsk);
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
Index: linux-2.6/kernel/trace/ftrace.c
===================================================================
--- linux-2.6.orig/kernel/trace/ftrace.c
@@ -1699,7 +1971,7 @@ Index: linux-2.6/kernel/sched.c
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
@@ -4206,6 +4207,126 @@ static inline void schedule_debug(struct
@@ -4182,6 +4183,126 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
@@ -1826,7 +2098,7 @@ Index: linux-2.6/kernel/sched.c
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
@@ -4265,6 +4386,8 @@ need_resched:
@@ -4241,6 +4362,8 @@ need_resched:
raw_spin_lock_irq(&rq->lock);
@@ -1835,7 +2107,7 @@ Index: linux-2.6/kernel/sched.c
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -4272,19 +4395,6 @@ need_resched:
@@ -4248,19 +4371,6 @@ need_resched:
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -1855,7 +2127,7 @@ Index: linux-2.6/kernel/sched.c
}
switch_count = &prev->nvcsw;
}
@@ -4318,15 +4428,23 @@ need_resched:
@@ -4294,15 +4404,23 @@ need_resched:
post_schedule(rq);
@@ -1881,7 +2153,7 @@ Index: linux-2.6/kernel/sched.c
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
@@ -4335,15 +4453,37 @@ static inline void sched_submit_work(str
@@ -4311,15 +4429,37 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -2279,12 +2551,12 @@ Index: linux-2.6/kernel/workqueue.c
if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;
+
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
- /* this can only happen on the local cpu */
- BUG_ON(cpu != raw_smp_processor_id());
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
+
+ worker->sleeping = 1;
+ cpu = smp_processor_id();
@@ -2903,14 +3175,14 @@ Index: linux-2.6/kernel/workqueue.c
- gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE);
- spin_lock_irq(&gcwq->lock);
+ gcwq = get_gcwq(get_cpu());
+ gcwq = get_gcwq(get_cpu_light());
+ spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
+ list_del_init(&work->entry);
+ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
}
+ spin_unlock_irq(&gcwq->lock);
+ put_cpu();
+ put_cpu_light();
}
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
@@ -3821,15 +4093,75 @@ Index: linux-2.6/kernel/sched_rt.c
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
@@ -631,6 +634,7 @@ static int sched_rt_runtime_exceeded(str
@@ -553,12 +556,9 @@ static inline int balance_runtime(struct
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
- int i, idle = 1;
+ int i, idle = 1, throttled = 0;
const struct cpumask *span;
- if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
- return 1;
-
span = sched_rt_period_mask();
for_each_cpu(i, span) {
int enqueue = 0;
@@ -593,12 +593,17 @@ static int do_sched_rt_period_timer(stru
if (!rt_rq_throttled(rt_rq))
enqueue = 1;
}
+ if (rt_rq->rt_throttled)
+ throttled = 1;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
raw_spin_unlock(&rq->lock);
}
+ if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
+ return 1;
+
return idle;
}
@@ -630,7 +635,24 @@ static int sched_rt_runtime_exceeded(str
return 0;
if (rt_rq->rt_time > runtime) {
rt_rq->rt_throttled = 1;
+ printk_once(KERN_WARNING "sched: RT throttling activated\n");
- rt_rq->rt_throttled = 1;
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+ /*
+ * Don't actually throttle groups that have no runtime assigned
+ * but accrue some time due to boosting.
+ */
+ if (likely(rt_b->rt_runtime)) {
+ rt_rq->rt_throttled = 1;
+ printk_once(KERN_WARNING "sched: RT throttling activated\n");
+ } else {
+ /*
+ * In case we did anyway, make it go away,
+ * replenishment is a joke, since it will replenish us
+ * with exactly 0 ns.
+ */
+ rt_rq->rt_time = 0;
+ }
+
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
@@ -1186,7 +1190,7 @@ static void deactivate_task(struct rq *r
@@ -658,7 +680,8 @@ static void update_curr_rt(struct rq *rq
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
- schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+ schedstat_set(curr->se.statistics.exec_max,
+ max(curr->se.statistics.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
@@ -1186,7 +1209,7 @@ static void deactivate_task(struct rq *r
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -3838,7 +4170,7 @@ Index: linux-2.6/kernel/sched_rt.c
(p->rt.nr_cpus_allowed > 1))
return 1;
return 0;
@@ -1331,7 +1335,7 @@ static struct rq *find_lock_lowest_rq(st
@@ -1331,7 +1354,7 @@ static struct rq *find_lock_lowest_rq(st
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu,
@@ -3847,7 +4179,7 @@ Index: linux-2.6/kernel/sched_rt.c
task_running(rq, task) ||
!task->on_rq)) {
@@ -1614,9 +1618,6 @@ static void set_cpus_allowed_rt(struct t
@@ -1614,9 +1637,6 @@ static void set_cpus_allowed_rt(struct t
update_rt_migration(&rq->rt);
}
@@ -5428,8 +5760,8 @@ Index: linux-2.6/kernel/printk.c
+ va_start(ap, fmt);
+ early_vprintk(fmt, ap);
+ va_end(ap);
+}
+
}
+/*
+ * This is independent of any log levels - a global
+ * kill switch that turns off all of printk.
@@ -5448,8 +5780,8 @@ Index: linux-2.6/kernel/printk.c
+void printk_kill(void)
+{
+ printk_killswitch = 1;
}
+}
+
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+ if (!printk_killswitch)
@@ -5536,16 +5868,27 @@ Index: linux-2.6/kernel/printk.c
- if (console_trylock_for_printk(this_cpu))
+ if (console_trylock_for_printk(this_cpu, flags)) {
+#ifndef CONFIG_PREEMPT_RT_FULL
+ console_unlock();
console_unlock();
+#else
+ raw_local_irq_restore(flags);
console_unlock();
+ console_unlock();
+ raw_local_irq_save(flags);
+#endif
+ }
lockdep_on();
out_restore_irqs:
@@ -1213,8 +1282,8 @@ void printk_tick(void)
int printk_needs_cpu(int cpu)
{
- if (cpu_is_offline(cpu))
- printk_tick();
+ if (unlikely(cpu_is_offline(cpu)))
+ __this_cpu_write(printk_pending, 0);
return __this_cpu_read(printk_pending);
}
@@ -1252,18 +1321,23 @@ void console_unlock(void)
console_may_schedule = 0;
@@ -5962,278 +6305,6 @@ Index: linux-2.6/include/linux/sched.h
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
Index: linux-2.6/kernel/posix-cpu-timers.c
===================================================================
--- linux-2.6.orig/kernel/posix-cpu-timers.c
+++ linux-2.6/kernel/posix-cpu-timers.c
@@ -280,13 +280,13 @@ void thread_group_cputimer(struct task_s
* it.
*/
thread_group_cputime(tsk, &sum);
- spin_lock_irqsave(&cputimer->lock, flags);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 1;
update_gt_cputime(&cputimer->cputime, &sum);
} else
- spin_lock_irqsave(&cputimer->lock, flags);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
*times = cputimer->cputime;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
@@ -699,7 +699,7 @@ static int posix_cpu_timer_set(struct k_
/*
* Disarm any old timer after extracting its expiry time.
*/
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -997,9 +997,9 @@ static void stop_process_timers(struct s
struct thread_group_cputimer *cputimer = &sig->cputimer;
unsigned long flags;
- spin_lock_irqsave(&cputimer->lock, flags);
+ raw_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 0;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
static u32 onecputick;
@@ -1221,7 +1221,7 @@ void posix_cpu_timer_schedule(struct k_i
/*
* Now re-arm for the new expiry time.
*/
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
arm_timer(timer);
spin_unlock(&p->sighand->siglock);
@@ -1288,10 +1288,11 @@ static inline int fastpath_timer_check(s
sig = tsk->signal;
if (sig->cputimer.running) {
struct task_cputime group_sample;
+ unsigned long flags;
- spin_lock(&sig->cputimer.lock);
+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
group_sample = sig->cputimer.cputime;
- spin_unlock(&sig->cputimer.lock);
+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
@@ -1305,13 +1306,13 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
-void run_posix_cpu_timers(struct task_struct *tsk)
+static void __run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
/*
* The fast path checks that there are no expired thread or thread
@@ -1369,6 +1370,190 @@ void run_posix_cpu_timers(struct task_st
}
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+
+static int posix_cpu_timers_thread(void *data)
+{
+ int cpu = (long)data;
+
+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
+
+ while (!kthread_should_stop()) {
+ struct task_struct *tsk = NULL;
+ struct task_struct *next = NULL;
+
+ if (cpu_is_offline(cpu))
+ goto wait_to_die;
+
+ /* grab task list */
+ raw_local_irq_disable();
+ tsk = per_cpu(posix_timer_tasklist, cpu);
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+ raw_local_irq_enable();
+
+ /* it's possible the list is empty, just return */
+ if (!tsk) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ continue;
+ }
+
+ /* Process task list */
+ while (1) {
+ /* save next */
+ next = tsk->posix_timer_list;
+
+ /* run the task timers, clear its ptr and
+ * unreference it
+ */
+ __run_posix_cpu_timers(tsk);
+ tsk->posix_timer_list = NULL;
+ put_task_struct(tsk);
+
+ /* check if this is the last on the list */
+ if (next == tsk)
+ break;
+ tsk = next;
+ }
+ }
+ return 0;
+
+wait_to_die:
+ /* Wait for kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
+ /* tsk == current, ensure it is safe to use ->signal/sighand */
+ if (unlikely(tsk->exit_state))
+ return 0;
+
+ if (!task_cputime_zero(&tsk->cputime_expires))
+ return 1;
+
+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
+ return 1;
+
+ return 0;
+}
+
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ unsigned long cpu = smp_processor_id();
+ struct task_struct *tasklist;
+
+ BUG_ON(!irqs_disabled());
+ if(!per_cpu(posix_timer_task, cpu))
+ return;
+ /* get per-cpu references */
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+ /* check to see if we're already queued */
+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+ get_task_struct(tsk);
+ if (tasklist) {
+ tsk->posix_timer_list = tasklist;
+ } else {
+ /*
+ * The list is terminated by a self-pointing
+ * task_struct
+ */
+ tsk->posix_timer_list = tsk;
+ }
+ per_cpu(posix_timer_tasklist, cpu) = tsk;
+
+ wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+}
+
+/*
+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
+ * Here we can start up the necessary migration thread for the new CPU.
+ */
+static int posix_cpu_thread_call(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+ struct task_struct *p;
+ struct sched_param param;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ p = kthread_create(posix_cpu_timers_thread, hcpu,
+ "posixcputmr/%d",cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
+ p->flags |= PF_NOFREEZE;
+ kthread_bind(p, cpu);
+ /* Must be high prio to avoid getting starved */
+ param.sched_priority = MAX_RT_PRIO-1;
+ sched_setscheduler(p, SCHED_FIFO, &param);
+ per_cpu(posix_timer_task,cpu) = p;
+ break;
+ case CPU_ONLINE:
+ /* Strictly unnecessary, as first user will wake it. */
+ wake_up_process(per_cpu(posix_timer_task,cpu));
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ /* Unbind it from offline cpu so it can run. Fall thru. */
+ kthread_bind(per_cpu(posix_timer_task,cpu),
+ any_online_cpu(cpu_online_map));
+ kthread_stop(per_cpu(posix_timer_task,cpu));
+ per_cpu(posix_timer_task,cpu) = NULL;
+ break;
+ case CPU_DEAD:
+ kthread_stop(per_cpu(posix_timer_task,cpu));
+ per_cpu(posix_timer_task,cpu) = NULL;
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+/* Register at highest priority so that task migration (migrate_all_tasks)
+ * happens before everything else.
+ */
+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
+ .notifier_call = posix_cpu_thread_call,
+ .priority = 10
+};
+
+static int __init posix_cpu_thread_init(void)
+{
+ void *hcpu = (void *)(long)smp_processor_id();
+ /* Start one for boot CPU. */
+ unsigned long cpu;
+
+ /* init the per-cpu posix_timer_tasklets */
+ for_each_cpu_mask(cpu, cpu_possible_map)
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
+ register_cpu_notifier(&posix_cpu_thread_notifier);
+ return 0;
+}
+early_initcall(posix_cpu_thread_init);
+#else /* CONFIG_PREEMPT_RT_BASE */
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+ __run_posix_cpu_timers(tsk);
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
Index: linux-2.6/kernel/sched_stats.h
===================================================================
--- linux-2.6.orig/kernel/sched_stats.h
@@ -22908,7 +22979,7 @@ Index: linux-2.6/kernel/cpu.c
+ struct task_struct *tsk;
+
+ init_completion(&hp->synced);
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(tsk))
+ return (PTR_ERR(tsk));
+ kthread_bind(tsk, cpu);


@@ -1 +1 @@
- features/all/rt/patch-3.0.6-rt18.patch featureset=rt
+ features/all/rt/patch-3.0.7-rt20.patch featureset=rt