From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: hrtimers: prepare full preemption
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.10-rt7.tar.xz

Make cancellation of a running callback in softirq context safe
against preemption.

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/linux/hrtimer.h |   10 ++++++++++
 kernel/hrtimer.c        |   33 ++++++++++++++++++++++++++++++++-
 kernel/itimer.c         |    1 +
 kernel/posix-timers.c   |   33 +++++++++++++++++++++++++++++++++
 4 files changed, 76 insertions(+), 1 deletion(-)

--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -193,6 +193,9 @@ struct hrtimer_cpu_base {
 	unsigned long			nr_hangs;
 	ktime_t				max_hang_time;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	wait_queue_head_t		wait;
+#endif
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 };
 
@@ -388,6 +391,13 @@ static inline int hrtimer_restart(struct
 	return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
+/* Softirq preemption could deadlock timer removal */
+#ifdef CONFIG_PREEMPT_RT_BASE
+  extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
+#else
+# define hrtimer_wait_for_timer(timer)	do { cpu_relax(); } while (0)
+#endif
+
 /* Query timers: */
 extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
 extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -872,6 +872,32 @@ u64 hrtimer_forward(struct hrtimer *time
 }
 EXPORT_SYMBOL_GPL(hrtimer_forward);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define wake_up_timer_waiters(b)	wake_up(&(b)->wait)
+
+/**
+ * hrtimer_wait_for_timer - Wait for a running timer
+ *
+ * @timer:	timer to wait for
+ *
+ * The function waits in case the timers callback function is
+ * currently executed on the waitqueue of the timer base. The
+ * waitqueue is woken up after the timer callback function has
+ * finished execution.
+ */
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
+{
+	struct hrtimer_clock_base *base = timer->base;
+
+	if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
+		wait_event(base->cpu_base->wait,
+			   !(timer->state & HRTIMER_STATE_CALLBACK));
+}
+
+#else
+# define wake_up_timer_waiters(b)	do { } while (0)
+#endif
+
 /*
  * enqueue_hrtimer - internal function to (re)start a timer
  *
@@ -1124,7 +1150,7 @@ int hrtimer_cancel(struct hrtimer *timer
 
 		if (ret >= 0)
 			return ret;
-		cpu_relax();
+		hrtimer_wait_for_timer(timer);
 	}
 }
 EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1534,6 +1560,8 @@ void hrtimer_run_queues(void)
 		}
 		raw_spin_unlock(&cpu_base->lock);
 	}
+
+	wake_up_timer_waiters(cpu_base);
 }
 
 /*
@@ -1694,6 +1722,9 @@ static void init_hrtimers_cpu(int cpu)
 	}
 
 	hrtimer_init_hres(cpu_base);
+#ifdef CONFIG_PREEMPT_RT_BASE
+	init_waitqueue_head(&cpu_base->wait);
+#endif
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
 		/* We are sharing ->siglock with it_real_fn() */
 		if (hrtimer_try_to_cancel(timer) < 0) {
 			spin_unlock_irq(&tsk->sighand->siglock);
+			hrtimer_wait_for_timer(&tsk->signal->real_timer);
 			goto again;
 		}
 		expires = timeval_to_ktime(value->it_value);
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -818,6 +818,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
 	return overrun;
 }
 
+/*
+ * Protected by RCU!
+ */
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (kc->timer_set == common_timer_set)
+		hrtimer_wait_for_timer(&timr->it.real.timer);
+	else
+		/* FIXME: Whacky hack for posix-cpu-timers */
+		schedule_timeout(1);
+#endif
+}
+
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
 static int
@@ -895,6 +909,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
 	if (!timr)
 		return -EINVAL;
 
+	rcu_read_lock();
 	kc = clockid_to_kclock(timr->it_clock);
 	if (WARN_ON_ONCE(!kc || !kc->timer_set))
 		error = -EINVAL;
@@ -903,9 +918,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
 
 	unlock_timer(timr, flag);
 	if (error == TIMER_RETRY) {
+		timer_wait_for_callback(kc, timr);
 		rtn = NULL;	// We already got the old time...
+		rcu_read_unlock();
 		goto retry;
 	}
+	rcu_read_unlock();
 
 	if (old_setting && !error &&
 	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -943,10 +961,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
 	if (!timer)
 		return -EINVAL;
 
+	rcu_read_lock();
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
 		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
 		goto retry_delete;
 	}
+	rcu_read_unlock();
 
 	spin_lock(&current->sighand->siglock);
 	list_del(&timer->list);
@@ -972,8 +995,18 @@ static void itimer_delete(struct k_itime
 retry_delete:
 	spin_lock_irqsave(&timer->it_lock, flags);
 
+	/* On RT we can race with a deletion */
+	if (!timer->it_signal) {
+		unlock_timer(timer, flags);
+		return;
+	}
+
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
+		rcu_read_lock();
 		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
 		goto retry_delete;
 	}
 	list_del(&timer->list);
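
Illustrative note (not part of the patch): the core change is to replace the
cpu_relax() busy-wait in hrtimer_cancel() with a sleeping wait, because on
PREEMPT_RT the softirq that runs the callback is itself preemptible and a
higher-priority spinner could keep the callback from ever finishing. The
sketch below is a hypothetical userspace analogue of that pattern using
pthreads rather than the kernel's wait_event()/wake_up(); every name in it is
made up for illustration and none of it appears in the patch.

/*
 * Userspace sketch (illustrative only): the canceling thread sleeps until
 * the "callback running" flag is cleared, and the thread executing the
 * callback wakes all waiters when it finishes, mirroring the
 * hrtimer_wait_for_timer() / wake_up_timer_waiters() pair added above.
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_timer_base {
	pthread_mutex_t lock;
	pthread_cond_t  wait;              /* analogue of cpu_base->wait */
	bool            callback_running;  /* analogue of HRTIMER_STATE_CALLBACK */
};

static struct fake_timer_base base = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
	.callback_running = false,
};

/* Analogue of the softirq thread running the timer callback. */
static void *run_callback(void *arg)
{
	pthread_mutex_lock(&base.lock);
	base.callback_running = true;
	pthread_mutex_unlock(&base.lock);

	usleep(100000);                     /* the callback body */

	pthread_mutex_lock(&base.lock);
	base.callback_running = false;
	pthread_cond_broadcast(&base.wait); /* analogue of wake_up_timer_waiters() */
	pthread_mutex_unlock(&base.lock);
	return NULL;
}

/* Analogue of hrtimer_wait_for_timer(): sleep instead of spinning. */
static void wait_for_callback(void)
{
	pthread_mutex_lock(&base.lock);
	while (base.callback_running)
		pthread_cond_wait(&base.wait, &base.lock);
	pthread_mutex_unlock(&base.lock);
}

int main(void)
{
	pthread_t cb;

	pthread_create(&cb, NULL, run_callback, NULL);
	usleep(10000);           /* let the callback start */
	wait_for_callback();     /* was a cpu_relax() busy loop before the patch */
	printf("callback finished, cancellation can proceed\n");
	pthread_join(cb, NULL);
	return 0;
}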