linux/debian/patches/features/all/rt/0027-hrtimer-Prepare-handli...

From: Anna-Maria Gleixner <anna-maria@linutronix.de>
Date: Wed, 20 Dec 2017 17:13:16 +0100
Subject: [PATCH 27/29] hrtimer: Prepare handling of hard and softirq based
hrtimers
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patches-4.14.15-rt11.tar.xz
The softirq based hrtimer infrastructure can utilize most of the existing
hrtimer functions, but needs to operate on a different data set.

Add an active_mask argument to various functions so the hard and soft bases
can be selected. Fix up the existing callers and hand in the ACTIVE_HARD
mask.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/time/hrtimer.c | 38 +++++++++++++++++++++++++++++---------
1 file changed, 29 insertions(+), 9 deletions(-)
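Before the diff itself, a brief illustration of the mask layout the patch
introduces. This is a minimal, self-contained user-space sketch, not part of
the patch; it assumes the usual 4.14-rt clock base enumeration of four hard
bases (indices 0-3) followed by their four soft counterparts, i.e.
HRTIMER_BASE_MONOTONIC_SOFT == 4. The value in any given tree may differ.

	/* Sketch only: reproduces the mask arithmetic from the patch below. */
	#include <stdio.h>

	#define HRTIMER_BASE_MONOTONIC_SOFT	4	/* first soft base index (assumed) */
	#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
	#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
	#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
	#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

	int main(void)
	{
		/* One bit per clock base with an armed timer: here base 1
		 * (hard) and base 4 (soft) are active.
		 */
		unsigned int active_bases = (1U << 1) | (1U << 4);

		printf("HARD mask 0x%02x selects 0x%02x\n",
		       HRTIMER_ACTIVE_HARD, active_bases & HRTIMER_ACTIVE_HARD);
		printf("SOFT mask 0x%02x selects 0x%02x\n",
		       HRTIMER_ACTIVE_SOFT, active_bases & HRTIMER_ACTIVE_SOFT);
		return 0;
	}

With that layout HRTIMER_ACTIVE_HARD is 0x0f and HRTIMER_ACTIVE_SOFT is 0xf0,
so masking cpu_base->active_bases before iterating restricts a caller to one
half of the bases without any other code change.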
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -60,6 +60,15 @@
#include "tick-internal.h"
/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
+#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
+#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
+/*
* The timer bases:
*
* There are more clockids than hrtimer bases. Thus, we index
@@ -508,13 +517,24 @@ static ktime_t __hrtimer_next_event_base
return expires_next;
}
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ *
+ * @active_mask must be one of:
+ * - HRTIMER_ACTIVE_ALL,
+ * - HRTIMER_ACTIVE_SOFT, or
+ * - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
+ unsigned int active_mask)
{
- unsigned int active = cpu_base->active_bases;
+ unsigned int active;
ktime_t expires_next = KTIME_MAX;
cpu_base->next_timer = NULL;
+ active = cpu_base->active_bases & active_mask;
expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
return expires_next;
@@ -555,7 +575,7 @@ hrtimer_force_reprogram(struct hrtimer_c
{
ktime_t expires_next;
- expires_next = __hrtimer_get_next_event(cpu_base);
+ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
if (skip_equal && expires_next == cpu_base->expires_next)
return;
@@ -1078,7 +1098,7 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base))
- expires = __hrtimer_get_next_event(cpu_base);
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -1252,10 +1272,10 @@ static void __run_hrtimer(struct hrtimer
}
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
- unsigned long flags)
+ unsigned long flags, unsigned int active_mask)
{
struct hrtimer_clock_base *base;
- unsigned int active = cpu_base->active_bases;
+ unsigned int active = cpu_base->active_bases & active_mask;
for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *node;
@@ -1318,10 +1338,10 @@ void hrtimer_interrupt(struct clock_even
*/
cpu_base->expires_next = KTIME_MAX;
- __hrtimer_run_queues(cpu_base, now, flags);
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
/* Reevaluate the clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base);
+ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
/*
* Store the new expiry value so the migration code can verify
* against it.
@@ -1425,7 +1445,7 @@ void hrtimer_run_queues(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now, flags);
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
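All callers converted here pass HRTIMER_ACTIVE_HARD, so this patch is
behavior-neutral on its own; the new parameter pays off once a softirq handler
expires the soft bases. A hedged sketch of such a consumer follows; the
function name, signature and locking details are assumptions modeled on the
follow-up patches in this series, not taken from this diff.

	/* Sketch only: a softirq handler that expires just the soft bases,
	 * leaving the hard bases to hrtimer_interrupt(). Assumed shape of a
	 * later patch in this series, not part of this one.
	 */
	static void hrtimer_run_softirq(struct softirq_action *h)
	{
		struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
		unsigned long flags;
		ktime_t now;

		raw_spin_lock_irqsave(&cpu_base->lock, flags);

		now = hrtimer_update_base(cpu_base);
		__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);

		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	}

The same split works for reprogramming: __hrtimer_get_next_event(cpu_base,
HRTIMER_ACTIVE_SOFT) yields the earliest soft expiry, which a soft-capable
caller can use to decide when the softirq next needs to be raised.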