From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jul 2009 19:43:27 +0200
Subject: timekeeping: Convert xtime_lock to raw_seqlock

Convert xtime_lock to raw_seqlock and fix up all users.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/ia64/kernel/time.c       |    6 +++---
 arch/x86/include/asm/vgtod.h  |    2 +-
 arch/x86/kernel/vsyscall_64.c |   10 +++++-----
 kernel/time/ntp.c             |   16 ++++++++--------
 kernel/time/tick-common.c     |    4 ++--
 kernel/time/tick-internal.h   |    2 +-
 kernel/time/tick-sched.c      |    8 ++++----
 kernel/time/timekeeping.c     |   31 +++++++++++++++----------------
 8 files changed, 39 insertions(+), 40 deletions(-)
Index: linux-3.2/arch/ia64/kernel/time.c
===================================================================
--- linux-3.2.orig/arch/ia64/kernel/time.c
+++ linux-3.2/arch/ia64/kernel/time.c
@@ -36,7 +36,7 @@
 static cycle_t itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data = {
-	.lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
+	.lock = __RAW_SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
 };
 
 struct itc_jitter_data_t itc_jitter_data;
@@ -462,7 +462,7 @@ void update_vsyscall(struct timespec *wa
 {
 	unsigned long flags;
 
-	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+	raw_write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
 
 	/* copy fsyscall clock data */
 	fsyscall_gtod_data.clk_mask = c->mask;
@@ -485,6 +485,6 @@ void update_vsyscall(struct timespec *wa
 		fsyscall_gtod_data.monotonic_time.tv_sec++;
 	}
 
-	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+	raw_write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
 }
Index: linux-3.2/arch/x86/include/asm/vgtod.h
===================================================================
--- linux-3.2.orig/arch/x86/include/asm/vgtod.h
+++ linux-3.2/arch/x86/include/asm/vgtod.h
@@ -5,7 +5,7 @@
 #include <linux/clocksource.h>
 
 struct vsyscall_gtod_data {
-	seqlock_t	lock;
+	raw_seqlock_t	lock;
 
 	/* open coded 'struct timespec' */
 	time_t		wall_time_sec;
Index: linux-3.2/arch/x86/kernel/vsyscall_64.c
===================================================================
--- linux-3.2.orig/arch/x86/kernel/vsyscall_64.c
+++ linux-3.2/arch/x86/kernel/vsyscall_64.c
@@ -54,7 +54,7 @@
 DEFINE_VVAR(int, vgetcpu_mode);
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
-	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
+	.lock = __RAW_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
 };
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
@@ -82,10 +82,10 @@ void update_vsyscall_tz(void)
 {
 	unsigned long flags;
 
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+	raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
 	/* sys_tz has changed */
 	vsyscall_gtod_data.sys_tz = sys_tz;
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+	raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
@@ -93,7 +93,7 @@ void update_vsyscall(struct timespec *wa
 {
 	unsigned long flags;
 
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+	raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
 
 	/* copy vsyscall data */
 	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -106,7 +106,7 @@ void update_vsyscall(struct timespec *wa
 	vsyscall_gtod_data.wall_to_monotonic = *wtm;
 	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
 
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+	raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
Index: linux-3.2/kernel/time/ntp.c
===================================================================
--- linux-3.2.orig/kernel/time/ntp.c
+++ linux-3.2/kernel/time/ntp.c
@@ -358,7 +358,7 @@ static enum hrtimer_restart ntp_leap_sec
 {
 	enum hrtimer_restart res = HRTIMER_NORESTART;
 
-	write_seqlock(&xtime_lock);
+	raw_write_seqlock(&xtime_lock);
 
 	switch (time_state) {
 	case TIME_OK:
@@ -388,7 +388,7 @@ static enum hrtimer_restart ntp_leap_sec
 		break;
 	}
 
-	write_sequnlock(&xtime_lock);
+	raw_write_sequnlock(&xtime_lock);
 
 	return res;
 }
@@ -663,7 +663,7 @@ int do_adjtimex(struct timex *txc)
 
 	getnstimeofday(&ts);
 
-	write_seqlock_irq(&xtime_lock);
+	raw_write_seqlock_irq(&xtime_lock);
 
 	if (txc->modes & ADJ_ADJTIME) {
 		long save_adjust = time_adjust;
@@ -705,7 +705,7 @@ int do_adjtimex(struct timex *txc)
 	/* fill PPS status fields */
 	pps_fill_timex(txc);
 
-	write_sequnlock_irq(&xtime_lock);
+	raw_write_sequnlock_irq(&xtime_lock);
 
 	txc->time.tv_sec = ts.tv_sec;
 	txc->time.tv_usec = ts.tv_nsec;
@@ -903,7 +903,7 @@ void hardpps(const struct timespec *phas
 
 	pts_norm = pps_normalize_ts(*phase_ts);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	raw_write_seqlock_irqsave(&xtime_lock, flags);
 
 	/* clear the error bits, they will be set again if needed */
 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -916,7 +916,7 @@ void hardpps(const struct timespec *phas
 	 * just start the frequency interval */
 	if (unlikely(pps_fbase.tv_sec == 0)) {
 		pps_fbase = *raw_ts;
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 		return;
 	}
 
@@ -931,7 +931,7 @@ void hardpps(const struct timespec *phas
 		time_status |= STA_PPSJITTER;
 		/* restart the frequency calibration interval */
 		pps_fbase = *raw_ts;
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 		pr_err("hardpps: PPSJITTER: bad pulse\n");
 		return;
 	}
@@ -948,7 +948,7 @@ void hardpps(const struct timespec *phas
 
 	hardpps_update_phase(pts_norm.nsec);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);
Index: linux-3.2/kernel/time/tick-common.c
===================================================================
--- linux-3.2.orig/kernel/time/tick-common.c
+++ linux-3.2/kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
 static void tick_periodic(int cpu)
 {
 	if (tick_do_timer_cpu == cpu) {
-		write_seqlock(&xtime_lock);
+		raw_write_seqlock(&xtime_lock);
 
 		/* Keep track of the next tick event */
 		tick_next_period = ktime_add(tick_next_period, tick_period);
 
 		do_timer(1);
-		write_sequnlock(&xtime_lock);
+		raw_write_sequnlock(&xtime_lock);
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
Index: linux-3.2/kernel/time/tick-internal.h
===================================================================
--- linux-3.2.orig/kernel/time/tick-internal.h
+++ linux-3.2/kernel/time/tick-internal.h
@@ -141,4 +141,4 @@ static inline int tick_device_is_functio
 #endif
 
 extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_seqlock_t xtime_lock;
Index: linux-3.2/kernel/time/tick-sched.c
===================================================================
--- linux-3.2.orig/kernel/time/tick-sched.c
+++ linux-3.2/kernel/time/tick-sched.c
@@ -56,7 +56,7 @@ static void tick_do_update_jiffies64(kti
 		return;
 
 	/* Reevalute with xtime_lock held */
-	write_seqlock(&xtime_lock);
+	raw_write_seqlock(&xtime_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(kti
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
-	write_sequnlock(&xtime_lock);
+	raw_write_sequnlock(&xtime_lock);
 }
 
 /*
@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(vo
 {
 	ktime_t period;
 
-	write_seqlock(&xtime_lock);
+	raw_write_seqlock(&xtime_lock);
 	/* Did we start the jiffies update yet ? */
 	if (last_jiffies_update.tv64 == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
-	write_sequnlock(&xtime_lock);
+	raw_write_sequnlock(&xtime_lock);
 	return period;
 }
Index: linux-3.2/kernel/time/timekeeping.c
===================================================================
--- linux-3.2.orig/kernel/time/timekeeping.c
+++ linux-3.2/kernel/time/timekeeping.c
@@ -139,8 +139,7 @@ static inline s64 timekeeping_get_ns_raw
  * This read-write spinlock protects us from races in SMP while
  * playing with xtime.
  */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
+__cacheline_aligned_in_smp DEFINE_RAW_SEQLOCK(xtime_lock);
 
 /*
  * The current time
@@ -365,7 +364,7 @@ int do_settimeofday(const struct timespe
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	raw_write_seqlock_irqsave(&xtime_lock, flags);
 
 	timekeeping_forward_now();
 
@@ -381,7 +380,7 @@ int do_settimeofday(const struct timespe
 	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
 			timekeeper.mult);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -405,7 +404,7 @@ int timekeeping_inject_offset(struct tim
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	raw_write_seqlock_irqsave(&xtime_lock, flags);
 
 	timekeeping_forward_now();
 
@@ -418,7 +417,7 @@ int timekeeping_inject_offset(struct tim
 	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
 			timekeeper.mult);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -572,7 +571,7 @@ void __init timekeeping_init(void)
 	read_persistent_clock(&now);
 	read_boot_clock(&boot);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	raw_write_seqlock_irqsave(&xtime_lock, flags);
 
 	ntp_init();
 
@@ -593,7 +592,7 @@ void __init timekeeping_init(void)
 				-boot.tv_sec, -boot.tv_nsec);
 	total_sleep_time.tv_sec = 0;
 	total_sleep_time.tv_nsec = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 }
 
 /* time in seconds when suspend began */
@@ -640,7 +639,7 @@ void timekeeping_inject_sleeptime(struct
 	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
 		return;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	raw_write_seqlock_irqsave(&xtime_lock, flags);
 	timekeeping_forward_now();
 
 	__timekeeping_inject_sleeptime(delta);
@@ -650,7 +649,7 @@ void timekeeping_inject_sleeptime(struct
 	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
 			timekeeper.mult);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -673,7 +672,7 @@ static void timekeeping_resume(void)
 
 	clocksource_resume();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	raw_write_seqlock_irqsave(&xtime_lock, flags);
 
 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -683,7 +682,7 @@ static void timekeeping_resume(void)
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	touch_softlockup_watchdog();
 
@@ -701,7 +700,7 @@ static int timekeeping_suspend(void)
 
 	read_persistent_clock(&timekeeping_suspend_time);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	raw_write_seqlock_irqsave(&xtime_lock, flags);
 	timekeeping_forward_now();
 	timekeeping_suspended = 1;
 
@@ -724,7 +723,7 @@ static int timekeeping_suspend(void)
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 	clocksource_suspend();
@@ -1239,7 +1238,7 @@ ktime_t ktime_get_monotonic_offset(void)
 */
 void xtime_update(unsigned long ticks)
 {
-	write_seqlock(&xtime_lock);
+	raw_write_seqlock(&xtime_lock);
 	do_timer(ticks);
-	write_sequnlock(&xtime_lock);
+	raw_write_sequnlock(&xtime_lock);
 }