[x86] Update rt featureset to 3.2.9-rt15

svn path=/dists/sid/linux-2.6/; revision=18782
Uwe Kleine-König 2012-03-04 13:31:34 +00:00
parent 6044a51c1b
commit 6b60b6faa5
58 changed files with 5046 additions and 1470 deletions

debian/changelog

@ -37,6 +37,9 @@ linux-2.6 (3.2.9-1) UNRELEASED; urgency=low
* gspca: Enable USB_GSPCA_TOPRO as module
* dvb-usb: Enable DVB_USB_PCTV452E, DVB_USB_MXL111SF as modules
[ Uwe Kleine-König ]
* [x86] Update rt featureset to 3.2.9-rt15
-- Bastian Blank <waldi@debian.org> Thu, 01 Mar 2012 11:47:17 +0100
linux-2.6 (3.2.7-1) unstable; urgency=low


@ -1,72 +0,0 @@
Subject: acpi-gpe-use-wait-simple.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 13 Dec 2011 17:14:35 +0100
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/acpi/ec.c | 8 ++++----
drivers/acpi/internal.h | 4 +++-
2 files changed, 7 insertions(+), 5 deletions(-)
Index: linux-3.2/drivers/acpi/ec.c
===================================================================
--- linux-3.2.orig/drivers/acpi/ec.c
+++ linux-3.2/drivers/acpi/ec.c
@@ -222,7 +222,7 @@ static int ec_poll(struct acpi_ec *ec)
if (ec_transaction_done(ec))
return 0;
} else {
- if (wait_event_timeout(ec->wait,
+ if (swait_event_timeout(ec->wait,
ec_transaction_done(ec),
msecs_to_jiffies(1)))
return 0;
@@ -272,7 +272,7 @@ static int ec_wait_ibf0(struct acpi_ec *
unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
/* interrupt wait manually if GPE mode is not active */
while (time_before(jiffies, delay))
- if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
+ if (swait_event_timeout(ec->wait, ec_check_ibf0(ec),
msecs_to_jiffies(1)))
return 0;
return -ETIME;
@@ -612,7 +612,7 @@ static u32 acpi_ec_gpe_handler(acpi_hand
advance_transaction(ec, acpi_ec_read_status(ec));
if (ec_transaction_done(ec) &&
(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
- wake_up(&ec->wait);
+ swait_wake(&ec->wait);
ec_check_sci(ec, acpi_ec_read_status(ec));
}
return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
@@ -676,7 +676,7 @@ static struct acpi_ec *make_acpi_ec(void
return NULL;
ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
mutex_init(&ec->lock);
- init_waitqueue_head(&ec->wait);
+ init_swait_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
raw_spin_lock_init(&ec->curr_lock);
return ec;
Index: linux-3.2/drivers/acpi/internal.h
===================================================================
--- linux-3.2.orig/drivers/acpi/internal.h
+++ linux-3.2/drivers/acpi/internal.h
@@ -23,6 +23,8 @@
#define PREFIX "ACPI: "
+#include <linux/wait-simple.h>
+
int init_acpi_device_notify(void);
int acpi_scan_init(void);
int acpi_sysfs_init(void);
@@ -59,7 +61,7 @@ struct acpi_ec {
unsigned long global_lock;
unsigned long flags;
struct mutex lock;
- wait_queue_head_t wait;
+ struct swait_head wait;
struct list_head list;
struct transaction *curr;
raw_spinlock_t curr_lock;


@ -1,109 +0,0 @@
From: Clark Williams <williams@redhat.com>
Date: Sat Dec 3 09:15:46 2011 -0600
Subject: ACPI: Convert embedded controller lock to raw spinlock
Was seeing multiple "scheduling while atomic" backtraces on the
3.2-rc2-rt5 realtime kernel. This patch converts the spinlock in
the ACPI embedded controller structure (curr_lock) to be a raw
spinlock.
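For illustration, a minimal sketch of the conversion pattern (distilled
from the diff below, not extra code in the patch): on PREEMPT_RT a
spinlock_t becomes a sleeping lock, while a raw_spinlock_t keeps
spinning, so it stays safe in the atomic contexts that touch ec->curr:

    raw_spinlock_t curr_lock;                  /* was: spinlock_t */
    unsigned long flags;

    raw_spin_lock_init(&curr_lock);            /* was: spin_lock_init() */
    raw_spin_lock_irqsave(&curr_lock, flags);  /* spins, never sleeps, even on RT */
    /* ... manipulate ec->curr ... */
    raw_spin_unlock_irqrestore(&curr_lock, flags);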
Signed-off-by: Clark Williams <williams@redhat.com>
Link: http://lkml.kernel.org/r/20111203093537.7d805f64@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
drivers/acpi/ec.c | 22 +++++++++++-----------
drivers/acpi/internal.h | 2 +-
2 files changed, 12 insertions(+), 12 deletions(-)
Index: linux-3.2/drivers/acpi/ec.c
===================================================================
--- linux-3.2.orig/drivers/acpi/ec.c
+++ linux-3.2/drivers/acpi/ec.c
@@ -152,10 +152,10 @@ static int ec_transaction_done(struct ac
{
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&ec->curr_lock, flags);
+ raw_spin_lock_irqsave(&ec->curr_lock, flags);
if (!ec->curr || ec->curr->done)
ret = 1;
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ raw_spin_unlock_irqrestore(&ec->curr_lock, flags);
return ret;
}
@@ -169,7 +169,7 @@ static void start_transaction(struct acp
static void advance_transaction(struct acpi_ec *ec, u8 status)
{
unsigned long flags;
- spin_lock_irqsave(&ec->curr_lock, flags);
+ raw_spin_lock_irqsave(&ec->curr_lock, flags);
if (!ec->curr)
goto unlock;
if (ec->curr->wlen > ec->curr->wi) {
@@ -194,7 +194,7 @@ err:
if (in_interrupt())
++ec->curr->irq_count;
unlock:
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ raw_spin_unlock_irqrestore(&ec->curr_lock, flags);
}
static int acpi_ec_sync_query(struct acpi_ec *ec);
@@ -232,9 +232,9 @@ static int ec_poll(struct acpi_ec *ec)
if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
break;
pr_debug(PREFIX "controller reset, restart transaction\n");
- spin_lock_irqsave(&ec->curr_lock, flags);
+ raw_spin_lock_irqsave(&ec->curr_lock, flags);
start_transaction(ec);
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ raw_spin_unlock_irqrestore(&ec->curr_lock, flags);
}
return -ETIME;
}
@@ -247,17 +247,17 @@ static int acpi_ec_transaction_unlocked(
if (EC_FLAGS_MSI)
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
- spin_lock_irqsave(&ec->curr_lock, tmp);
+ raw_spin_lock_irqsave(&ec->curr_lock, tmp);
/* following two actions should be kept atomic */
ec->curr = t;
start_transaction(ec);
if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ raw_spin_unlock_irqrestore(&ec->curr_lock, tmp);
ret = ec_poll(ec);
- spin_lock_irqsave(&ec->curr_lock, tmp);
+ raw_spin_lock_irqsave(&ec->curr_lock, tmp);
ec->curr = NULL;
- spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ raw_spin_unlock_irqrestore(&ec->curr_lock, tmp);
return ret;
}
@@ -678,7 +678,7 @@ static struct acpi_ec *make_acpi_ec(void
mutex_init(&ec->lock);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
- spin_lock_init(&ec->curr_lock);
+ raw_spin_lock_init(&ec->curr_lock);
return ec;
}
Index: linux-3.2/drivers/acpi/internal.h
===================================================================
--- linux-3.2.orig/drivers/acpi/internal.h
+++ linux-3.2/drivers/acpi/internal.h
@@ -62,7 +62,7 @@ struct acpi_ec {
wait_queue_head_t wait;
struct list_head list;
struct transaction *curr;
- spinlock_t curr_lock;
+ raw_spinlock_t curr_lock;
};
extern struct acpi_ec *first_ec;


@ -1,406 +0,0 @@
Subject: acpi: Make gbl_[hardware|gpe]_lock raw
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 Nov 2011 17:09:54 +0100
These locks are taken in the guts of the idle code and cannot be
converted to "sleeping" spinlocks on RT
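A condensed sketch of the conversion (the names are from the diff
below): the OSL-managed acpi_spinlock becomes a statically defined raw
spinlock, which also lets the acpi_os_create_lock()/acpi_os_delete_lock()
calls go away:

    DEFINE_RAW_SPINLOCK(acpi_gbl_gpe_lock);    /* static init, no create/delete */

    unsigned long flags;

    raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
    /* ... walk GPE blocks / touch GPE registers ... */
    raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);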
Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/acpi/acpica/acglobal.h | 4 ++--
drivers/acpi/acpica/evgpe.c | 4 ++--
drivers/acpi/acpica/evgpeblk.c | 8 ++++----
drivers/acpi/acpica/evgpeutil.c | 12 ++++++------
drivers/acpi/acpica/evxface.c | 10 +++++-----
drivers/acpi/acpica/evxfgpe.c | 24 ++++++++++++------------
drivers/acpi/acpica/hwregs.c | 4 ++--
drivers/acpi/acpica/hwxface.c | 4 ++--
drivers/acpi/acpica/utmutex.c | 21 +++------------------
9 files changed, 38 insertions(+), 53 deletions(-)
Index: linux-3.2/drivers/acpi/acpica/acglobal.h
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/acglobal.h
+++ linux-3.2/drivers/acpi/acpica/acglobal.h
@@ -235,8 +235,8 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pend
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+extern raw_spinlock_t acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+extern raw_spinlock_t acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
/*****************************************************************************
*
Index: linux-3.2/drivers/acpi/acpica/evgpe.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evgpe.c
+++ linux-3.2/drivers/acpi/acpica/evgpe.c
@@ -357,7 +357,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_x
* Note: Not necessary to obtain the hardware lock, since the GPE
* registers are owned by the gpe_lock.
*/
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Examine all GPE blocks attached to this interrupt level */
@@ -440,7 +440,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_x
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return (int_status);
}
Index: linux-3.2/drivers/acpi/acpica/evgpeblk.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evgpeblk.c
+++ linux-3.2/drivers/acpi/acpica/evgpeblk.c
@@ -95,7 +95,7 @@ acpi_ev_install_gpe_block(struct acpi_gp
/* Install the new block at the end of the list with lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (gpe_xrupt_block->gpe_block_list_head) {
next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
while (next_gpe_block->next) {
@@ -109,7 +109,7 @@ acpi_ev_install_gpe_block(struct acpi_gp
}
gpe_block->xrupt_block = gpe_xrupt_block;
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -156,7 +156,7 @@ acpi_status acpi_ev_delete_gpe_block(str
} else {
/* Remove the block on this interrupt with lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (gpe_block->previous) {
gpe_block->previous->next = gpe_block->next;
} else {
@@ -167,7 +167,7 @@ acpi_status acpi_ev_delete_gpe_block(str
if (gpe_block->next) {
gpe_block->next->previous = gpe_block->previous;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
}
acpi_current_gpe_count -= gpe_block->gpe_count;
Index: linux-3.2/drivers/acpi/acpica/evgpeutil.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evgpeutil.c
+++ linux-3.2/drivers/acpi/acpica/evgpeutil.c
@@ -70,7 +70,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback
ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Walk the interrupt level descriptor list */
@@ -101,7 +101,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback
}
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -237,7 +237,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_
/* Install new interrupt descriptor with spin lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (acpi_gbl_gpe_xrupt_list_head) {
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt->next) {
@@ -249,7 +249,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_
} else {
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
/* Install new interrupt handler if not SCI_INT */
@@ -306,7 +306,7 @@ acpi_status acpi_ev_delete_gpe_xrupt(str
/* Unlink the interrupt block with lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (gpe_xrupt->previous) {
gpe_xrupt->previous->next = gpe_xrupt->next;
} else {
@@ -318,7 +318,7 @@ acpi_status acpi_ev_delete_gpe_xrupt(str
if (gpe_xrupt->next) {
gpe_xrupt->next->previous = gpe_xrupt->previous;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
/* Free the block */
Index: linux-3.2/drivers/acpi/acpica/evxface.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evxface.c
+++ linux-3.2/drivers/acpi/acpica/evxface.c
@@ -751,7 +751,7 @@ acpi_install_gpe_handler(acpi_handle gpe
goto unlock_and_exit;
}
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -799,14 +799,14 @@ acpi_install_gpe_handler(acpi_handle gpe
~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
free_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
ACPI_FREE(handler);
goto unlock_and_exit;
}
@@ -853,7 +853,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_
return_ACPI_STATUS(status);
}
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -904,7 +904,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_
ACPI_FREE(handler);
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
Index: linux-3.2/drivers/acpi/acpica/evxfgpe.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evxfgpe.c
+++ linux-3.2/drivers/acpi/acpica/evxfgpe.c
@@ -122,7 +122,7 @@ acpi_status acpi_enable_gpe(acpi_handle
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -131,7 +131,7 @@ acpi_status acpi_enable_gpe(acpi_handle
status = acpi_ev_add_gpe_reference(gpe_event_info);
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
@@ -159,7 +159,7 @@ acpi_status acpi_disable_gpe(acpi_handle
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -168,7 +168,7 @@ acpi_status acpi_disable_gpe(acpi_handle
status = acpi_ev_remove_gpe_reference(gpe_event_info) ;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
@@ -215,7 +215,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -271,7 +271,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake
status = AE_OK;
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
@@ -301,7 +301,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_
ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/*
* Ensure that we have a valid GPE number and that this GPE is in
@@ -347,7 +347,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_
}
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -373,7 +373,7 @@ acpi_status acpi_clear_gpe(acpi_handle g
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -386,7 +386,7 @@ acpi_status acpi_clear_gpe(acpi_handle g
status = acpi_hw_clear_gpe(gpe_event_info);
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -416,7 +416,7 @@ acpi_get_gpe_status(acpi_handle gpe_devi
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -434,7 +434,7 @@ acpi_get_gpe_status(acpi_handle gpe_devi
*event_status |= ACPI_EVENT_FLAG_HANDLE;
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
Index: linux-3.2/drivers/acpi/acpica/hwregs.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/hwregs.c
+++ linux-3.2/drivers/acpi/acpica/hwregs.c
@@ -263,14 +263,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_hardware_lock, lock_flags);
/* Clear the fixed events in PM1 A/B */
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_hardware_lock, lock_flags);
if (ACPI_FAILURE(status))
goto exit;
Index: linux-3.2/drivers/acpi/acpica/hwxface.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/hwxface.c
+++ linux-3.2/drivers/acpi/acpica/hwxface.c
@@ -387,7 +387,7 @@ acpi_status acpi_write_bit_register(u32
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_hardware_lock, lock_flags);
/*
* At this point, we know that the parent register is one of the
@@ -448,7 +448,7 @@ acpi_status acpi_write_bit_register(u32
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
Index: linux-3.2/drivers/acpi/acpica/utmutex.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/utmutex.c
+++ linux-3.2/drivers/acpi/acpica/utmutex.c
@@ -52,6 +52,9 @@ static acpi_status acpi_ut_create_mutex(
static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id);
+DEFINE_RAW_SPINLOCK(acpi_gbl_gpe_lock);
+DEFINE_RAW_SPINLOCK(acpi_gbl_hardware_lock);
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_mutex_initialize
@@ -81,18 +84,6 @@ acpi_status acpi_ut_mutex_initialize(voi
}
}
- /* Create the spinlocks for use at interrupt level */
-
- status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
- if (ACPI_FAILURE (status)) {
- return_ACPI_STATUS (status);
- }
-
- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
- if (ACPI_FAILURE (status)) {
- return_ACPI_STATUS (status);
- }
-
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
if (ACPI_FAILURE(status)) {
@@ -132,13 +123,7 @@ void acpi_ut_mutex_terminate(void)
acpi_os_delete_mutex(acpi_gbl_osi_mutex);
- /* Delete the spinlocks */
-
- acpi_os_delete_lock(acpi_gbl_gpe_lock);
- acpi_os_delete_lock(acpi_gbl_hardware_lock);
-
/* Delete the reader/writer lock */
-
acpi_ut_delete_rw_lock(&acpi_gbl_namespace_rw_lock);
return_VOID;
}


@ -11,7 +11,7 @@ Index: linux-3.2/arch/arm/kernel/signal.c
===================================================================
--- linux-3.2.orig/arch/arm/kernel/signal.c
+++ linux-3.2/arch/arm/kernel/signal.c
@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re
@@ -672,6 +672,9 @@ static void do_signal(struct pt_regs *re
if (!user_mode(regs))
return;


@ -11,7 +11,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -2595,7 +2595,7 @@ extern int _cond_resched(void);
@@ -2599,7 +2599,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);


@ -12,7 +12,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -2598,12 +2598,16 @@ extern int __cond_resched_lock(spinlock_
@@ -2602,12 +2602,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})


@ -0,0 +1,115 @@
Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 02 Mar 2012 10:36:57 -0500
Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.
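A sketch of the failure mode being avoided (hypothetical caller,
condensed from the description above):

    /* a pin_current_cpu()-style wait that relies on a special task state */
    set_current_state(TASK_UNINTERRUPTIBLE);
    mutex_lock(&cpu_hotplug.lock);     /* wakeup here forces TASK_RUNNING */
    mutex_unlock(&cpu_hotplug.lock);
    /* state is now RUNNING: the intended sleep is silently lost */

The spinlock-based hotplug_lock() below blocks through the rtmutex
slowpath, which stashes and restores the task's state, so nothing is
lost.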
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fa40834..c25b5ff 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -46,7 +46,12 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* Makes the lock keep the task's state */
+ spinlock_t lock;
+#else
struct mutex lock; /* Synchronizes accesses to refcount, */
+#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -58,6 +63,14 @@ static struct {
.refcount = 0,
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock() spin_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() spin_unlock(&cpu_hotplug.lock)
+#else
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+#endif
+
struct hotplug_pcp {
struct task_struct *unplug;
int refcount;
@@ -87,8 +100,8 @@ retry:
return;
}
preempt_enable();
- mutex_lock(&cpu_hotplug.lock);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_lock();
+ hotplug_unlock();
preempt_disable();
goto retry;
}
@@ -161,9 +174,9 @@ void get_online_cpus(void)
might_sleep();
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
cpu_hotplug.refcount++;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -172,10 +185,10 @@ void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
wake_up_process(cpu_hotplug.active_writer);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -207,11 +220,11 @@ static void cpu_hotplug_begin(void)
cpu_hotplug.active_writer = current;
for (;;) {
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (likely(!cpu_hotplug.refcount))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
schedule();
}
}
@@ -219,7 +232,7 @@ static void cpu_hotplug_begin(void)
static void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
#else /* #if CONFIG_HOTPLUG_CPU */


@ -11,7 +11,7 @@ Index: linux-3.2/fs/eventpoll.c
===================================================================
--- linux-3.2.orig/fs/eventpoll.c
+++ linux-3.2/fs/eventpoll.c
@@ -438,12 +438,12 @@ static int ep_poll_wakeup_proc(void *pri
@@ -464,12 +464,12 @@ static int ep_poll_wakeup_proc(void *pri
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
@ -25,4 +25,4 @@ Index: linux-3.2/fs/eventpoll.c
+ put_cpu_light();
}
/*
static void ep_remove_wait_queue(struct eppoll_entry *pwq)


@ -13,7 +13,7 @@ Index: linux-3.2/mm/filemap.c
===================================================================
--- linux-3.2.orig/mm/filemap.c
+++ linux-3.2/mm/filemap.c
@@ -2058,7 +2058,7 @@ size_t iov_iter_copy_from_user_atomic(st
@@ -2044,7 +2044,7 @@ size_t iov_iter_copy_from_user_atomic(st
char *kaddr;
size_t copied;


@ -18,10 +18,14 @@ Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Index: linux-rt.git/arch/x86/kernel/traps.c
---
arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++---------
1 file changed, 23 insertions(+), 9 deletions(-)
Index: linux-3.2/arch/x86/kernel/traps.c
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/traps.c
+++ linux-rt.git/arch/x86/kernel/traps.c
--- linux-3.2.orig/arch/x86/kernel/traps.c
+++ linux-3.2/arch/x86/kernel/traps.c
@@ -87,9 +87,21 @@ static inline void conditional_sti(struc
local_irq_enable();
}

[File diff suppressed because it is too large]


@ -0,0 +1,31 @@
Subject: fs: Protect open coded isize seqcount
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Mar 2012 16:12:47 +0100
A writer might be preempted in the write side critical section on
RT. Disable preemption to avoid endless spinning of a preempting
reader.
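For context, the reader side this protects is the upstream
i_size_read() retry loop (a sketch of the 32-bit SMP variant; the
helper name is illustrative):

    static loff_t i_size_read_sketch(const struct inode *inode)
    {
            loff_t i_size;
            unsigned seq;

            do {
                    seq = read_seqcount_begin(&inode->i_size_seqcount);
                    i_size = inode->i_size;
            } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
            return i_size;
    }

If the writer is preempted between write_seqcount_begin() and
write_seqcount_end(), a higher-priority reader on the same CPU retries
forever; hence the preempt_disable_rt()/preempt_enable_rt() bracket
below.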
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
include/linux/fs.h | 2 ++
1 file changed, 2 insertions(+)
Index: linux-3.2/include/linux/fs.h
===================================================================
--- linux-3.2.orig/include/linux/fs.h
+++ linux-3.2/include/linux/fs.h
@@ -903,9 +903,11 @@ static inline loff_t i_size_read(const s
static inline void i_size_write(struct inode *inode, loff_t i_size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ preempt_disable_rt();
write_seqcount_begin(&inode->i_size_seqcount);
inode->i_size = i_size;
write_seqcount_end(&inode->i_size_seqcount);
+ preempt_enable_rt();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
preempt_disable();
inode->i_size = i_size;


@ -0,0 +1,339 @@
Subject: fs: fs_struct use seqlock
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 27 Feb 2012 17:58:13 +0100
Replace the open coded seqlock with a real one, so RT can handle it.
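For reference, the seqlock_t primitives the patch switches to (a
condensed sketch, not part of the diff): the writer takes the embedded
spinlock and bumps the sequence, readers are lockless and retry:

    seqlock_t lock;
    unsigned seq;

    seqlock_init(&lock);

    write_seqlock(&lock);               /* writer: lock + sequence bump */
    /* update fs->root / fs->pwd */
    write_sequnlock(&lock);

    do {                                /* lockless reader */
            seq = read_seqbegin(&lock);
            /* snapshot fs->pwd */
    } while (read_seqretry(&lock, seq));

The seq_spin_lock()/seq_spin_unlock() helpers used below (provided
elsewhere in the RT series) take just the writer lock, for paths that
only need mutual exclusion.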
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
fs/exec.c | 4 ++--
fs/fhandle.c | 4 ++--
fs/fs_struct.c | 46 ++++++++++++++++++----------------------------
fs/namei.c | 14 +++++++-------
include/linux/fs_struct.h | 16 +++++++---------
kernel/fork.c | 10 +++++-----
6 files changed, 41 insertions(+), 53 deletions(-)
Index: linux-3.2/fs/exec.c
===================================================================
--- linux-3.2.orig/fs/exec.c
+++ linux-3.2/fs/exec.c
@@ -1239,7 +1239,7 @@ int check_unsafe_exec(struct linux_binpr
}
n_fs = 1;
- spin_lock(&p->fs->lock);
+ seq_spin_lock(&p->fs->lock);
rcu_read_lock();
for (t = next_thread(p); t != p; t = next_thread(t)) {
if (t->fs == p->fs)
@@ -1256,7 +1256,7 @@ int check_unsafe_exec(struct linux_binpr
res = 1;
}
}
- spin_unlock(&p->fs->lock);
+ seq_spin_unlock(&p->fs->lock);
return res;
}
Index: linux-3.2/fs/fhandle.c
===================================================================
--- linux-3.2.orig/fs/fhandle.c
+++ linux-3.2/fs/fhandle.c
@@ -115,10 +115,10 @@ static struct vfsmount *get_vfsmount_fro
if (fd == AT_FDCWD) {
struct fs_struct *fs = current->fs;
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
path = fs->pwd;
mntget(path.mnt);
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
} else {
int fput_needed;
struct file *file = fget_light(fd, &fput_needed);
Index: linux-3.2/fs/fs_struct.c
===================================================================
--- linux-3.2.orig/fs/fs_struct.c
+++ linux-3.2/fs/fs_struct.c
@@ -26,13 +26,11 @@ void set_fs_root(struct fs_struct *fs, s
{
struct path old_root;
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->lock);
old_root = fs->root;
fs->root = *path;
path_get_longterm(path);
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->lock);
if (old_root.dentry)
path_put_longterm(&old_root);
}
@@ -45,13 +43,11 @@ void set_fs_pwd(struct fs_struct *fs, st
{
struct path old_pwd;
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->lock);
old_pwd = fs->pwd;
fs->pwd = *path;
path_get_longterm(path);
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->lock);
if (old_pwd.dentry)
path_put_longterm(&old_pwd);
@@ -68,8 +64,7 @@ void chroot_fs_refs(struct path *old_roo
task_lock(p);
fs = p->fs;
if (fs) {
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->lock);
if (fs->root.dentry == old_root->dentry
&& fs->root.mnt == old_root->mnt) {
path_get_longterm(new_root);
@@ -82,8 +77,7 @@ void chroot_fs_refs(struct path *old_roo
fs->pwd = *new_root;
count++;
}
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->lock);
}
task_unlock(p);
} while_each_thread(g, p);
@@ -106,12 +100,10 @@ void exit_fs(struct task_struct *tsk)
if (fs) {
int kill;
task_lock(tsk);
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
+ write_seqlock(&fs->lock);
tsk->fs = NULL;
kill = !--fs->users;
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
+ write_sequnlock(&fs->lock);
task_unlock(tsk);
if (kill)
free_fs_struct(fs);
@@ -125,16 +117,15 @@ struct fs_struct *copy_fs_struct(struct
if (fs) {
fs->users = 1;
fs->in_exec = 0;
- spin_lock_init(&fs->lock);
- seqcount_init(&fs->seq);
+ seqlock_init(&fs->lock);
fs->umask = old->umask;
- spin_lock(&old->lock);
+ seq_spin_lock(&old->lock);
fs->root = old->root;
path_get_longterm(&fs->root);
fs->pwd = old->pwd;
path_get_longterm(&fs->pwd);
- spin_unlock(&old->lock);
+ seq_spin_unlock(&old->lock);
}
return fs;
}
@@ -149,10 +140,10 @@ int unshare_fs_struct(void)
return -ENOMEM;
task_lock(current);
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
kill = !--fs->users;
current->fs = new_fs;
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
task_unlock(current);
if (kill)
@@ -171,8 +162,7 @@ EXPORT_SYMBOL(current_umask);
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
.users = 1,
- .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
- .seq = SEQCNT_ZERO,
+ .lock = __SEQLOCK_UNLOCKED(init_fs.lock),
.umask = 0022,
};
@@ -185,14 +175,14 @@ void daemonize_fs_struct(void)
task_lock(current);
- spin_lock(&init_fs.lock);
+ seq_spin_lock(&init_fs.lock);
init_fs.users++;
- spin_unlock(&init_fs.lock);
+ seq_spin_unlock(&init_fs.lock);
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
current->fs = &init_fs;
kill = !--fs->users;
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
task_unlock(current);
if (kill)
Index: linux-3.2/fs/namei.c
===================================================================
--- linux-3.2.orig/fs/namei.c
+++ linux-3.2/fs/namei.c
@@ -428,7 +428,7 @@ static int unlazy_walk(struct nameidata
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
want_root = 1;
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
if (nd->root.mnt != fs->root.mnt ||
nd->root.dentry != fs->root.dentry)
goto err_root;
@@ -458,7 +458,7 @@ static int unlazy_walk(struct nameidata
spin_unlock(&parent->d_lock);
if (want_root) {
path_get(&nd->root);
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
}
mntget(nd->path.mnt);
@@ -473,7 +473,7 @@ err_parent:
spin_unlock(&parent->d_lock);
err_root:
if (want_root)
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
return -ECHILD;
}
@@ -567,10 +567,10 @@ static __always_inline void set_root_rcu
unsigned seq;
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->lock);
nd->root = fs->root;
nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->lock, seq));
}
}
@@ -1519,10 +1519,10 @@ static int path_init(int dfd, const char
rcu_read_lock();
do {
- seq = read_seqcount_begin(&fs->seq);
+ seq = read_seqbegin(&fs->lock);
nd->path = fs->pwd;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
- } while (read_seqcount_retry(&fs->seq, seq));
+ } while (read_seqretry(&fs->lock, seq));
} else {
get_fs_pwd(current->fs, &nd->path);
}
Index: linux-3.2/include/linux/fs_struct.h
===================================================================
--- linux-3.2.orig/include/linux/fs_struct.h
+++ linux-3.2/include/linux/fs_struct.h
@@ -2,13 +2,11 @@
#define _LINUX_FS_STRUCT_H
#include <linux/path.h>
-#include <linux/spinlock.h>
#include <linux/seqlock.h>
struct fs_struct {
int users;
- spinlock_t lock;
- seqcount_t seq;
+ seqlock_t lock;
int umask;
int in_exec;
struct path root, pwd;
@@ -26,29 +24,29 @@ extern int unshare_fs_struct(void);
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
*root = fs->root;
path_get(root);
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
}
static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
*pwd = fs->pwd;
path_get(pwd);
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
}
static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
struct path *pwd)
{
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
*root = fs->root;
path_get(root);
*pwd = fs->pwd;
path_get(pwd);
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
}
#endif /* _LINUX_FS_STRUCT_H */
Index: linux-3.2/kernel/fork.c
===================================================================
--- linux-3.2.orig/kernel/fork.c
+++ linux-3.2/kernel/fork.c
@@ -825,13 +825,13 @@ static int copy_fs(unsigned long clone_f
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
if (fs->in_exec) {
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
return -EAGAIN;
}
fs->users++;
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
return 0;
}
tsk->fs = copy_fs_struct(fs);
@@ -1717,13 +1717,13 @@ SYSCALL_DEFINE1(unshare, unsigned long,
if (new_fs) {
fs = current->fs;
- spin_lock(&fs->lock);
+ seq_spin_lock(&fs->lock);
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
- spin_unlock(&fs->lock);
+ seq_spin_unlock(&fs->lock);
}
if (new_fd) {


@ -49,11 +49,16 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 991bc7f..9850dc0 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -75,7 +75,8 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
---
kernel/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
kernel/rtmutex_common.h | 1 +
2 files changed, 32 insertions(+), 1 deletion(-)
Index: linux-3.2/kernel/rtmutex.c
===================================================================
--- linux-3.2.orig/kernel/rtmutex.c
+++ linux-3.2/kernel/rtmutex.c
@@ -69,7 +69,8 @@ static void fixup_rt_mutex_waiters(struc
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
@ -63,7 +68,7 @@ index 991bc7f..9850dc0 100644
}
/*
@@ -1353,6 +1354,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
@@ -1010,6 +1011,35 @@ int rt_mutex_start_proxy_lock(struct rt_
return 1;
}
@ -99,11 +104,11 @@ index 991bc7f..9850dc0 100644
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
if (ret && !rt_mutex_owner(lock)) {
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index a688a29..6ec3dc1 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -105,6 +105,7 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
Index: linux-3.2/kernel/rtmutex_common.h
===================================================================
--- linux-3.2.orig/kernel/rtmutex_common.h
+++ linux-3.2/kernel/rtmutex_common.h
@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mut
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
@ -111,6 +116,3 @@ index a688a29..6ec3dc1 100644
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,


@ -0,0 +1,91 @@
Subject: ia64: vsyscall: Use seqcount instead of seqlock
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 28 Feb 2012 18:33:08 +0100
The update of the vdso data happens under xtime_lock, so adding a
nested lock is pointless. Just use a seqcount to sync the readers.
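Condensed sketch of the resulting scheme (assuming, as stated above,
that the writer is already serialized by xtime_lock):

    seqcount_t seq;

    write_seqcount_begin(&seq);         /* caller holds xtime_lock */
    /* copy gtod data for the fsyscall reader */
    write_seqcount_end(&seq);

The fsys_gettimeofday reader keeps its retry loop; only the redundant
writer-side lock and irq-save disappear.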
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
---
arch/ia64/kernel/asm-offsets.c | 4 ++--
arch/ia64/kernel/fsys.S | 2 +-
arch/ia64/kernel/fsyscall_gtod_data.h | 2 +-
arch/ia64/kernel/time.c | 10 +++-------
4 files changed, 7 insertions(+), 11 deletions(-)
Index: linux-2.6/arch/ia64/kernel/asm-offsets.c
===================================================================
--- linux-2.6.orig/arch/ia64/kernel/asm-offsets.c
+++ linux-2.6/arch/ia64/kernel/asm-offsets.c
@@ -269,8 +269,8 @@ void foo(void)
BLANK();
/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
- DEFINE(IA64_GTOD_LOCK_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, lock));
+ DEFINE(IA64_GTOD_SEQ_OFFSET,
+ offsetof (struct fsyscall_gtod_data_t, seq));
DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
offsetof (struct fsyscall_gtod_data_t, wall_time));
DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
Index: linux-2.6/arch/ia64/kernel/fsys.S
===================================================================
--- linux-2.6.orig/arch/ia64/kernel/fsys.S
+++ linux-2.6/arch/ia64/kernel/fsys.S
@@ -174,7 +174,7 @@ ENTRY(fsys_set_tid_address)
FSYS_RETURN
END(fsys_set_tid_address)
-#if IA64_GTOD_LOCK_OFFSET !=0
+#if IA64_GTOD_SEQ_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
#endif
#if IA64_ITC_JITTER_OFFSET !=0
Index: linux-2.6/arch/ia64/kernel/fsyscall_gtod_data.h
===================================================================
--- linux-2.6.orig/arch/ia64/kernel/fsyscall_gtod_data.h
+++ linux-2.6/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,7 +6,7 @@
*/
struct fsyscall_gtod_data_t {
- seqlock_t lock;
+ seqcount_t seq;
struct timespec wall_time;
struct timespec monotonic_time;
cycle_t clk_mask;
Index: linux-2.6/arch/ia64/kernel/time.c
===================================================================
--- linux-2.6.orig/arch/ia64/kernel/time.c
+++ linux-2.6/arch/ia64/kernel/time.c
@@ -35,9 +35,7 @@
static cycle_t itc_get_cycles(struct clocksource *cs);
-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
- .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
-};
+struct fsyscall_gtod_data_t fsyscall_gtod_data;
struct itc_jitter_data_t itc_jitter_data;
@@ -460,9 +458,7 @@ void update_vsyscall_tz(void)
void update_vsyscall(struct timespec *wall, struct timespec *wtm,
struct clocksource *c, u32 mult)
{
- unsigned long flags;
-
- write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+ write_seqcount_begin(&fsyscall_gtod_data.seq);
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
@@ -485,6 +481,6 @@ void update_vsyscall(struct timespec *wa
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
- write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+ write_seqcount_end(&fsyscall_gtod_data.seq);
}


@ -138,7 +138,7 @@ Index: linux-3.2/kernel/softirq.c
===================================================================
--- linux-3.2.orig/kernel/softirq.c
+++ linux-3.2/kernel/softirq.c
@@ -431,6 +431,13 @@ void local_bh_enable_ip(unsigned long ip
@@ -425,6 +425,13 @@ void local_bh_enable_ip(unsigned long ip
}
EXPORT_SYMBOL(local_bh_enable_ip);


@ -219,7 +219,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -1566,6 +1566,12 @@ struct task_struct {
@@ -1570,6 +1570,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;


@ -14,4 +14,4 @@ Index: linux-3.2/localversion-rt
--- /dev/null
+++ linux-3.2/localversion-rt
@@ -0,0 +1 @@
+-rt12
+-rt15


@ -19,7 +19,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -1431,6 +1431,7 @@ struct task_struct {
@@ -1435,6 +1435,7 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif


@ -0,0 +1,34 @@
Subject: net: u64_stat: Protect seqcount
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Mar 2012 16:16:02 +0100
On RT we must prevent the writer from being preempted inside the write
section. Otherwise a preempting reader might spin forever.
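A hypothetical usage sketch showing where the new calls take effect
(the variable names are illustrative):

    struct u64_stats_sync syncp;
    u64 rx_packets;

    u64_stats_update_begin(&syncp);  /* on 32-bit SMP now also disables RT preemption */
    rx_packets++;
    u64_stats_update_end(&syncp);    /* re-enables it */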
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
include/linux/u64_stats_sync.h | 2 ++
1 file changed, 2 insertions(+)
Index: linux-3.2/include/linux/u64_stats_sync.h
===================================================================
--- linux-3.2.orig/include/linux/u64_stats_sync.h
+++ linux-3.2/include/linux/u64_stats_sync.h
@@ -70,6 +70,7 @@ struct u64_stats_sync {
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ preempt_disable_rt();
write_seqcount_begin(&syncp->seq);
#endif
}
@@ -78,6 +79,7 @@ static inline void u64_stats_update_end(
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_end(&syncp->seq);
+ preempt_enable_rt();
#endif
}


@ -11,7 +11,7 @@ Index: linux-3.2/net/mac80211/rx.c
===================================================================
--- linux-3.2.orig/net/mac80211/rx.c
+++ linux-3.2/net/mac80211/rx.c
@@ -2952,7 +2952,7 @@ void ieee80211_rx(struct ieee80211_hw *h
@@ -2958,7 +2958,7 @@ void ieee80211_rx(struct ieee80211_hw *h
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);


@ -29,15 +29,40 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/signal.c | 9 +++++++++
include/linux/sched.h | 4 ++++
kernel/signal.c | 31 +++++++++++++++++++++++++++++--
3 files changed, 42 insertions(+), 2 deletions(-)
arch/x86/include/asm/signal.h | 13 +++++++++++++
arch/x86/kernel/signal.c | 9 +++++++++
include/linux/sched.h | 4 ++++
kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++--
4 files changed, 61 insertions(+), 2 deletions(-)
Index: linux-rt.git/arch/x86/kernel/signal.c
Index: linux-3.2/arch/x86/include/asm/signal.h
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/signal.c
+++ linux-rt.git/arch/x86/kernel/signal.c
--- linux-3.2.orig/arch/x86/include/asm/signal.h
+++ linux-3.2/arch/x86/include/asm/signal.h
@@ -31,6 +31,19 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
+/*
+ * Because some traps use the IST stack, we must keep
+ * preemption disabled while calling do_trap(), but do_trap()
+ * may call force_sig_info() which will grab the signal spin_locks
+ * for the task, which in PREEMPT_RT_FULL are mutexes.
+ * By defining ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will
+ * set TIF_NOTIFY_RESUME and set up the signal to be sent on exit
+ * of the trap.
+ */
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
+#define ARCH_RT_DELAYS_SIGNAL_SEND
+#endif
+
#else
/* Here we must cater to libcs that poke about in kernel headers. */
Index: linux-3.2/arch/x86/kernel/signal.c
===================================================================
--- linux-3.2.orig/arch/x86/kernel/signal.c
+++ linux-3.2/arch/x86/kernel/signal.c
@@ -820,6 +820,15 @@ do_notify_resume(struct pt_regs *regs, v
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
@ -54,11 +79,11 @@ Index: linux-rt.git/arch/x86/kernel/signal.c
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
Index: linux-rt.git/include/linux/sched.h
Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-rt.git.orig/include/linux/sched.h
+++ linux-rt.git/include/linux/sched.h
@@ -1405,6 +1405,10 @@ struct task_struct {
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -1392,6 +1392,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
@ -69,10 +94,10 @@ Index: linux-rt.git/include/linux/sched.h
unsigned long sas_ss_sp;
size_t sas_ss_size;
Index: linux-rt.git/kernel/signal.c
Index: linux-3.2/kernel/signal.c
===================================================================
--- linux-rt.git.orig/kernel/signal.c
+++ linux-rt.git/kernel/signal.c
--- linux-3.2.orig/kernel/signal.c
+++ linux-3.2/kernel/signal.c
@@ -1273,8 +1273,8 @@ int do_send_sig_info(int sig, struct sig
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
@ -124,27 +149,3 @@ Index: linux-rt.git/kernel/signal.c
/*
* Nuke all other threads in the group.
*/
Index: linux-rt.git/arch/x86/include/asm/signal.h
===================================================================
--- linux-rt.git.orig/arch/x86/include/asm/signal.h
+++ linux-rt.git/arch/x86/include/asm/signal.h
@@ -31,6 +31,19 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
+/*
+ * Because some traps use the IST stack, we must keep
+ * preemption disabled while calling do_trap(), but do_trap()
+ * may call force_sig_info() which will grab the signal spin_locks
+ * for the task, which in PREEMPT_RT_FULL are mutexes.
+ * By defining ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will
+ * set TIF_NOTIFY_RESUME and set up the signal to be sent on exit
+ * of the trap.
+ */
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
+#define ARCH_RT_DELAYS_SIGNAL_SEND
+#endif
+
#else
/* Here we must cater to libcs that poke about in kernel headers. */


@ -8,7 +8,7 @@ Index: linux-3.2/kernel/panic.c
===================================================================
--- linux-3.2.orig/kernel/panic.c
+++ linux-3.2/kernel/panic.c
@@ -334,9 +334,11 @@ static u64 oops_id;
@@ -343,9 +343,11 @@ static u64 oops_id;
static int init_oops_id(void)
{


@ -137,7 +137,7 @@ Index: linux-3.2/kernel/softirq.c
void __init softirq_early_init(void)
{
@@ -448,7 +454,7 @@ EXPORT_SYMBOL(in_serving_softirq);
@@ -455,7 +461,7 @@ EXPORT_SYMBOL(in_serving_softirq);
* Called with bh and local interrupts disabled. For full RT cpu must
* be pinned.
*/
@ -146,7 +146,7 @@ Index: linux-3.2/kernel/softirq.c
{
u32 pending = local_softirq_pending();
int cpu = smp_processor_id();
@@ -462,7 +468,7 @@ static void __do_softirq(void)
@@ -469,7 +475,7 @@ static void __do_softirq(void)
lockdep_softirq_enter();
@ -155,7 +155,7 @@ Index: linux-3.2/kernel/softirq.c
pending = local_softirq_pending();
if (pending)
@@ -501,7 +507,7 @@ static int __thread_do_softirq(int cpu)
@@ -508,7 +514,7 @@ static int __thread_do_softirq(int cpu)
* schedule!
*/
if (local_softirq_pending())


@ -77,7 +77,7 @@ Index: linux-3.2/include/linux/sched.h
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
@@ -2677,11 +2679,22 @@ static inline void set_task_cpu(struct t
@@ -2681,11 +2683,22 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */


@ -346,7 +346,7 @@ Index: linux-3.2/include/linux/sched.h
#include <asm/processor.h>
@@ -1431,7 +1432,9 @@ struct task_struct {
@@ -1435,7 +1436,9 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
@ -356,7 +356,7 @@ Index: linux-3.2/include/linux/sched.h
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
@@ -1580,6 +1583,17 @@ struct task_struct {
@@ -1584,6 +1587,17 @@ struct task_struct {
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)


@ -14,7 +14,6 @@ you can flip kmaps around like below.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
[dvhart@linux.intel.com: build fix]
Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
[bwh: Fix context in arch/x86/kernel/process_32.c after FPU changes in 3.2.8]
---
arch/x86/kernel/process_32.c | 36 ++++++++++++++++++++++++++++++++++++
include/linux/sched.h | 5 +++++
@ -33,7 +32,7 @@ Index: linux-3.2/arch/x86/kernel/process_32.c
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -350,6 +351,41 @@ __switch_to(struct task_struct *prev_p,
@@ -339,6 +340,41 @@ __switch_to(struct task_struct *prev_p,
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
@ -87,7 +86,7 @@ Index: linux-3.2/include/linux/sched.h
#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -1599,6 +1600,10 @@ struct task_struct {
@@ -1603,6 +1604,10 @@ struct task_struct {
struct rcu_head put_rcu;
int softirq_nestcnt;
#endif


@ -0,0 +1,53 @@
Subject: futex/rt: Fix possible lockup when taking pi_lock in proxy handler
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 01 Mar 2012 13:55:29 -0500
When taking the pi_lock, we must disable interrupts because the
pi_lock can also be taken in an interrupt handler.
Use raw_spin_lock_irq() instead of raw_spin_lock().
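Sketch of the deadlock being closed (assumed interrupt timing,
condensed):

    raw_spin_lock(&task->pi_lock);      /* irqs still enabled ...            */
    /* <irq fires here; handler also takes task->pi_lock -> self-deadlock>   */

    raw_spin_lock_irq(&task->pi_lock);  /* irqs off first: the handler waits */
    /* ... pi_blocked_on handling ... */
    raw_spin_unlock_irq(&task->pi_lock);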
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190345.165160680@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/rtmutex.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 9850dc0..b525158 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1373,14 +1373,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
* PI_REQUEUE_INPROGRESS, so that if the task is waking up
* it will know that we are in the process of requeuing it.
*/
- raw_spin_lock(&task->pi_lock);
+ raw_spin_lock_irq(&task->pi_lock);
if (task->pi_blocked_on) {
- raw_spin_unlock(&task->pi_lock);
+ raw_spin_unlock_irq(&task->pi_lock);
raw_spin_unlock(&lock->wait_lock);
return -EAGAIN;
}
task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
- raw_spin_unlock(&task->pi_lock);
+ raw_spin_unlock_irq(&task->pi_lock);
#endif
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);


@ -0,0 +1,118 @@
Subject: lglock/rt: Use non-rt for_each_cpu() in -rt code
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 01 Mar 2012 13:55:30 -0500
Currently the RT version of the lglocks() does a for_each_online_cpu()
in the name##_global_lock_online() functions. Non-rt uses its own
mask for this, and for good reason.
A task may grab a *_global_lock_online(), and in the mean time, one
of the CPUs goes offline. Now when that task does a *_global_unlock_online()
it releases all the locks *except* the one that went offline.
Now if that CPU were to come back on line, its lock is now owned by a
task that never released it when it should have.
This causes all sorts of fun errors. Like owners of a lock no longer
existing, or sleeping on IO, waiting to be woken up by a task that
happens to be blocked on the lock it never released.
Convert the RT versions to use the lglock specific cpumasks. As once
a CPU comes on line, the mask is set, and never cleared even when the
CPU goes offline. The locks for that CPU will still be taken and released.
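A de-macroized sketch of the fixed pattern (names follow the macro
expansions in the diff below):

    spin_lock(&name_cpu_lock);          /* pin the mask across the section */
    for_each_cpu(i, &name_cpus)
            __rt_spin_lock(&per_cpu(name_lock, i));
    /* ... online-locked section ... */
    for_each_cpu(i, &name_cpus)
            __rt_spin_unlock(&per_cpu(name_lock, i));
    spin_unlock(&name_cpu_lock);        /* now the notifier may update the mask */

Because name_cpu_lock is held from lock to unlock, the hotplug notifier
cannot change name_cpus between the two walks, so exactly the locked
set gets released.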
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190345.374756214@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/lglock.h | 35 ++++++++++++++++++++++++++++++++---
1 files changed, 32 insertions(+), 3 deletions(-)
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 52b289f..cdfcef3 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -203,9 +203,31 @@
#else /* !PREEMPT_RT_FULL */
#define DEFINE_LGLOCK(name) \
\
- DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
DEFINE_LGLOCK_LOCKDEP(name); \
\
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+ unsigned long action, void *hcpu) \
+ { \
+ switch (action & ~CPU_TASKS_FROZEN) { \
+ case CPU_UP_PREPARE: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_set((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ break; \
+ case CPU_UP_CANCELED: case CPU_DEAD: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_clear((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+ .notifier_call = name##_lg_cpu_callback, \
+ }; \
void name##_lock_init(void) { \
int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -214,6 +236,11 @@
lock = &per_cpu(name##_lock, i); \
rt_mutex_init(lock); \
} \
+ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+ get_online_cpus(); \
+ for_each_online_cpu(i) \
+ cpu_set(i, name##_cpus); \
+ put_online_cpus(); \
} \
EXPORT_SYMBOL(name##_lock_init); \
\
@@ -254,7 +281,8 @@
void name##_global_lock_online(void) { \
int i; \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ spin_lock(&name##_cpu_lock); \
+ for_each_cpu(i, &name##_cpus) { \
struct rt_mutex *lock; \
lock = &per_cpu(name##_lock, i); \
__rt_spin_lock(lock); \
@@ -265,11 +293,12 @@
void name##_global_unlock_online(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
struct rt_mutex *lock; \
lock = &per_cpu(name##_lock, i); \
__rt_spin_unlock(lock); \
} \
+ spin_unlock(&name##_cpu_lock); \
} \
EXPORT_SYMBOL(name##_global_unlock_online); \
\


@ -0,0 +1,33 @@
Subject: ring-buffer/rt: Check for irqs disabled before grabbing reader lock
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 01 Mar 2012 13:55:32 -0500
In RT the reader lock is a mutex and we cannot grab it when preemption is
disabled. The existing in_atomic() check does not cover the case where only
irqs are disabled. Add that check as well.
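The resulting guard, shown whole for context (only the in_atomic() line
actually changes in the diff below):

    static inline int ok_to_lock(void)
    {
            if (in_nmi())
                    return 0;
    #ifdef CONFIG_PREEMPT_RT_FULL
            if (in_atomic() || irqs_disabled())
                    return 0;   /* reader lock is a mutex on RT: must not sleep */
    #endif
            return 1;
    }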
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190345.786365803@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/trace/ring_buffer.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
Index: linux-3.2/kernel/trace/ring_buffer.c
===================================================================
--- linux-3.2.orig/kernel/trace/ring_buffer.c
+++ linux-3.2/kernel/trace/ring_buffer.c
@@ -1054,7 +1054,7 @@ static inline int ok_to_lock(void)
if (in_nmi())
return 0;
#ifdef CONFIG_PREEMPT_RT_FULL
- if (in_atomic())
+ if (in_atomic() || irqs_disabled())
return 0;
#endif
return 1;


@ -0,0 +1,58 @@
Subject: sched/rt: Fix wait_task_inactive() to test rt_spin_lock state
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 01 Mar 2012 13:55:33 -0500
wait_task_inactive() puts a task to sleep while it waits for another
task to reach a certain state. But it ignores the rt_spin_lock state
and can return an incorrect result if the task it is waiting
for is blocked on an rt_spin_lock() and is waking up.
The rt_spin_locks save the task's state in the saved_state field,
and wait_task_inactive() must also test that state.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190345.979435764@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/sched.c | 6 ++++--
1 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 81b340d..1cc706d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2450,7 +2450,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && unlikely(p->state != match_state))
+ if (match_state && unlikely(p->state != match_state)
+ && unlikely(p->saved_state != match_state))
return 0;
cpu_relax();
}
@@ -2465,7 +2466,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
on_rq = p->on_rq;
ncsw = 0;
- if (!match_state || p->state == match_state)
+ if (!match_state || p->state == match_state
+ || p->saved_state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);
--
1.7.3.4
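For background, the saved_state mechanism this fix relies on can be sketched as follows; this is a simplified rendering of the -rt spinlock slowpath, with the blocking and error handling elided:

/* rt_spin_lock() slowpath, simplified sketch: */
raw_spin_lock(&self->pi_lock);
self->saved_state = self->state;         /* e.g. TASK_INTERRUPTIBLE */
__set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock(&self->pi_lock);

/* ... block on the underlying rt_mutex ... */

raw_spin_lock(&self->pi_lock);
__set_current_state(self->saved_state);  /* restored on wakeup */
self->saved_state = TASK_RUNNING;
raw_spin_unlock(&self->pi_lock);

/*
 * While the task is blocked this way, p->state reads
 * TASK_UNINTERRUPTIBLE even though the state a caller asked to
 * match sits in p->saved_state, hence the extra comparisons the
 * patch adds to wait_task_inactive().
 */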

View File

@ -0,0 +1,72 @@
Subject: timer: Fix hotplug for -rt
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 01 Mar 2012 13:55:28 -0500
Revert the RT patch:
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jul 3 08:30:32 2009 -0500
timers: fix timer hotplug on -rt
Here we are in the CPU_DEAD notifier, and we must not sleep nor
enable interrupts.
There's no problem with sleeping in this notifier.
But the get_cpu_var() had to be converted to a get_local_var().
Replace the previous fix with the get_local_var() conversion.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190344.948157137@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/timer.c | 16 +++++-----------
1 file changed, 5 insertions(+), 11 deletions(-)
Index: linux-3.2/kernel/timer.c
===================================================================
--- linux-3.2.orig/kernel/timer.c
+++ linux-3.2/kernel/timer.c
@@ -1743,21 +1743,17 @@ static void __cpuinit migrate_timers(int
{
struct tvec_base *old_base;
struct tvec_base *new_base;
- unsigned long flags;
int i;
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
- new_base = get_cpu_var(tvec_bases);
+ new_base = get_local_var(tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- local_irq_save(flags);
- while (!spin_trylock(&new_base->lock))
- cpu_relax();
- while (!spin_trylock(&old_base->lock))
- cpu_relax();
+ spin_lock_irq(&new_base->lock);
+ spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
BUG_ON(old_base->running_timer);
@@ -1771,10 +1767,8 @@ static void __cpuinit migrate_timers(int
}
spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
- local_irq_restore(flags);
-
- put_cpu_var(tvec_bases);
+ spin_unlock_irq(&new_base->lock);
+ put_local_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
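The difference between the two per-cpu accessors is the crux of the revert. A hedged sketch, assuming the usual -rt helper definitions (the exact form in a given -rt tree may differ):

/*
 * get_cpu_var() disables preemption, so sleeping locks (which
 * spin_lock_irq() is on -rt) must not be taken until put_cpu_var().
 * get_local_var() only disables migration: the task stays on its
 * CPU but is still allowed to block.
 */
#define get_local_var(var)  (*({ migrate_disable(); &__get_cpu_var(var); }))
#define put_local_var(var)  do { (void)&(var); migrate_enable(); } while (0)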

View File

@ -103,7 +103,7 @@ Index: linux-3.2/kernel/rtmutex.c
* See Documentation/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
@@ -95,6 +101,12 @@ static inline void mark_rt_mutex_waiters
@@ -96,6 +102,12 @@ static inline void mark_rt_mutex_waiters
}
#endif
@ -116,7 +116,7 @@ Index: linux-3.2/kernel/rtmutex.c
/*
* Calculate task priority from the waiter list priority
*
@@ -141,6 +153,14 @@ static void rt_mutex_adjust_prio(struct
@@ -142,6 +154,14 @@ static void rt_mutex_adjust_prio(struct
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
@ -131,7 +131,7 @@ Index: linux-3.2/kernel/rtmutex.c
/*
* Max number of times we'll walk the boosting chain:
*/
@@ -252,13 +272,15 @@ static int rt_mutex_adjust_prio_chain(st
@@ -253,13 +273,15 @@ static int rt_mutex_adjust_prio_chain(st
/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (!rt_mutex_owner(lock)) {
@ -150,7 +150,7 @@ Index: linux-3.2/kernel/rtmutex.c
raw_spin_unlock(&lock->wait_lock);
goto out_put_task;
}
@@ -303,6 +325,25 @@ static int rt_mutex_adjust_prio_chain(st
@@ -304,6 +326,25 @@ static int rt_mutex_adjust_prio_chain(st
return ret;
}
@ -176,7 +176,7 @@ Index: linux-3.2/kernel/rtmutex.c
/*
* Try to take an rt-mutex
*
@@ -312,8 +353,9 @@ static int rt_mutex_adjust_prio_chain(st
@@ -313,8 +354,9 @@ static int rt_mutex_adjust_prio_chain(st
* @task: the task which wants to acquire the lock
* @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
*/
@ -188,7 +188,7 @@ Index: linux-3.2/kernel/rtmutex.c
{
/*
* We have to be careful here if the atomic speedups are
@@ -346,12 +388,14 @@ static int try_to_take_rt_mutex(struct r
@@ -347,12 +389,14 @@ static int try_to_take_rt_mutex(struct r
* 3) it is top waiter
*/
if (rt_mutex_has_waiters(lock)) {
@ -207,7 +207,7 @@ Index: linux-3.2/kernel/rtmutex.c
if (waiter || rt_mutex_has_waiters(lock)) {
unsigned long flags;
struct rt_mutex_waiter *top;
@@ -376,7 +420,6 @@ static int try_to_take_rt_mutex(struct r
@@ -377,7 +421,6 @@ static int try_to_take_rt_mutex(struct r
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
@ -215,7 +215,7 @@ Index: linux-3.2/kernel/rtmutex.c
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task);
@@ -386,6 +429,13 @@ static int try_to_take_rt_mutex(struct r
@@ -387,6 +430,13 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@ -229,7 +229,7 @@ Index: linux-3.2/kernel/rtmutex.c
/*
* Task blocks on lock.
*
@@ -500,7 +550,7 @@ static void wakeup_next_waiter(struct rt
@@ -501,7 +551,7 @@ static void wakeup_next_waiter(struct rt
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
@ -238,7 +238,7 @@ Index: linux-3.2/kernel/rtmutex.c
}
/*
@@ -579,18 +629,315 @@ void rt_mutex_adjust_pi(struct task_stru
@@ -580,18 +630,315 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
@ -557,7 +557,7 @@ Index: linux-3.2/kernel/rtmutex.c
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
*
@@ -654,9 +1001,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
@@ -655,9 +1002,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
struct rt_mutex_waiter waiter;
int ret = 0;
@ -569,7 +569,7 @@ Index: linux-3.2/kernel/rtmutex.c
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -709,6 +1057,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
@@ -710,6 +1058,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
int ret = 0;
raw_spin_lock(&lock->wait_lock);
@ -577,7 +577,7 @@ Index: linux-3.2/kernel/rtmutex.c
if (likely(rt_mutex_owner(lock) != current)) {
@@ -941,7 +1290,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
@@ -942,7 +1291,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@ -585,7 +585,7 @@ Index: linux-3.2/kernel/rtmutex.c
plist_head_init(&lock->wait_list);
debug_rt_mutex_init(lock, name);
@@ -961,7 +1309,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
@@ -962,7 +1310,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@ -606,7 +606,7 @@ Index: linux-3.2/kernel/rtmutex_common.h
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
@@ -125,4 +126,12 @@ extern int rt_mutex_finish_proxy_lock(st
@@ -126,4 +127,12 @@ extern int rt_mutex_finish_proxy_lock(st
# include "rtmutex.h"
#endif

View File

@ -12,7 +12,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -1587,6 +1587,9 @@ struct task_struct {
@@ -1591,6 +1591,9 @@ struct task_struct {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
#endif
@ -22,7 +22,7 @@ Index: linux-3.2/include/linux/sched.h
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1771,6 +1774,15 @@ extern struct pid *cad_pid;
@@ -1775,6 +1778,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@ -38,7 +38,7 @@ Index: linux-3.2/include/linux/sched.h
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -1778,6 +1790,7 @@ static inline void put_task_struct(struc
@@ -1782,6 +1794,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}

View File

@ -42,7 +42,7 @@ Index: linux-3.2/include/linux/sched.h
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
@@ -1594,9 +1595,6 @@ struct task_struct {
@@ -1598,9 +1599,6 @@ struct task_struct {
#endif
};
@ -52,7 +52,7 @@ Index: linux-3.2/include/linux/sched.h
#ifdef CONFIG_PREEMPT_RT_FULL
static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
#else
@@ -2679,6 +2677,15 @@ static inline void set_task_cpu(struct t
@@ -2683,6 +2681,15 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */

View File

@ -39,7 +39,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -2264,12 +2264,24 @@ extern struct mm_struct * mm_alloc(void)
@@ -2268,12 +2268,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);

View File

@ -12,7 +12,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -2091,12 +2091,20 @@ extern unsigned int sysctl_sched_cfs_ban
@@ -2095,12 +2095,20 @@ extern unsigned int sysctl_sched_cfs_ban
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);

View File

@ -28,7 +28,7 @@ Index: linux-3.2/include/linux/sched.h
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
@@ -2174,6 +2176,7 @@ extern void xtime_update(unsigned long t
@@ -2178,6 +2180,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);

View File

@ -0,0 +1,56 @@
Subject: seqlock: Prevent rt starvation
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Feb 2012 12:03:30 +0100
If a low-prio writer gets preempted while holding the seqlock write
locked, a high-prio reader spins forever on RT.
To prevent this, let the reader grab the spinlock, so it blocks and
eventually boosts the writer. This way the writer can proceed and
endless spinning is prevented.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
include/linux/seqlock.h | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
Index: linux-3.2/include/linux/seqlock.h
===================================================================
--- linux-3.2.orig/include/linux/seqlock.h
+++ linux-3.2/include/linux/seqlock.h
@@ -177,10 +177,33 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
+#ifndef CONFIG_PREEMPT_RT
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+ unsigned ret;
+
+repeat:
+ ret = sl->seqcount.sequence;
+ if (unlikely(ret & 1)) {
+ /*
+	 * Take the lock and let the writer proceed (i.e. possibly
+	 * boost it), otherwise we could loop here forever.
+ */
+ spin_lock(&sl->lock);
+ spin_unlock(&sl->lock);
+ goto repeat;
+ }
+ return ret;
+}
+#endif
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
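For reference, the canonical read-side loop these functions serve; this is generic seqlock usage, not part of the patch:

seqlock_t sl;            /* assume initialized via seqlock_init() */
struct timespec shared, snap;

unsigned seq;
do {
	seq = read_seqbegin(&sl);  /* on RT may briefly take sl->lock */
	snap = shared;             /* racy copy, validated below */
} while (read_seqretry(&sl, seq));

/*
 * With the mainline read_seqbegin() a high-prio reader that
 * preempted the writer spins in this loop forever on RT; the
 * variant above blocks on sl->lock instead, boosting the writer
 * until the sequence count is even again.
 */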

View File

@ -0,0 +1,94 @@
Subject: seqlock: Provide seq_spin_* functions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 27 Feb 2012 17:55:11 +0100
In some cases it's desirable to lock the seqlock w/o changing the
seqcount. Provide functions for this, so we can avoid open-coded
constructs.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
include/linux/seqlock.h | 64 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
Index: rt/include/linux/seqlock.h
===================================================================
--- rt.orig/include/linux/seqlock.h
+++ rt/include/linux/seqlock.h
@@ -188,6 +188,19 @@ static inline unsigned read_seqretry(con
}
/*
+ * Ditto w/o barriers
+ */
+static inline unsigned __read_seqbegin(const seqlock_t *sl)
+{
+ return __read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned __read_seqretry(const seqlock_t *sl, unsigned start)
+{
+ return __read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
* Lock out other writers and update the count.
* Acts like a normal spin_lock/unlock.
* Don't need preempt_disable() because that is in the spin_lock already.
@@ -247,4 +260,55 @@ write_sequnlock_irqrestore(seqlock_t *sl
spin_unlock_irqrestore(&sl->lock, flags);
}
+/*
+ * Instead of open coding a spinlock and a seqcount, the following
+ * functions allow to serialize on the seqlock w/o touching seqcount.
+ */
+static inline void seq_spin_lock(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+}
+
+static inline int seq_spin_trylock(seqlock_t *sl)
+{
+ return spin_trylock(&sl->lock);
+}
+
+static inline void seq_spin_unlock(seqlock_t *sl)
+{
+ spin_unlock(&sl->lock);
+}
+
+static inline void assert_seq_spin_locked(seqlock_t *sl)
+{
+ assert_spin_locked(&sl->lock);
+}
+
+static inline void seq_spin_lock_nested(seqlock_t *sl, int subclass)
+{
+ spin_lock_nested(&sl->lock, subclass);
+}
+
+/*
+ * For writers which need to take/release the lock w/o updating seqcount for
+ * whatever reasons the following functions allow to update the count
+ * after the lock has been acquired or before it is released.
+ */
+static inline void write_seqlock_begin(seqlock_t *sl)
+{
+ assert_spin_locked(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_seqlock_end(seqlock_t *sl)
+{
+ assert_spin_locked(&sl->lock);
+ write_seqcount_end(&sl->seqcount);
+}
+
+static inline void write_seqlock_barrier(seqlock_t *sl)
+{
+ write_seqcount_barrier(&sl->seqcount);
+}
+
#endif /* __LINUX_SEQLOCK_H */
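A hypothetical writer using the split API; the structure and field names are invented for illustration and only stand in for users like the fs_struct conversion:

struct my_fs {                       /* hypothetical */
	seqlock_t lock;
	struct path pwd;
};

static void update_pwd(struct my_fs *fs, struct path *new_pwd)
{
	seq_spin_lock(&fs->lock);        /* serialize writers, seqcount untouched */
	/* ... updates readers never sample through the seqcount ... */
	write_seqlock_begin(&fs->lock);  /* open the read-retry window */
	fs->pwd = *new_pwd;              /* fields readers copy under read_seqbegin() */
	write_seqlock_end(&fs->lock);
	seq_spin_unlock(&fs->lock);
}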

View File

@ -1,101 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jul 2009 19:27:54 +0200
Subject: seqlock: Create raw_seqlock
raw_seqlock_t will be used to annotate seqlocks which cannot be
converted to sleeping locks in preempt-rt
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/seqlock.h | 63 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
Index: linux-3.2/include/linux/seqlock.h
===================================================================
--- linux-3.2.orig/include/linux/seqlock.h
+++ linux-3.2/include/linux/seqlock.h
@@ -152,6 +152,11 @@ static inline void write_seqcount_barrie
typedef struct {
struct seqcount seqcount;
+ raw_spinlock_t lock;
+} raw_seqlock_t;
+
+typedef struct {
+ struct seqcount seqcount;
spinlock_t lock;
} seqlock_t;
@@ -159,6 +164,21 @@ typedef struct {
* These macros triggered gcc-3.x compile-time problems. We think these are
* OK now. Be cautious.
*/
+#define __RAW_SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_ZERO, \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ }
+
+#define raw_seqlock_init(x) \
+ do { \
+ seqcount_init(&(x)->seqcount); \
+ raw_spin_lock_init(&(x)->lock); \
+ } while (0)
+
+#define DEFINE_RAW_SEQLOCK(x) \
+ raw_seqlock_t x = __RAW_SEQLOCK_UNLOCKED(x)
+
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
.seqcount = SEQCNT_ZERO, \
@@ -182,6 +202,49 @@ typedef struct {
* Acts like a normal spin_lock/unlock.
* Don't need preempt_disable() because that is in the spin_lock already.
*/
+static inline void raw_write_seqlock(raw_seqlock_t *sl)
+{
+ raw_spin_lock(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void raw_write_sequnlock(raw_seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ raw_spin_unlock(&sl->lock);
+}
+
+static inline void raw_write_seqlock_irq(raw_seqlock_t *sl)
+{
+ raw_spin_lock_irq(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void raw_write_sequnlock_irq(raw_seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ raw_spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __raw_write_seqlock_irqsave(raw_seqlock_t *sl)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&sl->lock, flags);
+ write_seqcount_begin(&sl->seqcount);
+ return flags;
+}
+
+#define raw_write_seqlock_irqsave(lock, flags) \
+ do { flags = __raw_write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+raw_write_sequnlock_irqrestore(raw_seqlock_t *sl, unsigned long flags)
+{
+ write_seqcount_end(&sl->seqcount);
+ raw_spin_unlock_irqrestore(&sl->lock, flags);
+}
+
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);

View File

@ -1,4 +1,4 @@
Subject: seqlock-remove-unused-functions.patch
Subject: seqlock: Remove unused functions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 16 Jul 2011 18:38:22 +0200

View File

@ -1,16 +1,18 @@
Subject: seqlock-use-seqcount.patch
Subject: seqlock: Use seqcount
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 16 Jul 2011 18:40:26 +0200
No point in having different implementations for the same thing.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/seqlock.h | 166 ++++++++++++++++++++++++------------------------
1 file changed, 83 insertions(+), 83 deletions(-)
include/linux/seqlock.h | 176 +++++++++++++++++++++++++-----------------------
1 file changed, 93 insertions(+), 83 deletions(-)
Index: linux-3.2/include/linux/seqlock.h
Index: rt/include/linux/seqlock.h
===================================================================
--- linux-3.2.orig/include/linux/seqlock.h
+++ linux-3.2/include/linux/seqlock.h
--- rt.orig/include/linux/seqlock.h
+++ rt/include/linux/seqlock.h
@@ -30,81 +30,12 @@
#include <linux/preempt.h>
#include <asm/processor.h>
@ -101,7 +103,7 @@ Index: linux-3.2/include/linux/seqlock.h
return __read_seqcount_retry(s, start);
}
@@ -220,21 +150,91 @@ static inline void write_seqcount_barrie
@@ -220,21 +150,101 @@ static inline void write_seqcount_barrie
s->sequence+=2;
}
@ -130,8 +132,18 @@ Index: linux-3.2/include/linux/seqlock.h
+#define DEFINE_SEQLOCK(x) \
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+#define read_seqbegin(sl) read_seqcount_begin(&(sl)->seqcount)
+#define read_seqretry(sl, start) read_seqcount_retry(&(sl)->seqcount, start)
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+ return read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+ return read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
+ * Lock out other writers and update the count.

View File

@ -16,8 +16,6 @@ re-possible-slab-deadlock-while-doing-ifenslave-1.patch
# rtc-deal-with-errors-correctly.patch -- 3.0.1
# rtc-fix-hrtimer-deadlock.patch -- 3.0.1
# rtc-limit-frequency.patch -- 3.0.1
genirq-unmask-oneshot-irqs-when-thread-is-not-woken.patch
genirq-handle-pending-irqs-in-irq-startup.patch
# Some variant of this is in 3.1
@ -98,7 +96,21 @@ sched-prevent-boosting-from-throttling.patch
# Stuff which should go upstream ASAP
############################################################
# Timekeeping / VDSO
time-remove-bogus-comments.patch
x86-vdso-remove-bogus-locking-in-update_vsyscall_tz.patch
x86-vdso-use-seqcount.patch
ia64-vdso-use-seqcount.patch
# SEQLOCK
seqlock-remove-unused-functions.patch
seqlock-use-seqcount.patch
seqlock-provide-seq-spin-lock.patch
fs-struct-use-seqlock.patch
fs-dentry-use-seqlock.patch
# RAW SPINLOCKS
timekeeping-split-xtime-lock.patch
intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch
# MM memcg
@ -196,25 +208,15 @@ rcu-reduce-lock-section.patch
locking-various-init-fixes.patch
# rtc-tegra-lock-init.patch -- 3.0.1
# SEQLOCK
seqlock-remove-unused-functions.patch
seqlock-use-seqcount.patch
# PCI
wait-provide-__wake_up_all_locked.patch
pci-access-use-__wake_up_all_locked.patch
# ACPI
acpi-make-gbl-hardware-lock-raw.patch
acpi-make-ec-lock-raw-as-well.patch
#####################################################
# Stuff which should go mainline, but wants some care
#####################################################
# SEQLOCK
seqlock-raw-seqlock.patch
timekeeping-covert-xtimelock.patch
# ANON RW SEMAPHORES
@ -559,17 +561,32 @@ x86-crypto-reduce-preempt-disabled-regions.patch
# Device mapper
dm-make-rt-aware.patch
# ACPI
# Dropped those two as they cause a scheduling in atomic failure and
# we have no clue why we made those locks raw in the first place.
# acpi-make-gbl-hardware-lock-raw.patch
# acpi-make-ec-lock-raw-as-well.patch
# This one is just a follow up to the raw spin locks
# Simple raw spinlock based waitqueue
wait-simple-version.patch
acpi-gpe-use-wait-simple.patch
# wait-simple-version.patch
# acpi-gpe-use-wait-simple.patch
# CPUMASK OFFSTACK
cpumask-disable-offstack-on-rt.patch
# Various fixes - fold them back
seqlock-prevent-rt-starvation.patch
fs-protect-opencoded-isize-seqcount.patch
net-u64-stat-protect-seqcount.patch
rfc-timer-fix-hotplug-for-rt.patch
rfc-futex-rt-fix-possible-lockup-when-taking-pi_lock-in-proxy-handler.patch
rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch
rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch
# Enable full RT
cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
kconfig-disable-a-few-options-rt.patch
kconfig-preempt-rt-full.patch
# Needs some thought and testing
#softirq-preempt-fix.patch

View File

@ -11,7 +11,7 @@ Index: linux-3.2/kernel/signal.c
===================================================================
--- linux-3.2.orig/kernel/signal.c
+++ linux-3.2/kernel/signal.c
@@ -1329,12 +1329,12 @@ struct sighand_struct *__lock_task_sigha
@@ -1362,12 +1362,12 @@ struct sighand_struct *__lock_task_sigha
struct sighand_struct *sighand;
for (;;) {
@ -26,7 +26,7 @@ Index: linux-3.2/kernel/signal.c
break;
}
@@ -1345,7 +1345,7 @@ struct sighand_struct *__lock_task_sigha
@@ -1378,7 +1378,7 @@ struct sighand_struct *__lock_task_sigha
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();

View File

@ -19,7 +19,7 @@ Index: linux-3.2/arch/powerpc/kernel/irq.c
===================================================================
--- linux-3.2.orig/arch/powerpc/kernel/irq.c
+++ linux-3.2/arch/powerpc/kernel/irq.c
@@ -443,6 +443,7 @@ void irq_ctx_init(void)
@@ -440,6 +440,7 @@ void irq_ctx_init(void)
}
}
@ -27,7 +27,7 @@ Index: linux-3.2/arch/powerpc/kernel/irq.c
static inline void do_softirq_onstack(void)
{
struct thread_info *curtp, *irqtp;
@@ -479,7 +480,7 @@ void do_softirq(void)
@@ -476,7 +477,7 @@ void do_softirq(void)
local_irq_restore(flags);
}

View File

@ -89,7 +89,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -1597,6 +1597,7 @@ struct task_struct {
@@ -1601,6 +1601,7 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;

View File

@ -12,7 +12,7 @@ Index: linux-3.2/include/linux/sched.h
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
@@ -1818,6 +1818,7 @@ extern void thread_group_times(struct ta
@@ -1822,6 +1822,7 @@ extern void thread_group_times(struct ta
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */

View File

@ -0,0 +1,38 @@
Subject: time: Remove bogus comments
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 28 Feb 2012 19:06:50 +0100
There is no global irq lock which makes a syscall magically SMP
safe. Remove the outdated comment concerning do_settimeofday() as
well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/time.c | 6 ------
1 file changed, 6 deletions(-)
Index: linux-3.2/kernel/time.c
===================================================================
--- linux-3.2.orig/kernel/time.c
+++ linux-3.2/kernel/time.c
@@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct tim
return error;
if (tz) {
- /* SMP safe, global irq locking makes it work. */
sys_tz = *tz;
update_vsyscall_tz();
if (firsttime) {
@@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct tim
}
}
if (tv)
- {
- /* SMP safe, again the code in arch/foo/time.c should
- * globally block out interrupts when it runs.
- */
return do_settimeofday(tv);
- }
return 0;
}

View File

@ -1,381 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jul 2009 19:43:27 +0200
Subject: timekeeping: Convert xtime_lock to raw_seqlock
Convert xtime_lock to raw_seqlock and fix up all users.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/ia64/kernel/time.c | 6 +++---
arch/x86/include/asm/vgtod.h | 2 +-
arch/x86/kernel/vsyscall_64.c | 10 +++++-----
kernel/time/ntp.c | 16 ++++++++--------
kernel/time/tick-common.c | 4 ++--
kernel/time/tick-internal.h | 2 +-
kernel/time/tick-sched.c | 8 ++++----
kernel/time/timekeeping.c | 31 +++++++++++++++----------------
8 files changed, 39 insertions(+), 40 deletions(-)
Index: linux-3.2/arch/ia64/kernel/time.c
===================================================================
--- linux-3.2.orig/arch/ia64/kernel/time.c
+++ linux-3.2/arch/ia64/kernel/time.c
@@ -36,7 +36,7 @@
static cycle_t itc_get_cycles(struct clocksource *cs);
struct fsyscall_gtod_data_t fsyscall_gtod_data = {
- .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
+ .lock = __RAW_SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
};
struct itc_jitter_data_t itc_jitter_data;
@@ -462,7 +462,7 @@ void update_vsyscall(struct timespec *wa
{
unsigned long flags;
- write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+ raw_write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
@@ -485,6 +485,6 @@ void update_vsyscall(struct timespec *wa
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
- write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+ raw_write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
Index: linux-3.2/arch/x86/include/asm/vgtod.h
===================================================================
--- linux-3.2.orig/arch/x86/include/asm/vgtod.h
+++ linux-3.2/arch/x86/include/asm/vgtod.h
@@ -5,7 +5,7 @@
#include <linux/clocksource.h>
struct vsyscall_gtod_data {
- seqlock_t lock;
+ raw_seqlock_t lock;
/* open coded 'struct timespec' */
time_t wall_time_sec;
Index: linux-3.2/arch/x86/kernel/vsyscall_64.c
===================================================================
--- linux-3.2.orig/arch/x86/kernel/vsyscall_64.c
+++ linux-3.2/arch/x86/kernel/vsyscall_64.c
@@ -54,7 +54,7 @@
DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
{
- .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
+ .lock = __RAW_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
@@ -82,10 +82,10 @@ void update_vsyscall_tz(void)
{
unsigned long flags;
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
/* sys_tz has changed */
vsyscall_gtod_data.sys_tz = sys_tz;
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
@@ -93,7 +93,7 @@ void update_vsyscall(struct timespec *wa
{
unsigned long flags;
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
/* copy vsyscall data */
vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -106,7 +106,7 @@ void update_vsyscall(struct timespec *wa
vsyscall_gtod_data.wall_to_monotonic = *wtm;
vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
Index: linux-3.2/kernel/time/ntp.c
===================================================================
--- linux-3.2.orig/kernel/time/ntp.c
+++ linux-3.2/kernel/time/ntp.c
@@ -358,7 +358,7 @@ static enum hrtimer_restart ntp_leap_sec
{
enum hrtimer_restart res = HRTIMER_NORESTART;
- write_seqlock(&xtime_lock);
+ raw_write_seqlock(&xtime_lock);
switch (time_state) {
case TIME_OK:
@@ -388,7 +388,7 @@ static enum hrtimer_restart ntp_leap_sec
break;
}
- write_sequnlock(&xtime_lock);
+ raw_write_sequnlock(&xtime_lock);
return res;
}
@@ -663,7 +663,7 @@ int do_adjtimex(struct timex *txc)
getnstimeofday(&ts);
- write_seqlock_irq(&xtime_lock);
+ raw_write_seqlock_irq(&xtime_lock);
if (txc->modes & ADJ_ADJTIME) {
long save_adjust = time_adjust;
@@ -705,7 +705,7 @@ int do_adjtimex(struct timex *txc)
/* fill PPS status fields */
pps_fill_timex(txc);
- write_sequnlock_irq(&xtime_lock);
+ raw_write_sequnlock_irq(&xtime_lock);
txc->time.tv_sec = ts.tv_sec;
txc->time.tv_usec = ts.tv_nsec;
@@ -903,7 +903,7 @@ void hardpps(const struct timespec *phas
pts_norm = pps_normalize_ts(*phase_ts);
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_write_seqlock_irqsave(&xtime_lock, flags);
/* clear the error bits, they will be set again if needed */
time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -916,7 +916,7 @@ void hardpps(const struct timespec *phas
* just start the frequency interval */
if (unlikely(pps_fbase.tv_sec == 0)) {
pps_fbase = *raw_ts;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
return;
}
@@ -931,7 +931,7 @@ void hardpps(const struct timespec *phas
time_status |= STA_PPSJITTER;
/* restart the frequency calibration interval */
pps_fbase = *raw_ts;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
pr_err("hardpps: PPSJITTER: bad pulse\n");
return;
}
@@ -948,7 +948,7 @@ void hardpps(const struct timespec *phas
hardpps_update_phase(pts_norm.nsec);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
}
EXPORT_SYMBOL(hardpps);
Index: linux-3.2/kernel/time/tick-common.c
===================================================================
--- linux-3.2.orig/kernel/time/tick-common.c
+++ linux-3.2/kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ raw_write_seqlock(&xtime_lock);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ raw_write_sequnlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
Index: linux-3.2/kernel/time/tick-internal.h
===================================================================
--- linux-3.2.orig/kernel/time/tick-internal.h
+++ linux-3.2/kernel/time/tick-internal.h
@@ -141,4 +141,4 @@ static inline int tick_device_is_functio
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_seqlock_t xtime_lock;
Index: linux-3.2/kernel/time/tick-sched.c
===================================================================
--- linux-3.2.orig/kernel/time/tick-sched.c
+++ linux-3.2/kernel/time/tick-sched.c
@@ -56,7 +56,7 @@ static void tick_do_update_jiffies64(kti
return;
/* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ raw_write_seqlock(&xtime_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(kti
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ raw_write_sequnlock(&xtime_lock);
}
/*
@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(vo
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ raw_write_seqlock(&xtime_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ raw_write_sequnlock(&xtime_lock);
return period;
}
Index: linux-3.2/kernel/time/timekeeping.c
===================================================================
--- linux-3.2.orig/kernel/time/timekeeping.c
+++ linux-3.2/kernel/time/timekeeping.c
@@ -139,8 +139,7 @@ static inline s64 timekeeping_get_ns_raw
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
+__cacheline_aligned_in_smp DEFINE_RAW_SEQLOCK(xtime_lock);
/*
* The current time
@@ -365,7 +364,7 @@ int do_settimeofday(const struct timespe
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_write_seqlock_irqsave(&xtime_lock, flags);
timekeeping_forward_now();
@@ -381,7 +380,7 @@ int do_settimeofday(const struct timespe
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -405,7 +404,7 @@ int timekeeping_inject_offset(struct tim
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_write_seqlock_irqsave(&xtime_lock, flags);
timekeeping_forward_now();
@@ -418,7 +417,7 @@ int timekeeping_inject_offset(struct tim
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -572,7 +571,7 @@ void __init timekeeping_init(void)
read_persistent_clock(&now);
read_boot_clock(&boot);
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_write_seqlock_irqsave(&xtime_lock, flags);
ntp_init();
@@ -593,7 +592,7 @@ void __init timekeeping_init(void)
-boot.tv_sec, -boot.tv_nsec);
total_sleep_time.tv_sec = 0;
total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
@@ -640,7 +639,7 @@ void timekeeping_inject_sleeptime(struct
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_write_seqlock_irqsave(&xtime_lock, flags);
timekeeping_forward_now();
__timekeeping_inject_sleeptime(delta);
@@ -650,7 +649,7 @@ void timekeeping_inject_sleeptime(struct
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -673,7 +672,7 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_write_seqlock_irqsave(&xtime_lock, flags);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -683,7 +682,7 @@ static void timekeeping_resume(void)
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
touch_softlockup_watchdog();
@@ -701,7 +700,7 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_write_seqlock_irqsave(&xtime_lock, flags);
timekeeping_forward_now();
timekeeping_suspended = 1;
@@ -724,7 +723,7 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ raw_write_sequnlock_irqrestore(&xtime_lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -1239,7 +1238,7 @@ ktime_t ktime_get_monotonic_offset(void)
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ raw_write_seqlock(&xtime_lock);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ raw_write_sequnlock(&xtime_lock);
}

View File

@ -0,0 +1,533 @@
Subject: timekeeping: Split xtime_lock
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Mar 2012 15:14:06 +0100
xtime_lock is going to be split apart in mainline, so we can shorten
the seqcount-protected regions and avoid updating the seqcount in some
code paths. This is a straightforward split, so we can avoid the
whole mess with raw seqlocks for RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/time/jiffies.c | 4 -
kernel/time/ntp.c | 24 +++++++----
kernel/time/tick-common.c | 10 ++--
kernel/time/tick-internal.h | 3 -
kernel/time/tick-sched.c | 16 ++++---
kernel/time/timekeeping.c | 90 +++++++++++++++++++++++++-------------------
6 files changed, 88 insertions(+), 59 deletions(-)
Index: linux-3.2/kernel/time/jiffies.c
===================================================================
--- linux-3.2.orig/kernel/time/jiffies.c
+++ linux-3.2/kernel/time/jiffies.c
@@ -74,9 +74,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
Index: linux-3.2/kernel/time/ntp.c
===================================================================
--- linux-3.2.orig/kernel/time/ntp.c
+++ linux-3.2/kernel/time/ntp.c
@@ -358,7 +358,8 @@ static enum hrtimer_restart ntp_leap_sec
{
enum hrtimer_restart res = HRTIMER_NORESTART;
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
switch (time_state) {
case TIME_OK:
@@ -388,7 +389,8 @@ static enum hrtimer_restart ntp_leap_sec
break;
}
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
return res;
}
@@ -663,7 +665,8 @@ int do_adjtimex(struct timex *txc)
getnstimeofday(&ts);
- write_seqlock_irq(&xtime_lock);
+ raw_spin_lock_irq(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
if (txc->modes & ADJ_ADJTIME) {
long save_adjust = time_adjust;
@@ -705,7 +708,8 @@ int do_adjtimex(struct timex *txc)
/* fill PPS status fields */
pps_fill_timex(txc);
- write_sequnlock_irq(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irq(&xtime_lock);
txc->time.tv_sec = ts.tv_sec;
txc->time.tv_usec = ts.tv_nsec;
@@ -903,7 +907,8 @@ void hardpps(const struct timespec *phas
pts_norm = pps_normalize_ts(*phase_ts);
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_spin_lock_irqsave(&xtime_lock, flags);
+ write_seqcount_begin(&xtime_seq);
/* clear the error bits, they will be set again if needed */
time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -916,7 +921,8 @@ void hardpps(const struct timespec *phas
* just start the frequency interval */
if (unlikely(pps_fbase.tv_sec == 0)) {
pps_fbase = *raw_ts;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
return;
}
@@ -931,7 +937,8 @@ void hardpps(const struct timespec *phas
time_status |= STA_PPSJITTER;
/* restart the frequency calibration interval */
pps_fbase = *raw_ts;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
pr_err("hardpps: PPSJITTER: bad pulse\n");
return;
}
@@ -948,7 +955,8 @@ void hardpps(const struct timespec *phas
hardpps_update_phase(pts_norm.nsec);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
}
EXPORT_SYMBOL(hardpps);
Index: linux-3.2/kernel/time/tick-common.c
===================================================================
--- linux-3.2.orig/kernel/time/tick-common.c
+++ linux-3.2/kernel/time/tick-common.c
@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_ev
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
Index: linux-3.2/kernel/time/tick-internal.h
===================================================================
--- linux-3.2.orig/kernel/time/tick-internal.h
+++ linux-3.2/kernel/time/tick-internal.h
@@ -141,4 +141,5 @@ static inline int tick_device_is_functio
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_spinlock_t xtime_lock;
+extern seqcount_t xtime_seq;
Index: linux-3.2/kernel/time/tick-sched.c
===================================================================
--- linux-3.2.orig/kernel/time/tick-sched.c
+++ linux-3.2/kernel/time/tick-sched.c
@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(kti
return;
/* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(kti
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
/*
@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(vo
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
return period;
}
@@ -345,11 +349,11 @@ void tick_nohz_stop_sched_tick(int inidl
ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
Index: linux-3.2/kernel/time/timekeeping.c
===================================================================
--- linux-3.2.orig/kernel/time/timekeeping.c
+++ linux-3.2/kernel/time/timekeeping.c
@@ -139,8 +139,8 @@ static inline s64 timekeeping_get_ns_raw
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
+seqcount_t xtime_seq;
/*
* The current time
@@ -222,7 +222,7 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
*ts = xtime;
nsecs = timekeeping_get_ns();
@@ -230,7 +230,7 @@ void getnstimeofday(struct timespec *ts)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -245,14 +245,14 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
nsecs += timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -278,14 +278,14 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
*ts = xtime;
tomono = wall_to_monotonic;
nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -313,7 +313,7 @@ void getnstime_raw_and_real(struct times
do {
u32 arch_offset;
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
*ts_raw = raw_time;
*ts_real = xtime;
@@ -326,7 +326,7 @@ void getnstime_raw_and_real(struct times
nsecs_raw += arch_offset;
nsecs_real += arch_offset;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -365,7 +365,8 @@ int do_settimeofday(const struct timespe
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_spin_lock_irqsave(&xtime_lock, flags);
+ write_seqcount_begin(&xtime_seq);
timekeeping_forward_now();
@@ -381,7 +382,8 @@ int do_settimeofday(const struct timespe
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -405,7 +407,8 @@ int timekeeping_inject_offset(struct tim
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_spin_lock_irqsave(&xtime_lock, flags);
+ write_seqcount_begin(&xtime_seq);
timekeeping_forward_now();
@@ -418,7 +421,8 @@ int timekeeping_inject_offset(struct tim
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -490,11 +494,11 @@ void getrawmonotonic(struct timespec *ts
s64 nsecs;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
nsecs = timekeeping_get_ns_raw();
*ts = raw_time;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -510,11 +514,11 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return ret;
}
@@ -572,7 +576,8 @@ void __init timekeeping_init(void)
read_persistent_clock(&now);
read_boot_clock(&boot);
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_spin_lock_irqsave(&xtime_lock, flags);
+ write_seqcount_begin(&xtime_seq);
ntp_init();
@@ -593,7 +598,8 @@ void __init timekeeping_init(void)
-boot.tv_sec, -boot.tv_nsec);
total_sleep_time.tv_sec = 0;
total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
@@ -640,7 +646,8 @@ void timekeeping_inject_sleeptime(struct
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_spin_lock_irqsave(&xtime_lock, flags);
+ write_seqcount_begin(&xtime_seq);
timekeeping_forward_now();
__timekeeping_inject_sleeptime(delta);
@@ -650,7 +657,8 @@ void timekeeping_inject_sleeptime(struct
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -673,7 +681,8 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_spin_lock_irqsave(&xtime_lock, flags);
+ write_seqcount_begin(&xtime_seq);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -683,7 +692,8 @@ static void timekeeping_resume(void)
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
touch_softlockup_watchdog();
@@ -701,7 +711,8 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&xtime_lock, flags);
+ raw_spin_lock_irqsave(&xtime_lock, flags);
+ write_seqcount_begin(&xtime_seq);
timekeeping_forward_now();
timekeeping_suspended = 1;
@@ -724,7 +735,8 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock_irqrestore(&xtime_lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -1101,13 +1113,13 @@ void get_monotonic_boottime(struct times
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
*ts = xtime;
tomono = wall_to_monotonic;
sleep = total_sleep_time;
nsecs = timekeeping_get_ns();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1158,10 +1170,10 @@ struct timespec current_kernel_time(void
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
now = xtime;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return now;
}
@@ -1173,11 +1185,11 @@ struct timespec get_monotonic_coarse(voi
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
now = xtime;
mono = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1209,11 +1221,11 @@ void get_xtime_and_monotonic_and_sleep_o
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
*xtim = xtime;
*wtom = wall_to_monotonic;
*sleep = total_sleep_time;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
}
/**
@@ -1225,9 +1237,9 @@ ktime_t ktime_get_monotonic_offset(void)
struct timespec wtom;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
wtom = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return timespec_to_ktime(wtom);
}
@@ -1239,7 +1251,9 @@ ktime_t ktime_get_monotonic_offset(void)
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
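Distilled, the split replaces one seqlock with a raw spinlock plus a bare seqcount. A generic sketch of the resulting pattern (names hypothetical):

static DEFINE_RAW_SPINLOCK(data_lock);  /* writer serialization */
static seqcount_t data_seq;             /* reader retry counter */

static void writer_update(void)
{
	raw_spin_lock(&data_lock);          /* only writers contend here */
	write_seqcount_begin(&data_seq);
	/* ... update the protected data ... */
	write_seqcount_end(&data_seq);
	raw_spin_unlock(&data_lock);
}

static void reader_snapshot(void)
{
	unsigned seq;
	do {
		seq = read_seqcount_begin(&data_seq);
		/* ... copy the protected data ... */
	} while (read_seqcount_retry(&data_seq, seq));
}

Readers never touch data_lock, and code that already holds the lock can update fields without bumping the seqcount, which is what shortens the seqcount-protected regions as described above.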

View File

@ -36,7 +36,7 @@ Index: linux-3.2/kernel/rtmutex.c
===================================================================
--- linux-3.2.orig/kernel/rtmutex.c
+++ linux-3.2/kernel/rtmutex.c
@@ -860,15 +860,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait);
@@ -861,15 +861,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait);
int __lockfunc rt_spin_trylock(spinlock_t *lock)
{

View File

@ -16,7 +16,7 @@ Index: linux-3.2/drivers/usb/core/hcd.c
===================================================================
--- linux-3.2.orig/drivers/usb/core/hcd.c
+++ linux-3.2/drivers/usb/core/hcd.c
@@ -2146,7 +2146,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
@@ -2145,7 +2145,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
* when the first handler doesn't use it. So let's just
* assume it's never used.
*/
@ -25,7 +25,7 @@ Index: linux-3.2/drivers/usb/core/hcd.c
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) {
rc = IRQ_NONE;
@@ -2159,7 +2159,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
@@ -2158,7 +2158,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
rc = IRQ_HANDLED;
}

View File

@ -1,258 +0,0 @@
Subject: wait-simple: Simple waitqueue implementation
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 12 Dec 2011 12:29:04 +0100
wait_queue is a Swiss army knife and in most cases the
complexity is not needed. For RT, waitqueues are a constant source of
trouble as we can't convert the head lock to a raw spinlock due to
fancy and long-lasting callbacks.
Provide a slim version, which allows RT to replace wait queues. This
should go mainline as well, as it lowers memory consumption and
runtime overhead.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/wait-simple.h | 152 ++++++++++++++++++++++++++++++++++++++++++++
kernel/Makefile | 2
kernel/wait-simple.c | 63 ++++++++++++++++++
3 files changed, 216 insertions(+), 1 deletion(-)
Index: linux-3.2/include/linux/wait-simple.h
===================================================================
--- /dev/null
+++ linux-3.2/include/linux/wait-simple.h
@@ -0,0 +1,152 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <asm/current.h>
+
+struct swaiter {
+ struct task_struct *task;
+ struct list_head node;
+};
+
+#define DEFINE_SWAITER(name) \
+ struct swaiter name = { \
+ .task = current, \
+ .node = LIST_HEAD_INIT((name).node), \
+ }
+
+struct swait_head {
+ raw_spinlock_t lock;
+ struct list_head list;
+};
+
+#define DEFINE_SWAIT_HEAD(name) \
+ struct swait_head name = { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .list = LIST_HEAD_INIT((name).list), \
+ }
+
+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
+
+#define init_swait_head(swh) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_swait_head((swh), &__key); \
+ } while (0)
+
+/*
+ * Waiter functions
+ */
+static inline bool swaiter_enqueued(struct swaiter *w)
+{
+ return w->task != NULL;
+}
+
+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
+extern void swait_finish(struct swait_head *head, struct swaiter *w);
+
+/*
+ * Adds w to head->list. Must be called with head->lock locked.
+ */
+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
+{
+ list_add(&w->node, &head->list);
+}
+
+/*
+ * Removes w from head->list. Must be called with head->lock locked.
+ */
+static inline void __swait_dequeue(struct swaiter *w)
+{
+ list_del_init(&w->node);
+}
+
+/*
+ * Wakeup functions
+ */
+extern void __swait_wake(struct swait_head *head, unsigned int state);
+
+static inline void swait_wake(struct swait_head *head)
+{
+ __swait_wake(head, TASK_NORMAL);
+}
+
+/*
+ * Event API
+ */
+
+#define __swait_event(wq, condition) \
+do { \
+ DEFINE_SWAITER(__wait); \
+ \
+ for (;;) { \
+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ schedule(); \
+ } \
+ swait_finish(&wq, &__wait); \
+} while (0)
+
+/**
+ * swait_event - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * swait_wake() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define swait_event(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event(wq, condition); \
+} while (0)
+
+#define __swait_event_timeout(wq, condition, ret) \
+do { \
+ DEFINE_SWAITER(__wait); \
+ \
+ for (;;) { \
+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ ret = schedule_timeout(ret); \
+ if (!ret) \
+ break; \
+ } \
+ swait_finish(&wq, &__wait); \
+} while (0)
+
+/**
+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * swait_wake() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if the @timeout elapsed, and the remaining
+ * jiffies if the condition evaluated to true before the timeout elapsed.
+ */
+#define swait_event_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!(condition)) \
+ __swait_event_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#endif
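To make the event API above concrete, here is a hedged usage sketch in the style of the ec.c conversion earlier in this series; the demo_* names and the 100ms timeout are illustrative assumptions, not taken from the patch:

	/* A simplified waiter/waker pair built on the simple waitqueue
	 * API declared above (assumes linux/jiffies.h for
	 * msecs_to_jiffies and linux/errno.h for ETIME). */
	static DEFINE_SWAIT_HEAD(demo_wait);
	static bool demo_done;

	/* Waiter: sleep uninterruptibly until demo_done is set or
	 * 100ms elapse; swait_event_timeout() returns the remaining
	 * jiffies, or 0 if the timeout expired. */
	static int demo_wait_for_done(void)
	{
		if (swait_event_timeout(demo_wait, demo_done,
					msecs_to_jiffies(100)))
			return 0;
		return -ETIME;
	}

	/* Waker, callable even from hard interrupt context since the
	 * head lock is a raw spinlock. */
	static void demo_complete(void)
	{
		demo_done = true;
		swait_wake(&demo_wait);
	}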
Index: linux-3.2/kernel/Makefile
===================================================================
--- linux-3.2.orig/kernel/Makefile
+++ linux-3.2/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
hrtimer.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o sched_clock.o cred.o \
- async.o range.o
+ async.o range.o wait-simple.o
obj-y += groups.o
ifdef CONFIG_FUNCTION_TRACER
Index: linux-3.2/kernel/wait-simple.c
===================================================================
--- /dev/null
+++ linux-3.2/kernel/wait-simple.c
@@ -0,0 +1,63 @@
+/*
+ * Simple waitqueues without fancy flags and callbacks
+ *
+ * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Based on kernel/wait.c
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/wait-simple.h>
+
+void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
+{
+ raw_spin_lock_init(&head->lock);
+ lockdep_set_class(&head->lock, key);
+ INIT_LIST_HEAD(&head->list);
+}
+EXPORT_SYMBOL_GPL(__init_swait_head);
+
+void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&head->lock, flags);
+ w->task = current;
+ __swait_enqueue(head, w);
+ set_current_state(state);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
+}
+EXPORT_SYMBOL_GPL(swait_prepare);
+
+void swait_finish(struct swait_head *head, struct swaiter *w)
+{
+ unsigned long flags;
+
+ __set_current_state(TASK_RUNNING);
+ if (w->task) {
+ raw_spin_lock_irqsave(&head->lock, flags);
+ __swait_dequeue(w);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(swait_finish);
+
+void __swait_wake(struct swait_head *head, unsigned int state)
+{
+ struct swaiter *curr, *next;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&head->lock, flags);
+
+ list_for_each_entry_safe(curr, next, &head->list, node) {
+ if (wake_up_state(curr->task, state)) {
+ __swait_dequeue(curr);
+ curr->task = NULL;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&head->lock, flags);
+}
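One design detail worth noting in __swait_wake() above: a waiter is dequeued, and its w->task cleared, under head->lock only when wake_up_state() actually wakes it. A successfully woken task therefore observes w->task == NULL in swait_finish() and skips taking the lock entirely; only a timeout or a lost wakeup race pays for the locked dequeue. This is also why swaiter_enqueued() can test w->task without holding the lock.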

View File

@ -0,0 +1,32 @@
Subject: x86: vdso: Remove bogus locking in update_vsyscall_tz()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 28 Feb 2012 19:10:46 +0100
Changing the sequence count in update_vsyscall_tz() is completely
pointless.
The vdso code copies the data unprotected. There is no point in
changing this, as sys_tz is not protected anywhere at all. See
sys_gettimeofday().
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/vsyscall_64.c | 5 -----
1 file changed, 5 deletions(-)
Index: linux-2.6/arch/x86/kernel/vsyscall_64.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/vsyscall_64.c
+++ linux-2.6/arch/x86/kernel/vsyscall_64.c
@@ -80,12 +80,7 @@ early_param("vsyscall", vsyscall_setup);
void update_vsyscall_tz(void)
{
- unsigned long flags;
-
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
- /* sys_tz has changed */
vsyscall_gtod_data.sys_tz = sys_tz;
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
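For context, the unprotected read side the description refers to lives in the vdso; slightly simplified from the 3.2-era arch/x86/vdso/vclock_gettime.c (the do_realtime() handling and surrounding checks are condensed here):

	/* The timezone fields are copied with no seqlock/seqcount
	 * protection, so taking the write-side lock in
	 * update_vsyscall_tz() protected nothing. */
	notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
	{
		if (likely(tv != NULL))
			do_realtime((struct timespec *)tv);	/* seq-protected */
		if (unlikely(tz != NULL)) {
			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
		}
		return 0;
	}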

View File

@ -0,0 +1,124 @@
Subject: x86: vdso: Use seqcount instead of seqlock
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 28 Feb 2012 18:24:07 +0100
The update of the vdso data happens under xtime_lock, so adding a
nested lock is pointless. Just use a seqcount to sync the readers.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/include/asm/vgtod.h | 2 +-
arch/x86/kernel/vsyscall_64.c | 11 +++--------
arch/x86/vdso/vclock_gettime.c | 16 ++++++++--------
3 files changed, 12 insertions(+), 17 deletions(-)
Index: linux-3.2/arch/x86/include/asm/vgtod.h
===================================================================
--- linux-3.2.orig/arch/x86/include/asm/vgtod.h
+++ linux-3.2/arch/x86/include/asm/vgtod.h
@@ -5,7 +5,7 @@
#include <linux/clocksource.h>
struct vsyscall_gtod_data {
- seqlock_t lock;
+ seqcount_t seq;
/* open coded 'struct timespec' */
time_t wall_time_sec;
Index: linux-3.2/arch/x86/kernel/vsyscall_64.c
===================================================================
--- linux-3.2.orig/arch/x86/kernel/vsyscall_64.c
+++ linux-3.2/arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
#include "vsyscall_trace.h"
DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
- .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
@@ -86,9 +83,7 @@ void update_vsyscall_tz(void)
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
struct clocksource *clock, u32 mult)
{
- unsigned long flags;
-
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ write_seqcount_begin(&vsyscall_gtod_data.seq);
/* copy vsyscall data */
vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -101,7 +96,7 @@ void update_vsyscall(struct timespec *wa
vsyscall_gtod_data.wall_to_monotonic = *wtm;
vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ write_seqcount_end(&vsyscall_gtod_data.seq);
}
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
Index: linux-3.2/arch/x86/vdso/vclock_gettime.c
===================================================================
--- linux-3.2.orig/arch/x86/vdso/vclock_gettime.c
+++ linux-3.2/arch/x86/vdso/vclock_gettime.c
@@ -86,11 +86,11 @@ notrace static noinline int do_realtime(
{
unsigned long seq, ns;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_sec;
ts->tv_nsec = gtod->wall_time_nsec;
ns = vgetns();
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
timespec_add_ns(ts, ns);
return 0;
}
@@ -99,12 +99,12 @@ notrace static noinline int do_monotonic
{
unsigned long seq, ns, secs;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
secs = gtod->wall_time_sec;
ns = gtod->wall_time_nsec + vgetns();
secs += gtod->wall_to_monotonic.tv_sec;
ns += gtod->wall_to_monotonic.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
* are all guaranteed to be nonnegative.
@@ -123,10 +123,10 @@ notrace static noinline int do_realtime_
{
unsigned long seq;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
return 0;
}
@@ -134,12 +134,12 @@ notrace static noinline int do_monotonic
{
unsigned long seq, ns, secs;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
secs = gtod->wall_time_coarse.tv_sec;
ns = gtod->wall_time_coarse.tv_nsec;
secs += gtod->wall_to_monotonic.tv_sec;
ns += gtod->wall_to_monotonic.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
/* wall_time_nsec and wall_to_monotonic.tv_nsec are
* guaranteed to be between 0 and NSEC_PER_SEC.
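The payoff of this pair of patches is visible from userspace: the vdso fast path above runs with no nested lock at all. A trivial consumer, for illustration (plain userspace C; older glibc may need -lrt to link clock_gettime):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* On x86-64, glibc typically routes this through the vdso
		 * do_realtime() fast path shown above: no syscall, and
		 * readers simply retry the seqcount loop if an update
		 * races with them. */
		if (clock_gettime(CLOCK_REALTIME, &ts) == 0)
			printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}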

View File

@ -12,6 +12,16 @@
+ features/all/rt/power-allow-irq-threading.patch featureset=rt
+ features/all/rt/sched-keep-period-timer-alive-when-throttled.patch featureset=rt
+ features/all/rt/sched-prevent-boosting-from-throttling.patch featureset=rt
+ features/all/rt/time-remove-bogus-comments.patch featureset=rt
+ features/all/rt/x86-vdso-remove-bogus-locking-in-update_vsyscall_tz.patch featureset=rt
+ features/all/rt/x86-vdso-use-seqcount.patch featureset=rt
+ features/all/rt/ia64-vdso-use-seqcount.patch featureset=rt
+ features/all/rt/seqlock-remove-unused-functions.patch featureset=rt
+ features/all/rt/seqlock-use-seqcount.patch featureset=rt
+ features/all/rt/seqlock-provide-seq-spin-lock.patch featureset=rt
+ features/all/rt/fs-struct-use-seqlock.patch featureset=rt
+ features/all/rt/fs-dentry-use-seqlock.patch featureset=rt
+ features/all/rt/timekeeping-split-xtime-lock.patch featureset=rt
+ features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch featureset=rt
+ features/all/rt/mm-memcg-shorten-preempt-disabled-section-around-event-checks.patch featureset=rt
+ features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch featureset=rt
@ -57,14 +67,8 @@
+ features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch featureset=rt
+ features/all/rt/rcu-reduce-lock-section.patch featureset=rt
+ features/all/rt/locking-various-init-fixes.patch featureset=rt
+ features/all/rt/seqlock-remove-unused-functions.patch featureset=rt
+ features/all/rt/seqlock-use-seqcount.patch featureset=rt
+ features/all/rt/wait-provide-__wake_up_all_locked.patch featureset=rt
+ features/all/rt/pci-access-use-__wake_up_all_locked.patch featureset=rt
+ features/all/rt/acpi-make-gbl-hardware-lock-raw.patch featureset=rt
+ features/all/rt/acpi-make-ec-lock-raw-as-well.patch featureset=rt
+ features/all/rt/seqlock-raw-seqlock.patch featureset=rt
+ features/all/rt/timekeeping-covert-xtimelock.patch featureset=rt
+ features/all/rt/latency-hist.patch featureset=rt
+ features/all/rt/hwlatdetect.patch featureset=rt
+ features/all/rt/early-printk-consolidate.patch featureset=rt
@ -246,8 +250,15 @@
+ features/all/rt/scsi-fcoe-rt-aware.patch featureset=rt
+ features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch featureset=rt
+ features/all/rt/dm-make-rt-aware.patch featureset=rt
+ features/all/rt/wait-simple-version.patch featureset=rt
+ features/all/rt/acpi-gpe-use-wait-simple.patch featureset=rt
+ features/all/rt/cpumask-disable-offstack-on-rt.patch featureset=rt
+ features/all/rt/seqlock-prevent-rt-starvation.patch featureset=rt
+ features/all/rt/fs-protect-opencoded-isize-seqcount.patch featureset=rt
+ features/all/rt/net-u64-stat-protect-seqcount.patch featureset=rt
+ features/all/rt/rfc-timer-fix-hotplug-for-rt.patch featureset=rt
+ features/all/rt/rfc-futex-rt-fix-possible-lockup-when-taking-pi_lock-in-proxy-handler.patch featureset=rt
+ features/all/rt/rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch featureset=rt
+ features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch featureset=rt
+ features/all/rt/rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch featureset=rt
+ features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch featureset=rt
+ features/all/rt/kconfig-disable-a-few-options-rt.patch featureset=rt
+ features/all/rt/kconfig-preempt-rt-full.patch featureset=rt