[rt] bump rt patch to version 3.2.9-rt17

svn path=/dists/sid/linux-2.6/; revision=18800
Uwe Kleine-König 2012-03-07 22:21:48 +00:00
parent d5de9df737
commit ddff170108
10 changed files with 467 additions and 3 deletions

debian/changelog

@@ -1,6 +1,6 @@
 linux-2.6 (3.2.9-2) UNRELEASED; urgency=low
 
-  * [rt] bump rt patch to version 3.2.9-rt16
+  * [rt] bump rt patch to version 3.2.9-rt17
 
 -- Uwe Kleine-König <u.kleine-koenig@pengutronix.de>  Tue, 06 Mar 2012 09:26:00 +0100

debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch

@@ -0,0 +1,80 @@
Subject: fs: dcache: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:00:34 +0100
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
fs/autofs4/autofs_i.h | 1 +
fs/autofs4/expire.c | 2 +-
fs/dcache.c | 7 ++++---
3 files changed, 6 insertions(+), 4 deletions(-)
Index: linux-3.2/fs/autofs4/autofs_i.h
===================================================================
--- linux-3.2.orig/fs/autofs4/autofs_i.h
+++ linux-3.2/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <asm/current.h>
#include <asm/uaccess.h>
Index: linux-3.2/fs/autofs4/expire.c
===================================================================
--- linux-3.2.orig/fs/autofs4/expire.c
+++ linux-3.2/fs/autofs4/expire.c
@@ -170,7 +170,7 @@ again:
parent = p->d_parent;
if (!seq_spin_trylock(&parent->d_lock)) {
seq_spin_unlock(&p->d_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
seq_spin_unlock(&p->d_lock);
Index: linux-3.2/fs/dcache.c
===================================================================
--- linux-3.2.orig/fs/dcache.c
+++ linux-3.2/fs/dcache.c
@@ -37,6 +37,7 @@
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
+#include <linux/delay.h>
#include "internal.h"
/*
@@ -410,7 +411,7 @@ static inline struct dentry *dentry_kill
if (inode && !spin_trylock(&inode->i_lock)) {
relock:
seq_spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
return dentry; /* try again with same dentry */
}
if (IS_ROOT(dentry))
@@ -796,7 +797,7 @@ relock:
if (!seq_spin_trylock(&dentry->d_lock)) {
spin_unlock(&dcache_lru_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
@@ -1974,7 +1975,7 @@ again:
if (dentry->d_count == 1) {
if (inode && !spin_trylock(&inode->i_lock)) {
seq_spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
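
For reference, the lock-ordering retry pattern these hunks convert, as a minimal standalone sketch (the function and lock names are illustrative, not taken from fs/dcache.c). On PREEMPT_RT spinlocks are sleeping locks, so if the lock holder was preempted by this (possibly higher-priority) looping task, cpu_relax() busy-waits without ever letting the holder run; cpu_chill() sleeps for a tick instead, so the holder can finish and drop the lock:

#include <linux/spinlock.h>
#include <linux/delay.h>

/* Illustrative helper, not a real fs/dcache.c function. */
static void lock_pair(spinlock_t *outer, spinlock_t *inner)
{
again:
	spin_lock(outer);
	if (!spin_trylock(inner)) {
		spin_unlock(outer);
		cpu_chill();		/* was: cpu_relax() */
		goto again;
	}
	/* ... both locks held ... */
	spin_unlock(inner);
	spin_unlock(outer);
}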

debian/patches/features/all/rt/fs-more-cpu-chill-fixups.patch

@@ -0,0 +1,35 @@
Subject: fs: namespace: Use cpu_chill() instead of cpu_relax()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:05:19 +0100
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
fs/namespace.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
Index: linux-3.2/fs/namespace.c
===================================================================
--- linux-3.2.orig/fs/namespace.c
+++ linux-3.2/fs/namespace.c
@@ -31,6 +31,7 @@
#include <linux/idr.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
@@ -346,7 +347,7 @@ int mnt_want_write(struct vfsmount *mnt)
*/
while (mnt->mnt_flags & MNT_WRITE_HOLD) {
preempt_enable();
- cpu_relax();
+ cpu_chill();
preempt_disable();
}
/*
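
One detail worth noting in this hunk: on RT, cpu_chill() maps to msleep(1) and therefore may sleep, so it must not be called with preemption disabled; the loop keeps the existing preempt_enable()/preempt_disable() bracket around the wait for exactly that reason. The shape, reduced to a sketch:

	while (mnt->mnt_flags & MNT_WRITE_HOLD) {
		preempt_enable();	/* cpu_chill() may sleep on RT ... */
		cpu_chill();		/* ... so preemption must be enabled here */
		preempt_disable();
	}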

debian/patches/features/all/rt/genirq-clear-thread-mask-for-non-oneshot.patch

@@ -0,0 +1,93 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue Mar 6 23:18:54 2012 +0100
Subject: genirq: Clear action->thread_mask if IRQ_ONESHOT is not set
commit ac5637611 (genirq: Unmask oneshot irqs when thread was not woken)
fails to unmask when a !IRQ_ONESHOT threaded handler is handled by
handle_level_irq.
This happens because thread_mask is or'ed unconditionally in
irq_wake_thread(), but never cleared for !IRQ_ONESHOT interrupts. So
the check for !desc->threads_active fails and keeps the interrupt
disabled.
Keep the thread_mask zero for !IRQ_ONESHOT interrupts.
Document the thread_mask magic while at it.
[ commit 52abb700e16a9aa4cbc03f3d7f80206cbbc80680 upstream ]
Reported-and-tested-by: Sven Joachim <svenjoac@gmx.de>
Reported-and-tested-by: Stefan Lippers-Hollmann <s.l-h@gmx.de>
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
kernel/irq/manage.c | 44 ++++++++++++++++++++++++++++++++++++++------
1 file changed, 38 insertions(+), 6 deletions(-)
Index: linux-3.2/kernel/irq/manage.c
===================================================================
--- linux-3.2.orig/kernel/irq/manage.c
+++ linux-3.2/kernel/irq/manage.c
@@ -985,6 +985,11 @@ __setup_irq(unsigned int irq, struct irq
/* add new interrupt at end of irq queue */
do {
+ /*
+ * Or all existing action->thread_mask bits,
+ * so we can find the next zero bit for this
+ * new action.
+ */
thread_mask |= old->thread_mask;
old_ptr = &old->next;
old = *old_ptr;
@@ -993,14 +998,41 @@ __setup_irq(unsigned int irq, struct irq
}
/*
- * Setup the thread mask for this irqaction. Unlikely to have
- * 32 resp 64 irqs sharing one line, but who knows.
+ * Setup the thread mask for this irqaction for ONESHOT. For
+ * !ONESHOT irqs the thread mask is 0 so we can avoid a
+ * conditional in irq_wake_thread().
*/
- if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
- ret = -EBUSY;
- goto out_mask;
+ if (new->flags & IRQF_ONESHOT) {
+ /*
+ * Unlikely to have 32 resp 64 irqs sharing one line,
+ * but who knows.
+ */
+ if (thread_mask == ~0UL) {
+ ret = -EBUSY;
+ goto out_mask;
+ }
+ /*
+ * The thread_mask for the action is or'ed to
+ * desc->threads_active to indicate that the
+ * IRQF_ONESHOT thread handler has been woken, but not
+ * yet finished. The bit is cleared when a thread
+ * completes. When all threads of a shared interrupt
+ * line have completed desc->threads_active becomes
+ * zero and the interrupt line is unmasked. See
+ * handle.c:irq_wake_thread() for further information.
+ *
+ * If no thread is woken by primary (hard irq context)
+ * interrupt handlers, then desc->threads_active is
+ * also checked for zero to unmask the irq line in the
+ * affected hard irq flow handlers
+ * (handle_[fasteoi|level]_irq).
+ *
+ * The new action gets the first zero bit of
+ * thread_mask assigned. See the loop above which or's
+ * all existing action->thread_mask bits.
+ */
+ new->thread_mask = 1 << ffz(thread_mask);
}
- new->thread_mask = 1 << ffz(thread_mask);
if (!shared) {
init_waitqueue_head(&desc->wait_for_threads);

debian/patches/features/all/rt/localversion.patch

@@ -14,4 +14,4 @@ Index: linux-3.2/localversion-rt
 --- /dev/null
 +++ linux-3.2/localversion-rt
 @@ -0,0 +1 @@
-+-rt16
++-rt17

debian/patches/features/all/rt/net-use-cpu-chill.patch

@@ -0,0 +1,66 @@
Subject: net: Use cpu_chill() instead of cpu_relax()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:10:04 +0100
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
net/packet/af_packet.c | 5 +++--
net/rds/ib_rdma.c | 3 ++-
2 files changed, 5 insertions(+), 3 deletions(-)
Index: linux-3.2/net/packet/af_packet.c
===================================================================
--- linux-3.2.orig/net/packet/af_packet.c
+++ linux-3.2/net/packet/af_packet.c
@@ -89,6 +89,7 @@
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
+#include <linux/delay.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -673,7 +674,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
- cpu_relax();
+ cpu_chill();
}
}
@@ -928,7 +929,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
- cpu_relax();
+ cpu_chill();
}
}
prb_close_block(pkc, pbd, po, status);
Index: linux-3.2/net/rds/ib_rdma.c
===================================================================
--- linux-3.2.orig/net/rds/ib_rdma.c
+++ linux-3.2/net/rds/ib_rdma.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>
+#include <linux/delay.h>
#include "rds.h"
#include "ib.h"
@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
- cpu_relax();
+ cpu_chill();
}
}
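
These call sites wait on a flag set by a preemptible writer rather than on a lock, but the failure mode is the same. A reduced sketch (the bit name is illustrative):

	/* Reader spins until the writer clears the busy bit. If the
	 * writer was preempted on RT, cpu_relax() spins forever at
	 * higher priority; cpu_chill() sleeps a tick so the writer
	 * can finish. */
	while (test_bit(COPY_BUSY_BIT, &flags))
		cpu_chill();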

debian/patches/features/all/rt/rt-introduce-cpu-chill.patch

@@ -0,0 +1,30 @@
Subject: rt: Introduce cpu_chill()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 20:51:03 +0100
Retry loops on RT might loop forever when the modifying side was
preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
defaults to cpu_relax() for non-RT. On RT it puts the looping task to
sleep for a tick so the preempted task can make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
include/linux/delay.h | 6 ++++++
1 file changed, 6 insertions(+)
Index: linux-3.2/include/linux/delay.h
===================================================================
--- linux-3.2.orig/include/linux/delay.h
+++ linux-3.2/include/linux/delay.h
@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s
msleep(seconds * 1000);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define cpu_chill() msleep(1)
+#else
+# define cpu_chill() cpu_relax()
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
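
Callers include <linux/delay.h> and substitute cpu_chill() for cpu_relax() in retry loops; a usage sketch with the per-config expansion spelled out (the retry predicate is hypothetical):

#include <linux/delay.h>

	/*
	 * CONFIG_PREEMPT_RT_FULL: cpu_chill() -> msleep(1), sleeps at
	 *                         least a tick so a preempted writer runs
	 * otherwise:              cpu_chill() -> cpu_relax(), busy-wait hint
	 */
	while (!try_acquire())		/* hypothetical retry predicate */
		cpu_chill();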

debian/patches/features/all/rt/series

@@ -5,6 +5,7 @@
 ############################################################
 # UPSTREAM changes queued for 3.3 or 3.2
 ############################################################
+genirq-clear-thread-mask-for-non-oneshot.patch
 x86_64-patch-for-idle-notifiers.patch
 re-possible-slab-deadlock-while-doing-ifenslave-1.patch
@@ -585,8 +586,16 @@ rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch
 rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
 rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch
 # Enable full RT
 cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+softirq-preempt-fix-3-re.txt
+rt-introduce-cpu-chill.patch
+fs-dcache-use-cpu-chill-in-trylock-loops.patch
+fs-more-cpu-chill-fixups.patch
+net-use-cpu-chill.patch
 # Enable full RT
 kconfig-disable-a-few-options-rt.patch
 kconfig-preempt-rt-full.patch

debian/patches/features/all/rt/softirq-preempt-fix-3-re.txt

@@ -0,0 +1,145 @@
Subject: softirq: Check preemption after reenabling interrupts
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
raise_softirq_irqoff() disables interrupts and wakes the softirq
daemon, but after reenabling interrupts there is no preemption check,
so the execution of the softirq thread might be delayed arbitrarily.
In principle we could add that check to local_irq_enable/restore, but
that's overkill as the raise_softirq_irqoff() sections are the only
ones which show this behaviour.
Reported-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
block/blk-iopoll.c | 3 +++
block/blk-softirq.c | 3 +++
include/linux/preempt.h | 2 ++
net/core/dev.c | 6 ++++++
4 files changed, 14 insertions(+)
Index: linux-3.2/block/blk-iopoll.c
===================================================================
--- linux-3.2.orig/block/blk-iopoll.c
+++ linux-3.2/block/blk-iopoll.c
@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll
list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(blk_iopoll_sched);
@@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct so
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
}
/**
@@ -204,6 +206,7 @@ static int __cpuinit blk_iopoll_cpu_noti
&__get_cpu_var(blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
}
return NOTIFY_OK;
Index: linux-3.2/block/blk-softirq.c
===================================================================
--- linux-3.2.orig/block/blk-softirq.c
+++ linux-3.2/block/blk-softirq.c
@@ -50,6 +50,7 @@ static void trigger_softirq(void *data)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
/*
@@ -92,6 +93,7 @@ static int __cpuinit blk_cpu_notify(stru
&__get_cpu_var(blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
}
return NOTIFY_OK;
@@ -150,6 +152,7 @@ do_local:
goto do_local;
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
/**
Index: linux-3.2/include/linux/preempt.h
===================================================================
--- linux-3.2.orig/include/linux/preempt.h
+++ linux-3.2/include/linux/preempt.h
@@ -56,8 +56,10 @@ do { \
#ifndef CONFIG_PREEMPT_RT_BASE
# define preempt_enable_no_resched() __preempt_enable_no_resched()
+# define preempt_check_resched_rt() do { } while (0)
#else
# define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() preempt_check_resched()
#endif
#define preempt_enable() \
Index: linux-3.2/net/core/dev.c
===================================================================
--- linux-3.2.orig/net/core/dev.c
+++ linux-3.2/net/core/dev.c
@@ -1779,6 +1779,7 @@ static inline void __netif_reschedule(st
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
void __netif_schedule(struct Qdisc *q)
@@ -1800,6 +1801,7 @@ void dev_kfree_skb_irq(struct sk_buff *s
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);
@@ -2969,6 +2971,7 @@ enqueue:
rps_unlock(sd);
local_irq_restore(flags);
+ preempt_check_resched_rt();
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -3789,6 +3792,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
+ preempt_check_resched_rt();
}
static int process_backlog(struct napi_struct *napi, int quota)
@@ -3861,6 +3865,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);
@@ -6401,6 +6406,7 @@ static int dev_cpu_callback(struct notif
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
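
The recurring shape across all of these hunks, as a sketch: after the softirq has been raised with interrupts disabled, re-enabling interrupts alone does not reschedule, so the explicit check is what lets the woken softirq thread run promptly on RT:

	local_irq_save(flags);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);	/* wakes the softirq thread on RT */
	local_irq_restore(flags);
	preempt_check_resched_rt();	/* no-op unless CONFIG_PREEMPT_RT_BASE */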

debian/patches/series/2-extra

@@ -1,3 +1,4 @@
++ features/all/rt/genirq-clear-thread-mask-for-non-oneshot.patch featureset=rt
 + features/all/rt/x86_64-patch-for-idle-notifiers.patch featureset=rt
 + features/all/rt/re-possible-slab-deadlock-while-doing-ifenslave-1.patch featureset=rt
 + features/all/rt/x86-kprobes-remove-bogus-preempt-enable.patch featureset=rt
@@ -260,5 +261,10 @@
 + features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch featureset=rt
 + features/all/rt/rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch featureset=rt
 + features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch featureset=rt
++ features/all/rt/softirq-preempt-fix-3-re.txt featureset=rt
++ features/all/rt/rt-introduce-cpu-chill.patch featureset=rt
++ features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch featureset=rt
++ features/all/rt/fs-more-cpu-chill-fixups.patch featureset=rt
++ features/all/rt/net-use-cpu-chill.patch featureset=rt
 + features/all/rt/kconfig-disable-a-few-options-rt.patch featureset=rt
 + features/all/rt/kconfig-preempt-rt-full.patch featureset=rt