diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.txt b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.txt
new file mode 100644
index 000000000..154309ec7
--- /dev/null
+++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.txt
@@ -0,0 +1,153 @@
+Subject: softirq: Check preemption after reenabling interrupts
+From: Thomas Gleixner
+Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
+
+raise_softirq_irqoff() disables interrupts and wakes the softirq
+daemon, but after reenabling interrupts there is no preemption check,
+so the execution of the softirq thread might be delayed arbitrarily.
+
+In principle we could add that check to local_irq_enable/restore, but
+that's overkill as the raise_softirq_irqoff() sections are the only
+ones which show this behaviour.
+
+Reported-by: Carsten Emde
+Signed-off-by: Thomas Gleixner
+Cc: stable-rt@vger.kernel.org
+---
+ block/blk-iopoll.c      | 3 +++
+ block/blk-softirq.c     | 3 +++
+ include/linux/preempt.h | 3 +++
+ net/core/dev.c          | 6 ++++++
+ 4 files changed, 15 insertions(+)
+
+Index: linux-3.4/block/blk-iopoll.c
+===================================================================
+--- linux-3.4.orig/block/blk-iopoll.c
++++ linux-3.4/block/blk-iopoll.c
+@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll
+ 	list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
+ 	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(blk_iopoll_sched);
+ 
+@@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct so
+ 		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ 
+ 	local_irq_enable();
++	preempt_check_resched_rt();
+ }
+ 
+ /**
+@@ -204,6 +206,7 @@ static int __cpuinit blk_iopoll_cpu_noti
+ 				 &__get_cpu_var(blk_cpu_iopoll));
+ 		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ 		local_irq_enable();
++		preempt_check_resched_rt();
+ 	}
+ 
+ 	return NOTIFY_OK;
+Index: linux-3.4/block/blk-softirq.c
+===================================================================
+--- linux-3.4.orig/block/blk-softirq.c
++++ linux-3.4/block/blk-softirq.c
+@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
+ 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ 
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ 
+ /*
+@@ -93,6 +94,7 @@ static int __cpuinit blk_cpu_notify(stru
+ 				 &__get_cpu_var(blk_cpu_done));
+ 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ 		local_irq_enable();
++		preempt_check_resched_rt();
+ 	}
+ 
+ 	return NOTIFY_OK;
+@@ -150,6 +152,7 @@ do_local:
+ 		goto do_local;
+ 
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ 
+ /**
+Index: linux-3.4/include/linux/preempt.h
+===================================================================
+--- linux-3.4.orig/include/linux/preempt.h
++++ linux-3.4/include/linux/preempt.h
+@@ -56,8 +56,10 @@ do { \
+ 
+ #ifndef CONFIG_PREEMPT_RT_BASE
+ # define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt()	do { } while (0)
+ #else
+ # define preempt_enable_no_resched()	preempt_enable()
++# define preempt_check_resched_rt()	preempt_check_resched()
+ #endif
+ 
+ #define preempt_enable() \
+@@ -105,6 +107,7 @@ do { \
+ #define preempt_disable_notrace()		do { } while (0)
+ #define preempt_enable_no_resched_notrace()	do { } while (0)
+ #define preempt_enable_notrace()		do { } while (0)
++#define preempt_check_resched_rt()		do { } while (0)
+ 
+ #endif /* CONFIG_PREEMPT_COUNT */
+ 
+Index: linux-3.4/net/core/dev.c
+===================================================================
+--- linux-3.4.orig/net/core/dev.c
++++ linux-3.4/net/core/dev.c
+@@ -1803,6 +1803,7 @@ static inline void __netif_reschedule(st
+ 	sd->output_queue_tailp = &q->next_sched;
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ 
+ void __netif_schedule(struct Qdisc *q)
+@@ -1824,6 +1825,7 @@ void dev_kfree_skb_irq(struct sk_buff *s
+ 		sd->completion_queue = skb;
+ 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 		local_irq_restore(flags);
++		preempt_check_resched_rt();
+ 	}
+ }
+ EXPORT_SYMBOL(dev_kfree_skb_irq);
+@@ -2923,6 +2925,7 @@ enqueue:
+ 	rps_unlock(sd);
+ 
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ 
+ 	atomic_long_inc(&skb->dev->rx_dropped);
+ 	kfree_skb(skb);
+@@ -3736,6 +3739,7 @@ static void net_rps_action_and_irq_enabl
+ 	} else
+ #endif
+ 		local_irq_enable();
++	preempt_check_resched_rt();
+ }
+ 
+ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -3808,6 +3812,7 @@ void __napi_schedule(struct napi_struct
+ 	local_irq_save(flags);
+ 	____napi_schedule(&__get_cpu_var(softnet_data), n);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+ 
+@@ -6309,6 +6314,7 @@ static int dev_cpu_callback(struct notif
+ 
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 	local_irq_enable();
++	preempt_check_resched_rt();
+ 
+ 	/* Process offline CPU's input_pkt_queue */
+ 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
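For reference, the calling pattern every hunk above establishes looks as
follows. This is a minimal illustrative sketch, not part of the patch: the
function, device, list, and softirq names (my_complete_request, my_dev,
my_done_list, MY_SOFTIRQ) are made up, and it assumes the
preempt_check_resched_rt() definitions from the include/linux/preempt.h
hunk above.

static void my_complete_request(struct my_dev *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Queue the completed work on this CPU's list and raise the
	 * softirq. On PREEMPT_RT this wakes the softirq thread, which
	 * may set need_resched() but cannot preempt us while
	 * interrupts are disabled.
	 */
	list_add_tail(&dev->done_entry, &__get_cpu_var(my_done_list));
	raise_softirq_irqoff(MY_SOFTIRQ);
	local_irq_restore(flags);
	/*
	 * No-op without CONFIG_PREEMPT_RT_BASE; on RT this performs
	 * the preemption check that local_irq_restore() does not, so
	 * the woken softirq thread is not delayed arbitrarily.
	 */
	preempt_check_resched_rt();
}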