linux/debian/patches/features/all/rt/sched-load-balance-break-on...

From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 16 Mar 2010 14:31:44 -0700
Subject: sched: Break out from load_balancing on rq_lock contention

Also limit NEW_IDLE pull

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
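
Both hunks below add the same early-exit test to a task-pulling loop.
Restated as a single predicate, the added logic looks like the sketch
below (illustrative only: should_stop_balancing() is a hypothetical
helper, not part of the patch; this_rq, busiest and idle are the usual
parameters in kernel/sched_fair.c):

static inline bool should_stop_balancing(struct rq *this_rq,
					 struct rq *busiest,
					 enum cpu_idle_type idle)
{
	/*
	 * A newly idle CPU stops after the first pulled task so the
	 * non-preemptible critical section stays short.
	 */
	if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
		return true;

	/*
	 * Another CPU is spinning on one of the rq locks we hold;
	 * back off so it can make progress.
	 */
	return raw_spin_is_contended(&this_rq->lock) ||
	       raw_spin_is_contended(&busiest->lock);
}

Both hunks keep these checks under CONFIG_PREEMPT, matching the
existing NEWIDLE break in balance_tasks(): only preemptible kernels
care about cutting these critical sections short.
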
 kernel/sched_fair.c |   18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

Index: linux-3.2/kernel/sched_fair.c
===================================================================
--- linux-3.2.orig/kernel/sched_fair.c
+++ linux-3.2/kernel/sched_fair.c
@@ -2899,6 +2899,10 @@ balance_tasks(struct rq *this_rq, int th
 		 */
 		if (idle == CPU_NEWLY_IDLE)
 			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
 #endif
 
 		/*
@@ -3039,6 +3043,20 @@ load_balance_fair(struct rq *this_rq, in
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
 			break;
+
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
+#endif
 	}
 	rcu_read_unlock();
 
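
The same back-off idea can be modelled in ordinary userspace C. The
program below is only an analogy, not kernel code: POSIX has no
raw_spin_is_contended(), so a waiter counter stands in for it, and all
names (rq_lock, rq_waiters, contender, ...) are invented for the
example.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int rq_waiters;	/* stand-in for raw_spin_is_contended() */

static int rq_lock_contended(void)
{
	return atomic_load(&rq_waiters) > 0;
}

static void rq_lock_acquire(void)
{
	atomic_fetch_add(&rq_waiters, 1);	/* announce interest */
	pthread_mutex_lock(&rq_lock);
	atomic_fetch_sub(&rq_waiters, 1);	/* acquired, not waiting */
}

static void *contender(void *arg)
{
	usleep(1000);		/* let the "balancer" start pulling first */
	rq_lock_acquire();	/* raises rq_waiters; the balancer bails */
	pthread_mutex_unlock(&rq_lock);
	return arg;
}

int main(void)
{
	pthread_t t;
	long pulled = 0;

	pthread_create(&t, NULL, contender, NULL);

	rq_lock_acquire();
	while (pulled < 100000000) {
		pulled++;			/* "migrate one task" */
		if (rq_lock_contended())	/* someone wants the lock */
			break;			/* stop pulling early */
	}
	pthread_mutex_unlock(&rq_lock);
	pthread_join(t, NULL);

	printf("pulled %ld tasks before backing off\n", pulled);
	return 0;
}

Build with "cc -pthread model.c". The balancer typically gives up after
a small fraction of the 100 million iterations, which is the behaviour
the patch wants from the load-balancing loops above.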