66 lines
2.0 KiB
Diff
66 lines
2.0 KiB
Diff
Subject: FIX [2/2] slub: Tid must be retrieved from the percpu area of the current processor
|
|
From: Christoph Lameter <cl@linux.com>
|
|
Date: Wed, 23 Jan 2013 21:45:48 +0000
|
|
|
|
As Steven Rostedt has pointed out: Rescheduling could occur on a different processor
|
|
after the determination of the per cpu pointer and before the tid is retrieved.
|
|
This could result in allocation from the wrong node in slab_alloc.
|
|
|
|
The effect is much more severe in slab_free() where we could free to the freelist
|
|
of the wrong page.
|
|
|
|
The window for something like that occurring is pretty small but it is possible.
|
|
|
|
Signed-off-by: Christoph Lameter <cl@linux.com>
|
|
Cc: Steven Rostedt <rostedt@goodmis.org>
|
|
Cc: Pekka Enberg <penberg@kernel.org>
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
---
|
|
mm/slub.c | 15 ++++++++-------
|
|
1 file changed, 8 insertions(+), 7 deletions(-)
|
|
|
|
--- a/mm/slub.c
|
|
+++ b/mm/slub.c
|
|
@@ -2331,13 +2331,13 @@ static __always_inline void *slab_alloc_
|
|
|
|
s = memcg_kmem_get_cache(s, gfpflags);
|
|
redo:
|
|
-
|
|
/*
|
|
- * Must read kmem_cache cpu data via this cpu ptr. Preemption is
|
|
- * enabled. We may switch back and forth between cpus while
|
|
- * reading from one cpu area. That does not matter as long
|
|
- * as we end up on the original cpu again when doing the cmpxchg.
|
|
+ * Preemption is disabled for the retrieval of the tid because that
|
|
+ * must occur from the current processor. We cannot allow rescheduling
|
|
+ * on a different processor between the determination of the pointer
|
|
+ * and the retrieval of the tid.
|
|
*/
|
|
+ preempt_disable();
|
|
c = __this_cpu_ptr(s->cpu_slab);
|
|
|
|
/*
|
|
@@ -2347,7 +2347,7 @@ redo:
|
|
* linked list in between.
|
|
*/
|
|
tid = c->tid;
|
|
- barrier();
|
|
+ preempt_enable();
|
|
|
|
object = c->freelist;
|
|
page = c->page;
|
|
@@ -2594,10 +2594,11 @@ redo:
|
|
* data is retrieved via this pointer. If we are on the same cpu
|
|
* during the cmpxchg then the free will succedd.
|
|
*/
|
|
+ preempt_disable();
|
|
c = __this_cpu_ptr(s->cpu_slab);
|
|
|
|
tid = c->tid;
|
|
- barrier();
|
|
+ preempt_enable();
|
|
|
|
if (likely(page == c->page)) {
|
|
set_freepointer(s, object, c->freelist);
|