From 6f0a00b4eb54655af3e68ec70cdb9565debc9a3c Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:37 -0500
Subject: [PATCH 046/278] mm: Prepare decoupling the page fault disabling
 logic

Add a pagefault_disabled variable to task_struct to allow decoupling
the pagefault-disabled logic from the preempt count.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h   |  1 +
 include/linux/uaccess.h | 33 +++------------------------------
 kernel/fork.c           |  1 +
 mm/memory.c             | 29 +++++++++++++++++++++++++++++
 4 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3795654..4ccb25e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1439,6 +1439,7 @@ struct task_struct {
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+	int pagefault_disabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951..9414a1b 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,10 @@
 
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
diff --git a/kernel/fork.c b/kernel/fork.c
index d1c6b43..a49f324 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1201,6 +1201,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 70f5daf..b50e579 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3447,6 +3447,35 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	inc_preempt_count();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL_GPL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+EXPORT_SYMBOL_GPL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-- 
1.7.10