From 5824badf179c3ac4e502760f7a4f3511cf6c2255 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:30:37 -0500
Subject: [PATCH 046/303] mm: Prepare decoupling the page fault disabling logic

Add a pagefault_disabled variable to task_struct to allow decoupling
the pagefault-disabled logic from the preempt count.

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/linux/sched.h   |    1 +
 include/linux/uaccess.h |   33 +++------------------------------
 kernel/fork.c           |    1 +
 mm/memory.c             |   29 +++++++++++++++++++++++++++++
 4 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8cb5ff4..5520922 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1439,6 +1439,7 @@ struct task_struct {
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+	int pagefault_disabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951..9414a1b 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,10 @@
 
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
diff --git a/kernel/fork.c b/kernel/fork.c
index c1355fb..8ea3257 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1200,6 +1200,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 4f2add1..75419ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3452,6 +3452,35 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	inc_preempt_count();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL_GPL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+EXPORT_SYMBOL_GPL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
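For context, a typical caller of this pair wraps an atomic-context user access
in pagefault_disable()/pagefault_enable() so that a fault takes the exception
fixup path instead of sleeping on mmap_sem, much like the existing futex and
kmap_atomic users do.  A minimal sketch, not part of this patch (the helper
name peek_user_int is made up for illustration):

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/*
	 * Illustrative only: read one int from user space without letting
	 * the fault handler sleep.  On a fault, __copy_from_user_inatomic()
	 * returns a non-zero "bytes not copied" count via the fixup table.
	 */
	static int peek_user_int(const int __user *uaddr, int *val)
	{
		unsigned long left;

		pagefault_disable();
		left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();

		return left ? -EFAULT : 0;
	}

With this patch such callers still implicitly disable preemption, but the new
per-task pagefault_disabled counter is what later changes can test instead of
the preempt count.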