linux/debian/patches-rt/of-allocate-free-phandle-ca...

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 31 Aug 2018 14:16:30 +0200
Subject: [PATCH] of: allocate / free phandle cache outside of the devtree_lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz

The phandle cache code allocates memory while holding devtree_lock, which
is a raw_spinlock_t. Memory allocation (and free()) is not possible on RT
while a raw_spinlock_t is held.
Invoke kfree() and kcalloc() while the lock is dropped (a sketch of this
pattern follows the diffstat).

Cc: Rob Herring <robh+dt@kernel.org>
Cc: Frank Rowand <frowand.list@gmail.com>
Cc: devicetree@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/of/base.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
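
Note: the change follows the usual PREEMPT_RT pattern of doing the
kcalloc()/kfree() with the lock dropped and only swapping the pointer
inside the raw_spinlock_t critical section. Below is a minimal,
hypothetical sketch of that pattern, not part of the patch; demo_lock,
demo_cache and demo_cache_resize() are made-up names.

/*
 * Illustrative sketch only: allocate and free outside a raw spinlock,
 * swap the pointer under it. All names here are invented for the example.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static void **demo_cache;

static int demo_cache_resize(unsigned int demo_entries)
{
        unsigned long flags;
        void **new, **old;

        /* Allocate with the lock dropped; GFP_KERNEL may sleep. */
        new = kcalloc(demo_entries, sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        /* Only the pointer exchange happens under the raw spinlock. */
        raw_spin_lock_irqsave(&demo_lock, flags);
        old = demo_cache;
        demo_cache = new;
        raw_spin_unlock_irqrestore(&demo_lock, flags);

        /* Free the previous buffer only after the lock is dropped. */
        kfree(old);
        return 0;
}

Only the pointer exchange runs inside the raw-spinlock critical section,
so it stays short and never sleeps, which is what PREEMPT_RT requires of
code holding a raw_spinlock_t.
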
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -108,46 +108,52 @@ void of_populate_phandle_cache(void)
         u32 cache_entries;
         struct device_node *np;
         u32 phandles = 0;
+        struct device_node **shadow;
         raw_spin_lock_irqsave(&devtree_lock, flags);
-
-        kfree(phandle_cache);
+        shadow = phandle_cache;
         phandle_cache = NULL;
         for_each_of_allnodes(np)
                 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
                         phandles++;
+        raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
         if (!phandles)
-                goto out;
+                return;
         cache_entries = roundup_pow_of_two(phandles);
         phandle_cache_mask = cache_entries - 1;
-        phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-                                GFP_ATOMIC);
-        if (!phandle_cache)
-                goto out;
+        kfree(shadow);
+        shadow = kcalloc(cache_entries, sizeof(*phandle_cache), GFP_KERNEL);
+
+        if (!shadow)
+                return;
+        raw_spin_lock_irqsave(&devtree_lock, flags);
+        phandle_cache = shadow;
         for_each_of_allnodes(np)
                 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
                         phandle_cache[np->phandle & phandle_cache_mask] = np;
-out:
         raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 int of_free_phandle_cache(void)
 {
         unsigned long flags;
+        struct device_node **shadow;
         raw_spin_lock_irqsave(&devtree_lock, flags);
-        kfree(phandle_cache);
+        shadow = phandle_cache;
         phandle_cache = NULL;
         raw_spin_unlock_irqrestore(&devtree_lock, flags);
+        kfree(shadow);
         return 0;
 }
 #if !defined(CONFIG_MODULES)
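
For reference, here is roughly how the two functions read with the patch
applied, reconstructed from the hunk above. Indentation, blank lines and
the flags declaration above the hunk are filled in from context and may
differ slightly from the actual tree.

void of_populate_phandle_cache(void)
{
        unsigned long flags;
        u32 cache_entries;
        struct device_node *np;
        u32 phandles = 0;
        struct device_node **shadow;

        /* Count phandles and detach the old cache under the lock. */
        raw_spin_lock_irqsave(&devtree_lock, flags);
        shadow = phandle_cache;
        phandle_cache = NULL;
        for_each_of_allnodes(np)
                if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
                        phandles++;
        raw_spin_unlock_irqrestore(&devtree_lock, flags);

        if (!phandles)
                return;

        cache_entries = roundup_pow_of_two(phandles);
        phandle_cache_mask = cache_entries - 1;

        /* Free the old cache and allocate the replacement with the lock dropped. */
        kfree(shadow);
        shadow = kcalloc(cache_entries, sizeof(*phandle_cache), GFP_KERNEL);
        if (!shadow)
                return;

        /* Publish and fill the new cache under the lock. */
        raw_spin_lock_irqsave(&devtree_lock, flags);
        phandle_cache = shadow;
        for_each_of_allnodes(np)
                if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
                        phandle_cache[np->phandle & phandle_cache_mask] = np;
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
}

int of_free_phandle_cache(void)
{
        unsigned long flags;
        struct device_node **shadow;

        /* Detach the cache under the lock, free it after dropping the lock. */
        raw_spin_lock_irqsave(&devtree_lock, flags);
        shadow = phandle_cache;
        phandle_cache = NULL;
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
        kfree(shadow);

        return 0;
}

While devtree_lock is dropped in of_populate_phandle_cache(), phandle_cache
is NULL, so phandle lookups fall back to the full tree walk during that
window (of_find_node_by_phandle() checks for a NULL cache).
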