From 69ca2dbfbb7c19d8b42bb361a06f9a669cf700c1 Mon Sep 17 00:00:00 2001
Message-Id: <69ca2dbfbb7c19d8b42bb361a06f9a669cf700c1.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior
Date: Wed, 13 Sep 2017 12:32:34 +0200
Subject: [PATCH 171/333] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD init
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed
INIT_HLIST_BL_HEAD and uses the HASH_ZERO flag for the init instead.
However, on RT we also have a spinlock which needs an init call, so we
can't use that flag.

Signed-off-by: Sebastian Andrzej Siewior
---
 fs/dcache.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/fs/dcache.c b/fs/dcache.c
index 6e0022326afe..10225a9135fb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3060,6 +3060,8 @@ __setup("dhash_entries=", set_dhash_entries);
 
 static void __init dcache_init_early(void)
 {
+	unsigned int loop;
+
 	/* If hashes are distributed across NUMA nodes, defer
 	 * hash allocation until vmalloc space is available.
 	 */
@@ -3076,11 +3078,16 @@ static void __init dcache_init_early(void)
 					NULL,
 					0,
 					0);
+
+	for (loop = 0; loop < (1U << d_hash_shift); loop++)
+		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
+
 	d_hash_shift = 32 - d_hash_shift;
 }
 
 static void __init dcache_init(void)
 {
+	unsigned int loop;
 	/*
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
@@ -3104,6 +3111,10 @@ static void __init dcache_init(void)
 					NULL,
 					0,
 					0);
+
+	for (loop = 0; loop < (1U << d_hash_shift); loop++)
+		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
+
 	d_hash_shift = 32 - d_hash_shift;
 }
 
-- 
2.17.1
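
A minimal userspace sketch of the reasoning in the commit message, using
stand-in types rather than the kernel's hlist_bl_head (struct bucket,
bucket_init() and the pthread spinlock below are illustrative assumptions,
not anything from the patch): zero-filled memory, which is all HASH_ZERO
provides, is not an initialized lock, so each hash bucket still needs an
explicit init call in a loop like the one the patch re-adds.

/*
 * Simplified userspace analogue of the situation the patch addresses.
 * On RT each hash bucket carries a lock that needs an explicit init
 * call; HASH_ZERO (modelled here by calloc()) only zeroes the memory.
 * All names and types below are illustrative, not the kernel's.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdlib.h>

struct bucket {
	void *first;              /* stand-in for the hlist_bl list head */
	pthread_spinlock_t lock;  /* stand-in for the RT per-bucket lock */
};

static void bucket_init(struct bucket *b)
{
	b->first = NULL;
	/* The lock must be initialized by a call; zeroed memory is not
	 * guaranteed to be a valid, unlocked spinlock. */
	pthread_spin_init(&b->lock, PTHREAD_PROCESS_PRIVATE);
}

int main(void)
{
	unsigned int size = 1U << 12;	/* arbitrary table size */
	unsigned int i;

	/* calloc() mirrors HASH_ZERO: the table memory is zeroed... */
	struct bucket *table = calloc(size, sizeof(*table));
	if (!table)
		return 1;

	/* ...but every bucket still needs its own init, analogous to the
	 * INIT_HLIST_BL_HEAD() loop the patch brings back. */
	for (i = 0; i < size; i++)
		bucket_init(&table[i]);

	for (i = 0; i < size; i++)
		pthread_spin_destroy(&table[i].lock);
	free(table);
	return 0;
}

On a mainline (non-RT) kernel the hlist_bl lock is a bit spinlock kept in
bit 0 of the bucket pointer, so an all-zero bucket is already valid; the RT
tree adds a real per-bucket lock, which is why the HASH_ZERO shortcut no
longer suffices there and the explicit loop is brought back.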