From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
 for -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9-rt1.tar.xz

They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/block/zram/zram_drv.c |   30 ++++++++++++++++--------------
 drivers/block/zram/zram_drv.h |   41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+), 14 deletions(-)

--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -519,6 +519,8 @@ static struct zram_meta *zram_meta_alloc
 		goto out_error;
 	}
 
+	zram_meta_init_table_locks(meta, disksize);
+
 	return meta;
 
 out_error:
@@ -567,12 +569,12 @@ static int zram_decompress_page(struct z
 	unsigned long handle;
 	unsigned int size;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	handle = meta->table[index].handle;
 	size = zram_get_obj_size(meta, index);
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		clear_page(mem);
 		return 0;
 	}
@@ -587,7 +589,7 @@ static int zram_decompress_page(struct z
 		zcomp_stream_put(zram->comp);
 	}
 	zs_unmap_object(meta->mem_pool, handle);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret)) {
@@ -607,14 +609,14 @@ static int zram_bvec_read(struct zram *z
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		handle_zero_page(bvec);
 		return 0;
 	}
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 
 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -691,10 +693,10 @@ static int zram_bvec_write(struct zram *
 		if (user_mem)
 			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_lock_table(&meta->table[index]);
 		zram_free_page(zram, index);
 		zram_set_flag(meta, index, ZRAM_ZERO);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 
 		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
@@ -785,12 +787,12 @@ static int zram_bvec_write(struct zram *
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
 	zram_set_obj_size(meta, index, clen);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_data_size);
@@ -833,9 +835,9 @@ static void zram_bio_discard(struct zram
 	}
 
 	while (n >= PAGE_SIZE) {
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_lock_table(&meta->table[index]);
 		zram_free_page(zram, index);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		atomic64_inc(&zram->stats.notify_free);
 		index++;
 		n -= PAGE_SIZE;
@@ -964,9 +966,9 @@ static void zram_slot_free_notify(struct
 	zram = bdev->bd_disk->private_data;
 	meta = zram->meta;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	zram_free_page(zram, index);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 	atomic64_inc(&zram->stats.notify_free);
 }
 
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -73,6 +73,9 @@ enum zram_pageflags {
 struct zram_table_entry {
 	unsigned long handle;
 	unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spinlock_t lock;
+#endif
 };
 
 struct zram_stats {
@@ -120,4 +123,42 @@ struct zram {
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
 };
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+	bit_spin_lock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+	bit_spin_unlock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
+#else /* CONFIG_PREEMPT_RT_BASE */
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+	spin_lock(&table->lock);
+	__set_bit(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+	__clear_bit(ZRAM_ACCESS, &table->value);
+	spin_unlock(&table->lock);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
+{
+	size_t num_pages = disksize >> PAGE_SHIFT;
+	size_t index;
+
+	for (index = 0; index < num_pages; index++) {
+		spinlock_t *lock = &meta->table[index].lock;
+		spin_lock_init(lock);
+	}
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
 #endif