[x86] Add ramzswap driver (Closes: #573912)
svn path=/dists/sid/linux-2.6/; revision=15483
@@ -22,6 +22,7 @@ linux-2.6 (2.6.32-11) UNRELEASED; urgency=low
   * linux-base: Convert disk IDs in crypttab (Closes: #575056)
   * linux-base: Redirect stdin and stdout of child processes to avoid
     interfering with debconf (Closes: #574987)
+  * [x86] Add ramzswap driver (Closes: #573912)
 
   [ maximilian attems ]
   * [alpha, hppa] Disable oprofile as tracing code is unsupported here.
@@ -1121,6 +1121,12 @@ CONFIG_POHMELFS=m
 # CONFIG_POHMELFS_DEBUG is not set
 CONFIG_POHMELFS_CRYPTO=y
 
+##
+## file: drivers/staging/ramzswap/Kconfig
+##
+CONFIG_RAMZSWAP=m
+CONFIG_RAMZSWAP_STATS=y
+
 ##
 ## file: drivers/staging/rt2860/Kconfig
 ##
@@ -0,0 +1,29 @@
From 224f0ef4e20327ab108e6a11ebc3a92f337c5e85 Mon Sep 17 00:00:00 2001
From: Nitin Gupta <ngupta@vflare.org>
Date: Tue, 22 Sep 2009 15:32:33 +0530
Subject: [PATCH 4/5] Staging: ramzswap: add TODO file

TODO file for ramzswap.

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 drivers/staging/ramzswap/TODO |    6 ++++++
 1 files changed, 6 insertions(+), 0 deletions(-)
 create mode 100644 drivers/staging/ramzswap/TODO

diff --git a/drivers/staging/ramzswap/TODO b/drivers/staging/ramzswap/TODO
new file mode 100644
index 0000000..bac40d6
--- /dev/null
+++ b/drivers/staging/ramzswap/TODO
@@ -0,0 +1,6 @@
+TODO:
+	- Add support for swap notifiers
+	- Remove CONFIG_ARM hack
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+Nitin Gupta <ngupta@vflare.org>
--
1.7.0.3
(File diff suppressed because it is too large.)
@@ -0,0 +1,74 @@
From 47f9afb38f0de2f153deea34bf1ef5c778815f2e Mon Sep 17 00:00:00 2001
From: Nitin Gupta <ngupta@vflare.org>
Date: Tue, 22 Sep 2009 10:26:54 +0530
Subject: [PATCH 3/5] Staging: ramzswap: documentation

Short guide on how to setup and use ramzswap.

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 drivers/staging/ramzswap/ramzswap.txt |   51 +++++++++++++++++++++++++++++++
 1 files changed, 51 insertions(+), 0 deletions(-)
 create mode 100644 drivers/staging/ramzswap/ramzswap.txt

diff --git a/drivers/staging/ramzswap/ramzswap.txt b/drivers/staging/ramzswap/ramzswap.txt
new file mode 100644
index 0000000..e9f1619
--- /dev/null
+++ b/drivers/staging/ramzswap/ramzswap.txt
@@ -0,0 +1,51 @@
+ramzswap: Compressed RAM based swap device
+-------------------------------------------
+
+Project home: http://compcache.googlecode.com/
+
+* Introduction
+
+It creates RAM based block devices which can be used (only) as swap disks.
+Pages swapped to these devices are compressed and stored in memory itself.
+See project home for use cases, performance numbers and a lot more.
+
+Individual ramzswap devices are configured and initialized using rzscontrol
+userspace utility as shown in examples below. See rzscontrol man page for more
+details.
+
+* Usage
+
+Following shows a typical sequence of steps for using ramzswap.
+
+1) Load Modules:
+	modprobe ramzswap num_devices=4
+	This creates 4 (uninitialized) devices: /dev/ramzswap{0,1,2,3}
+	(num_devices parameter is optional. Default: 1)
+
+2) Initialize:
+	Use rzscontrol utility to configure and initialize individual
+	ramzswap devices. Example:
+	rzscontrol /dev/ramzswap2 --init # uses default value of disksize_kb
+
+	*See rzscontrol man page for more details and examples*
+
+3) Activate:
+	swapon /dev/ramzswap2 # or any other initialized ramzswap device
+
+4) Stats:
+	rzscontrol /dev/ramzswap2 --stats
+
+5) Deactivate:
+	swapoff /dev/ramzswap2
+
+6) Reset:
+	rzscontrol /dev/ramzswap2 --reset
+	(This frees all the memory allocated for this device).
+
+
+Please report any problems at:
+ - Mailing list: linux-mm-cc at laptop dot org
+ - Issue tracker: http://code.google.com/p/compcache/issues/list
+
+Nitin Gupta
+ngupta@vflare.org
--
1.7.0.3
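Steps 3) and 5) of the guide above are plain swapon/swapoff. As a hypothetical aside (not part of the patch set), the same activation can be done from C through the swapon(2)/swapoff(2) system calls; this sketch assumes /dev/ramzswap2 has already been initialized with rzscontrol and that the program runs as root.

/*
 * Editor's illustration only, not shipped with the driver: activate and
 * later deactivate an already-initialized ramzswap device.
 */
#include <stdio.h>
#include <sys/swap.h>	/* swapon(2), swapoff(2) */

int main(void)
{
	const char *dev = "/dev/ramzswap2";

	/* Equivalent of "swapon /dev/ramzswap2" */
	if (swapon(dev, 0) != 0) {
		perror("swapon");
		return 1;
	}
	printf("%s is now active as swap\n", dev);

	/* ... later, equivalent of "swapoff /dev/ramzswap2" */
	if (swapoff(dev) != 0) {
		perror("swapoff");
		return 1;
	}
	return 0;
}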
debian/patches/features/all/ramzswap/ramzswap-remove-ARM-specific-d-cache-hack.patch (new file, 94 lines)
@@ -0,0 +1,94 @@
From 30fb8a7141e906116bb536dd54be99480b8fd238 Mon Sep 17 00:00:00 2001
From: Nitin Gupta <ngupta@vflare.org>
Date: Sat, 12 Dec 2009 11:44:46 +0530
Subject: [PATCH 5/5] Staging: ramzswap: remove ARM specific d-cache hack

Remove d-cache hack in ramzswap driver that was needed
to work around a bug in ARM version of update_mmu_cache()
which caused stale data in d-cache to be transferred to
userspace. This bug was fixed by git commit:
787b2faadc4356b6c2c71feb42fb944fece9a12f
This also brings down one entry in the TODO file.

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 drivers/staging/ramzswap/TODO           |    1 -
 drivers/staging/ramzswap/ramzswap_drv.c |   28 +++-------------------------
 2 files changed, 3 insertions(+), 26 deletions(-)

diff --git a/drivers/staging/ramzswap/TODO b/drivers/staging/ramzswap/TODO
index bac40d6..8d64e28 100644
--- a/drivers/staging/ramzswap/TODO
+++ b/drivers/staging/ramzswap/TODO
@@ -1,6 +1,5 @@
 TODO:
 	- Add support for swap notifiers
-	- Remove CONFIG_ARM hack
 
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
 Nitin Gupta <ngupta@vflare.org>
diff --git a/drivers/staging/ramzswap/ramzswap_drv.c b/drivers/staging/ramzswap/ramzswap_drv.c
index b839f05..989fac5 100644
--- a/drivers/staging/ramzswap/ramzswap_drv.c
+++ b/drivers/staging/ramzswap/ramzswap_drv.c
@@ -222,28 +222,6 @@ out:
 	return ret;
 }
 
-static void ramzswap_flush_dcache_page(struct page *page)
-{
-#ifdef CONFIG_ARM
-	int flag = 0;
-	/*
-	 * Ugly hack to get flush_dcache_page() work on ARM.
-	 * page_mapping(page) == NULL after clearing this swap cache flag.
-	 * Without clearing this flag, flush_dcache_page() will simply set
-	 * "PG_dcache_dirty" bit and return.
-	 */
-	if (PageSwapCache(page)) {
-		flag = 1;
-		ClearPageSwapCache(page);
-	}
-#endif
-	flush_dcache_page(page);
-#ifdef CONFIG_ARM
-	if (flag)
-		SetPageSwapCache(page);
-#endif
-}
-
 void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
 			struct ramzswap_ioctl_stats *s)
 {
@@ -655,7 +633,7 @@ static int handle_zero_page(struct bio *bio)
 	memset(user_mem, 0, PAGE_SIZE);
 	kunmap_atomic(user_mem, KM_USER0);
 
-	ramzswap_flush_dcache_page(page);
+	flush_dcache_page(page);
 
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 	bio_endio(bio, 0);
@@ -679,7 +657,7 @@ static int handle_uncompressed_page(struct ramzswap *rzs, struct bio *bio)
 	kunmap_atomic(user_mem, KM_USER0);
 	kunmap_atomic(cmem, KM_USER1);
 
-	ramzswap_flush_dcache_page(page);
+	flush_dcache_page(page);
 
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 	bio_endio(bio, 0);
@@ -779,7 +757,7 @@ static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
 		goto out;
 	}
 
-	ramzswap_flush_dcache_page(page);
+	flush_dcache_page(page);
 
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 	bio_endio(bio, 0);
--
1.7.0.3
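The helper removed above only wrapped flush_dcache_page() in PageSwapCache toggling. As a hedged sketch of the pattern that remains (2.6.32-era kernel API with the two-argument kmap_atomic(); fill_and_flush() is a made-up name, not driver code): after filling a page through a temporary kernel mapping, a plain flush_dcache_page() is now enough to make the data visible to user mappings on aliasing-cache architectures.

/*
 * Editor's sketch, not part of the patch: write a page via a kernel
 * mapping, then flush the d-cache before userspace reads the page.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void fill_and_flush(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);	/* write through the kernel alias */
	kunmap_atomic(vaddr, KM_USER0);

	/*
	 * With ARM's update_mmu_cache() fixed upstream, this is sufficient;
	 * no PageSwapCache games are needed any more.
	 */
	flush_dcache_page(page);
}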
@@ -0,0 +1,700 @@
From 644bf7b5983cf2540b57a5b25b775cb3c1e8e943 Mon Sep 17 00:00:00 2001
From: Nitin Gupta <ngupta@vflare.org>
Date: Tue, 22 Sep 2009 10:26:52 +0530
Subject: [PATCH 1/5] Staging: xvmalloc memory allocator

* Features:
 - Low metadata overhead (just 4 bytes per object)
 - O(1) Alloc/Free - except when we have to call the system page allocator to
   get additional memory.
 - Very low fragmentation: In all tests, xvmalloc memory usage is within 12%
   of "Ideal".
 - Pool based allocator: Each pool can grow and shrink.
 - It maps pages only when required. So, it does not hog the vmalloc area,
   which is very small on 32-bit systems.

The SLUB allocator could not be used due to fragmentation issues:
http://code.google.com/p/compcache/wiki/AllocatorsComparison
Data there shows kmalloc using ~43% more memory than TLSF, while xvMalloc
showed ~2% better space efficiency than TLSF (due to smaller metadata).
Creating various kmem_caches can reduce the space efficiency gap, but the
problem of being limited to low memory remains. Also, SLUB depends on
allocating higher order pages to reduce fragmentation - this is not
acceptable for ramzswap as it is used under memory crunch (it's a swap
device!).

The SLOB allocator could not be used due to reasons mentioned here:
http://lkml.org/lkml/2009/3/18/210

* Implementation:
It uses a two-level bitmap search to find the free list containing a block
of the correct size. This idea is taken from the TLSF (Two-Level Segregate
Fit) allocator and is well explained in its paper (see [Links] below).

* Limitations:
 - Poor scalability: No per-cpu data structures (work in progress).

[Links]
1. Details and Performance data:
   http://code.google.com/p/compcache/wiki/xvMalloc
   http://code.google.com/p/compcache/wiki/xvMallocPerformance

2. TLSF memory allocator:
   home: http://rtportal.upv.es/rtmalloc/
   paper: http://rtportal.upv.es/rtmalloc/files/MRBC_2008.pdf

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 drivers/staging/ramzswap/xvmalloc.c     |  507 +++++++++++++++++++++++++++++++
 drivers/staging/ramzswap/xvmalloc.h     |   30 ++
 drivers/staging/ramzswap/xvmalloc_int.h |   86 ++++++
 3 files changed, 623 insertions(+), 0 deletions(-)
 create mode 100644 drivers/staging/ramzswap/xvmalloc.c
 create mode 100644 drivers/staging/ramzswap/xvmalloc.h
 create mode 100644 drivers/staging/ramzswap/xvmalloc_int.h
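To make the two-level (TLSF-style) lookup described in the patch header concrete, here is a hypothetical, standalone userspace sketch of the freelist index math, using the constants from xvmalloc_int.h below and assuming 4 KiB pages and 64-bit longs; it is not part of the patch.

/*
 * Editor's sketch: mirrors get_index() and shows how a request size maps
 * to a second-level freelist index and a first-level bitmap chunk.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE         4096u
#define BITS_PER_LONG     64u
#define XV_ALIGN          4u			/* 1 << XV_ALIGN_SHIFT */
#define XV_MIN_ALLOC_SIZE 32u
#define XV_MAX_ALLOC_SIZE (PAGE_SIZE - XV_ALIGN)
#define FL_DELTA          8u			/* freelists are FL_DELTA bytes apart */
#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

/* Index of the freelist holding blocks of at least 'size' bytes. */
static uint32_t get_index(uint32_t size)
{
	if (size < XV_MIN_ALLOC_SIZE)
		size = XV_MIN_ALLOC_SIZE;
	size = ALIGN_UP(size, FL_DELTA);
	return (size - XV_MIN_ALLOC_SIZE) / FL_DELTA;
}

int main(void)
{
	uint32_t sizes[] = { 17, 200, 1024, 3000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint32_t sl = get_index(sizes[i]);
		/*
		 * The first-level bit (flindex) selects a 64-entry chunk of
		 * the second-level bitmap; the second-level bit selects the
		 * exact freelist, as find_block() does below.
		 */
		printf("size %4u -> slindex %3u (flindex %u, bit %2u)\n",
		       (unsigned)sizes[i], (unsigned)sl,
		       (unsigned)(sl / BITS_PER_LONG),
		       (unsigned)(sl % BITS_PER_LONG));
	}
	return 0;
}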
diff --git a/drivers/staging/ramzswap/xvmalloc.c b/drivers/staging/ramzswap/xvmalloc.c
|
||||
new file mode 100644
|
||||
index 0000000..b3e986c
|
||||
--- /dev/null
|
||||
+++ b/drivers/staging/ramzswap/xvmalloc.c
|
||||
@@ -0,0 +1,507 @@
|
||||
+/*
|
||||
+ * xvmalloc memory allocator
|
||||
+ *
|
||||
+ * Copyright (C) 2008, 2009 Nitin Gupta
|
||||
+ *
|
||||
+ * This code is released using a dual license strategy: BSD/GPL
|
||||
+ * You can choose the licence that better fits your requirements.
|
||||
+ *
|
||||
+ * Released under the terms of 3-clause BSD License
|
||||
+ * Released under the terms of GNU General Public License Version 2.0
|
||||
+ */
|
||||
+
|
||||
+#include <linux/bitops.h>
|
||||
+#include <linux/errno.h>
|
||||
+#include <linux/highmem.h>
|
||||
+#include <linux/init.h>
|
||||
+#include <linux/string.h>
|
||||
+#include <linux/slab.h>
|
||||
+
|
||||
+#include "xvmalloc.h"
|
||||
+#include "xvmalloc_int.h"
|
||||
+
|
||||
+static void stat_inc(u64 *value)
|
||||
+{
|
||||
+ *value = *value + 1;
|
||||
+}
|
||||
+
|
||||
+static void stat_dec(u64 *value)
|
||||
+{
|
||||
+ *value = *value - 1;
|
||||
+}
|
||||
+
|
||||
+static int test_flag(struct block_header *block, enum blockflags flag)
|
||||
+{
|
||||
+ return block->prev & BIT(flag);
|
||||
+}
|
||||
+
|
||||
+static void set_flag(struct block_header *block, enum blockflags flag)
|
||||
+{
|
||||
+ block->prev |= BIT(flag);
|
||||
+}
|
||||
+
|
||||
+static void clear_flag(struct block_header *block, enum blockflags flag)
|
||||
+{
|
||||
+ block->prev &= ~BIT(flag);
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Given <page, offset> pair, provide a dereferenceable pointer.
|
||||
+ * This is called from xv_malloc/xv_free path, so it
|
||||
+ * needs to be fast.
|
||||
+ */
|
||||
+static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
|
||||
+{
|
||||
+ unsigned char *base;
|
||||
+
|
||||
+ base = kmap_atomic(page, type);
|
||||
+ return base + offset;
|
||||
+}
|
||||
+
|
||||
+static void put_ptr_atomic(void *ptr, enum km_type type)
|
||||
+{
|
||||
+ kunmap_atomic(ptr, type);
|
||||
+}
|
||||
+
|
||||
+static u32 get_blockprev(struct block_header *block)
|
||||
+{
|
||||
+ return block->prev & PREV_MASK;
|
||||
+}
|
||||
+
|
||||
+static void set_blockprev(struct block_header *block, u16 new_offset)
|
||||
+{
|
||||
+ block->prev = new_offset | (block->prev & FLAGS_MASK);
|
||||
+}
|
||||
+
|
||||
+static struct block_header *BLOCK_NEXT(struct block_header *block)
|
||||
+{
|
||||
+ return (struct block_header *)
|
||||
+ ((char *)block + block->size + XV_ALIGN);
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Get index of free list containing blocks of maximum size
|
||||
+ * which is less than or equal to given size.
|
||||
+ */
|
||||
+static u32 get_index_for_insert(u32 size)
|
||||
+{
|
||||
+ if (unlikely(size > XV_MAX_ALLOC_SIZE))
|
||||
+ size = XV_MAX_ALLOC_SIZE;
|
||||
+ size &= ~FL_DELTA_MASK;
|
||||
+ return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Get index of free list having blocks of size greater than
|
||||
+ * or equal to requested size.
|
||||
+ */
|
||||
+static u32 get_index(u32 size)
|
||||
+{
|
||||
+ if (unlikely(size < XV_MIN_ALLOC_SIZE))
|
||||
+ size = XV_MIN_ALLOC_SIZE;
|
||||
+ size = ALIGN(size, FL_DELTA);
|
||||
+ return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * find_block - find block of at least given size
|
||||
+ * @pool: memory pool to search from
|
||||
+ * @size: size of block required
|
||||
+ * @page: page containing required block
|
||||
+ * @offset: offset within the page where block is located.
|
||||
+ *
|
||||
+ * Searches two level bitmap to locate block of at least
|
||||
+ * the given size. If such a block is found, it provides
|
||||
+ * <page, offset> to identify this block and returns index
|
||||
+ * in freelist where we found this block.
|
||||
+ * Otherwise, returns 0 and <page, offset> params are not touched.
|
||||
+ */
|
||||
+static u32 find_block(struct xv_pool *pool, u32 size,
|
||||
+ struct page **page, u32 *offset)
|
||||
+{
|
||||
+ ulong flbitmap, slbitmap;
|
||||
+ u32 flindex, slindex, slbitstart;
|
||||
+
|
||||
+ /* There are no free blocks in this pool */
|
||||
+ if (!pool->flbitmap)
|
||||
+ return 0;
|
||||
+
|
||||
+ /* Get freelist index corresponding to this size */
|
||||
+ slindex = get_index(size);
|
||||
+ slbitmap = pool->slbitmap[slindex / BITS_PER_LONG];
|
||||
+ slbitstart = slindex % BITS_PER_LONG;
|
||||
+
|
||||
+ /*
|
||||
+ * If freelist is not empty at this index, we found the
|
||||
+ * block - head of this list. This is approximate best-fit match.
|
||||
+ */
|
||||
+ if (test_bit(slbitstart, &slbitmap)) {
|
||||
+ *page = pool->freelist[slindex].page;
|
||||
+ *offset = pool->freelist[slindex].offset;
|
||||
+ return slindex;
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * No best-fit found. Search a bit further in bitmap for a free block.
|
||||
+ * Second level bitmap consists of series of 32-bit chunks. Search
|
||||
+ * further in the chunk where we expected a best-fit, starting from
|
||||
+ * index location found above.
|
||||
+ */
|
||||
+ slbitstart++;
|
||||
+ slbitmap >>= slbitstart;
|
||||
+
|
||||
+ /* Skip this search if we were already at end of this bitmap chunk */
|
||||
+ if ((slbitstart != BITS_PER_LONG) && slbitmap) {
|
||||
+ slindex += __ffs(slbitmap) + 1;
|
||||
+ *page = pool->freelist[slindex].page;
|
||||
+ *offset = pool->freelist[slindex].offset;
|
||||
+ return slindex;
|
||||
+ }
|
||||
+
|
||||
+ /* Now do a full two-level bitmap search to find next nearest fit */
|
||||
+ flindex = slindex / BITS_PER_LONG;
|
||||
+
|
||||
+ flbitmap = (pool->flbitmap) >> (flindex + 1);
|
||||
+ if (!flbitmap)
|
||||
+ return 0;
|
||||
+
|
||||
+ flindex += __ffs(flbitmap) + 1;
|
||||
+ slbitmap = pool->slbitmap[flindex];
|
||||
+ slindex = (flindex * BITS_PER_LONG) + __ffs(slbitmap);
|
||||
+ *page = pool->freelist[slindex].page;
|
||||
+ *offset = pool->freelist[slindex].offset;
|
||||
+
|
||||
+ return slindex;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Insert block at <page, offset> in freelist of given pool.
|
||||
+ * freelist used depends on block size.
|
||||
+ */
|
||||
+static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
|
||||
+ struct block_header *block)
|
||||
+{
|
||||
+ u32 flindex, slindex;
|
||||
+ struct block_header *nextblock;
|
||||
+
|
||||
+ slindex = get_index_for_insert(block->size);
|
||||
+ flindex = slindex / BITS_PER_LONG;
|
||||
+
|
||||
+ block->link.prev_page = 0;
|
||||
+ block->link.prev_offset = 0;
|
||||
+ block->link.next_page = pool->freelist[slindex].page;
|
||||
+ block->link.next_offset = pool->freelist[slindex].offset;
|
||||
+ pool->freelist[slindex].page = page;
|
||||
+ pool->freelist[slindex].offset = offset;
|
||||
+
|
||||
+ if (block->link.next_page) {
|
||||
+ nextblock = get_ptr_atomic(block->link.next_page,
|
||||
+ block->link.next_offset, KM_USER1);
|
||||
+ nextblock->link.prev_page = page;
|
||||
+ nextblock->link.prev_offset = offset;
|
||||
+ put_ptr_atomic(nextblock, KM_USER1);
|
||||
+ }
|
||||
+
|
||||
+ __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
|
||||
+ __set_bit(flindex, &pool->flbitmap);
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Remove block from head of freelist. Index 'slindex' identifies the freelist.
|
||||
+ */
|
||||
+static void remove_block_head(struct xv_pool *pool,
|
||||
+ struct block_header *block, u32 slindex)
|
||||
+{
|
||||
+ struct block_header *tmpblock;
|
||||
+ u32 flindex = slindex / BITS_PER_LONG;
|
||||
+
|
||||
+ pool->freelist[slindex].page = block->link.next_page;
|
||||
+ pool->freelist[slindex].offset = block->link.next_offset;
|
||||
+ block->link.prev_page = 0;
|
||||
+ block->link.prev_offset = 0;
|
||||
+
|
||||
+ if (!pool->freelist[slindex].page) {
|
||||
+ __clear_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
|
||||
+ if (!pool->slbitmap[flindex])
|
||||
+ __clear_bit(flindex, &pool->flbitmap);
|
||||
+ } else {
|
||||
+ /*
|
||||
+ * DEBUG ONLY: We need not reinitialize freelist head previous
|
||||
+ * pointer to 0 - we never depend on its value. But just for
|
||||
+ * sanity, let's do it.
|
||||
+ */
|
||||
+ tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
|
||||
+ pool->freelist[slindex].offset, KM_USER1);
|
||||
+ tmpblock->link.prev_page = 0;
|
||||
+ tmpblock->link.prev_offset = 0;
|
||||
+ put_ptr_atomic(tmpblock, KM_USER1);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Remove block from freelist. Index 'slindex' identifies the freelist.
|
||||
+ */
|
||||
+static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
|
||||
+ struct block_header *block, u32 slindex)
|
||||
+{
|
||||
+ u32 flindex;
|
||||
+ struct block_header *tmpblock;
|
||||
+
|
||||
+ if (pool->freelist[slindex].page == page
|
||||
+ && pool->freelist[slindex].offset == offset) {
|
||||
+ remove_block_head(pool, block, slindex);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
+ flindex = slindex / BITS_PER_LONG;
|
||||
+
|
||||
+ if (block->link.prev_page) {
|
||||
+ tmpblock = get_ptr_atomic(block->link.prev_page,
|
||||
+ block->link.prev_offset, KM_USER1);
|
||||
+ tmpblock->link.next_page = block->link.next_page;
|
||||
+ tmpblock->link.next_offset = block->link.next_offset;
|
||||
+ put_ptr_atomic(tmpblock, KM_USER1);
|
||||
+ }
|
||||
+
|
||||
+ if (block->link.next_page) {
|
||||
+ tmpblock = get_ptr_atomic(block->link.next_page,
|
||||
+ block->link.next_offset, KM_USER1);
|
||||
+ tmpblock->link.prev_page = block->link.prev_page;
|
||||
+ tmpblock->link.prev_offset = block->link.prev_offset;
|
||||
+ put_ptr_atomic(tmpblock, KM_USER1);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Allocate a page and add it to the freelist of the given pool.
|
||||
+ */
|
||||
+static int grow_pool(struct xv_pool *pool, gfp_t flags)
|
||||
+{
|
||||
+ struct page *page;
|
||||
+ struct block_header *block;
|
||||
+
|
||||
+ page = alloc_page(flags);
|
||||
+ if (unlikely(!page))
|
||||
+ return -ENOMEM;
|
||||
+
|
||||
+ stat_inc(&pool->total_pages);
|
||||
+
|
||||
+ spin_lock(&pool->lock);
|
||||
+ block = get_ptr_atomic(page, 0, KM_USER0);
|
||||
+
|
||||
+ block->size = PAGE_SIZE - XV_ALIGN;
|
||||
+ set_flag(block, BLOCK_FREE);
|
||||
+ clear_flag(block, PREV_FREE);
|
||||
+ set_blockprev(block, 0);
|
||||
+
|
||||
+ insert_block(pool, page, 0, block);
|
||||
+
|
||||
+ put_ptr_atomic(block, KM_USER0);
|
||||
+ spin_unlock(&pool->lock);
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Create a memory pool. Allocates freelist, bitmaps and other
|
||||
+ * per-pool metadata.
|
||||
+ */
|
||||
+struct xv_pool *xv_create_pool(void)
|
||||
+{
|
||||
+ u32 ovhd_size;
|
||||
+ struct xv_pool *pool;
|
||||
+
|
||||
+ ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
|
||||
+ pool = kzalloc(ovhd_size, GFP_KERNEL);
|
||||
+ if (!pool)
|
||||
+ return NULL;
|
||||
+
|
||||
+ spin_lock_init(&pool->lock);
|
||||
+
|
||||
+ return pool;
|
||||
+}
|
||||
+
|
||||
+void xv_destroy_pool(struct xv_pool *pool)
|
||||
+{
|
||||
+ kfree(pool);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * xv_malloc - Allocate block of given size from pool.
|
||||
+ * @pool: pool to allocate from
|
||||
+ * @size: size of block to allocate
|
||||
+ * @page: page no. that holds the object
|
||||
+ * @offset: location of object within page
|
||||
+ *
|
||||
+ * On success, <page, offset> identifies block allocated
|
||||
+ * and 0 is returned. On failure, <page, offset> is set to
|
||||
+ * 0 and -ENOMEM is returned.
|
||||
+ *
|
||||
+ * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
|
||||
+ */
|
||||
+int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
|
||||
+ u32 *offset, gfp_t flags)
|
||||
+{
|
||||
+ int error;
|
||||
+ u32 index, tmpsize, origsize, tmpoffset;
|
||||
+ struct block_header *block, *tmpblock;
|
||||
+
|
||||
+ *page = NULL;
|
||||
+ *offset = 0;
|
||||
+ origsize = size;
|
||||
+
|
||||
+ if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
|
||||
+ return -ENOMEM;
|
||||
+
|
||||
+ size = ALIGN(size, XV_ALIGN);
|
||||
+
|
||||
+ spin_lock(&pool->lock);
|
||||
+
|
||||
+ index = find_block(pool, size, page, offset);
|
||||
+
|
||||
+ if (!*page) {
|
||||
+ spin_unlock(&pool->lock);
|
||||
+ if (flags & GFP_NOWAIT)
|
||||
+ return -ENOMEM;
|
||||
+ error = grow_pool(pool, flags);
|
||||
+ if (unlikely(error))
|
||||
+ return error;
|
||||
+
|
||||
+ spin_lock(&pool->lock);
|
||||
+ index = find_block(pool, size, page, offset);
|
||||
+ }
|
||||
+
|
||||
+ if (!*page) {
|
||||
+ spin_unlock(&pool->lock);
|
||||
+ return -ENOMEM;
|
||||
+ }
|
||||
+
|
||||
+ block = get_ptr_atomic(*page, *offset, KM_USER0);
|
||||
+
|
||||
+ remove_block_head(pool, block, index);
|
||||
+
|
||||
+ /* Split the block if required */
|
||||
+ tmpoffset = *offset + size + XV_ALIGN;
|
||||
+ tmpsize = block->size - size;
|
||||
+ tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
|
||||
+ if (tmpsize) {
|
||||
+ tmpblock->size = tmpsize - XV_ALIGN;
|
||||
+ set_flag(tmpblock, BLOCK_FREE);
|
||||
+ clear_flag(tmpblock, PREV_FREE);
|
||||
+
|
||||
+ set_blockprev(tmpblock, *offset);
|
||||
+ if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
|
||||
+ insert_block(pool, *page, tmpoffset, tmpblock);
|
||||
+
|
||||
+ if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
|
||||
+ tmpblock = BLOCK_NEXT(tmpblock);
|
||||
+ set_blockprev(tmpblock, tmpoffset);
|
||||
+ }
|
||||
+ } else {
|
||||
+ /* This block is exact fit */
|
||||
+ if (tmpoffset != PAGE_SIZE)
|
||||
+ clear_flag(tmpblock, PREV_FREE);
|
||||
+ }
|
||||
+
|
||||
+ block->size = origsize;
|
||||
+ clear_flag(block, BLOCK_FREE);
|
||||
+
|
||||
+ put_ptr_atomic(block, KM_USER0);
|
||||
+ spin_unlock(&pool->lock);
|
||||
+
|
||||
+ *offset += XV_ALIGN;
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Free block identified with <page, offset>
|
||||
+ */
|
||||
+void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
|
||||
+{
|
||||
+ void *page_start;
|
||||
+ struct block_header *block, *tmpblock;
|
||||
+
|
||||
+ offset -= XV_ALIGN;
|
||||
+
|
||||
+ spin_lock(&pool->lock);
|
||||
+
|
||||
+ page_start = get_ptr_atomic(page, 0, KM_USER0);
|
||||
+ block = (struct block_header *)((char *)page_start + offset);
|
||||
+
|
||||
+ /* Catch double free bugs */
|
||||
+ BUG_ON(test_flag(block, BLOCK_FREE));
|
||||
+
|
||||
+ block->size = ALIGN(block->size, XV_ALIGN);
|
||||
+
|
||||
+ tmpblock = BLOCK_NEXT(block);
|
||||
+ if (offset + block->size + XV_ALIGN == PAGE_SIZE)
|
||||
+ tmpblock = NULL;
|
||||
+
|
||||
+ /* Merge next block if it's free */
|
||||
+ if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
|
||||
+ /*
|
||||
+ * Blocks smaller than XV_MIN_ALLOC_SIZE
|
||||
+ * are not inserted in any free list.
|
||||
+ */
|
||||
+ if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
|
||||
+ remove_block(pool, page,
|
||||
+ offset + block->size + XV_ALIGN, tmpblock,
|
||||
+ get_index_for_insert(tmpblock->size));
|
||||
+ }
|
||||
+ block->size += tmpblock->size + XV_ALIGN;
|
||||
+ }
|
||||
+
|
||||
+ /* Merge previous block if it's free */
|
||||
+ if (test_flag(block, PREV_FREE)) {
|
||||
+ tmpblock = (struct block_header *)((char *)(page_start) +
|
||||
+ get_blockprev(block));
|
||||
+ offset = offset - tmpblock->size - XV_ALIGN;
|
||||
+
|
||||
+ if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
|
||||
+ remove_block(pool, page, offset, tmpblock,
|
||||
+ get_index_for_insert(tmpblock->size));
|
||||
+
|
||||
+ tmpblock->size += block->size + XV_ALIGN;
|
||||
+ block = tmpblock;
|
||||
+ }
|
||||
+
|
||||
+ /* No used objects in this page. Free it. */
|
||||
+ if (block->size == PAGE_SIZE - XV_ALIGN) {
|
||||
+ put_ptr_atomic(page_start, KM_USER0);
|
||||
+ spin_unlock(&pool->lock);
|
||||
+
|
||||
+ __free_page(page);
|
||||
+ stat_dec(&pool->total_pages);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
+ set_flag(block, BLOCK_FREE);
|
||||
+ if (block->size >= XV_MIN_ALLOC_SIZE)
|
||||
+ insert_block(pool, page, offset, block);
|
||||
+
|
||||
+ if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
|
||||
+ tmpblock = BLOCK_NEXT(block);
|
||||
+ set_flag(tmpblock, PREV_FREE);
|
||||
+ set_blockprev(tmpblock, offset);
|
||||
+ }
|
||||
+
|
||||
+ put_ptr_atomic(page_start, KM_USER0);
|
||||
+ spin_unlock(&pool->lock);
|
||||
+}
|
||||
+
|
||||
+u32 xv_get_object_size(void *obj)
|
||||
+{
|
||||
+ struct block_header *blk;
|
||||
+
|
||||
+ blk = (struct block_header *)((char *)(obj) - XV_ALIGN);
|
||||
+ return blk->size;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * Returns total memory used by allocator (userdata + metadata)
|
||||
+ */
|
||||
+u64 xv_get_total_size_bytes(struct xv_pool *pool)
|
||||
+{
|
||||
+ return pool->total_pages << PAGE_SHIFT;
|
||||
+}
|
||||
diff --git a/drivers/staging/ramzswap/xvmalloc.h b/drivers/staging/ramzswap/xvmalloc.h
|
||||
new file mode 100644
|
||||
index 0000000..010c6fe
|
||||
--- /dev/null
|
||||
+++ b/drivers/staging/ramzswap/xvmalloc.h
|
||||
@@ -0,0 +1,30 @@
|
||||
+/*
|
||||
+ * xvmalloc memory allocator
|
||||
+ *
|
||||
+ * Copyright (C) 2008, 2009 Nitin Gupta
|
||||
+ *
|
||||
+ * This code is released using a dual license strategy: BSD/GPL
|
||||
+ * You can choose the licence that better fits your requirements.
|
||||
+ *
|
||||
+ * Released under the terms of 3-clause BSD License
|
||||
+ * Released under the terms of GNU General Public License Version 2.0
|
||||
+ */
|
||||
+
|
||||
+#ifndef _XV_MALLOC_H_
|
||||
+#define _XV_MALLOC_H_
|
||||
+
|
||||
+#include <linux/types.h>
|
||||
+
|
||||
+struct xv_pool;
|
||||
+
|
||||
+struct xv_pool *xv_create_pool(void);
|
||||
+void xv_destroy_pool(struct xv_pool *pool);
|
||||
+
|
||||
+int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
|
||||
+ u32 *offset, gfp_t flags);
|
||||
+void xv_free(struct xv_pool *pool, struct page *page, u32 offset);
|
||||
+
|
||||
+u32 xv_get_object_size(void *obj);
|
||||
+u64 xv_get_total_size_bytes(struct xv_pool *pool);
|
||||
+
|
||||
+#endif
|
||||
diff --git a/drivers/staging/ramzswap/xvmalloc_int.h b/drivers/staging/ramzswap/xvmalloc_int.h
|
||||
new file mode 100644
|
||||
index 0000000..03c1a65
|
||||
--- /dev/null
|
||||
+++ b/drivers/staging/ramzswap/xvmalloc_int.h
|
||||
@@ -0,0 +1,86 @@
|
||||
+/*
|
||||
+ * xvmalloc memory allocator
|
||||
+ *
|
||||
+ * Copyright (C) 2008, 2009 Nitin Gupta
|
||||
+ *
|
||||
+ * This code is released using a dual license strategy: BSD/GPL
|
||||
+ * You can choose the licence that better fits your requirements.
|
||||
+ *
|
||||
+ * Released under the terms of 3-clause BSD License
|
||||
+ * Released under the terms of GNU General Public License Version 2.0
|
||||
+ */
|
||||
+
|
||||
+#ifndef _XV_MALLOC_INT_H_
|
||||
+#define _XV_MALLOC_INT_H_
|
||||
+
|
||||
+#include <linux/kernel.h>
|
||||
+#include <linux/types.h>
|
||||
+
|
||||
+/* User configurable params */
|
||||
+
|
||||
+/* Must be power of two */
|
||||
+#define XV_ALIGN_SHIFT 2
|
||||
+#define XV_ALIGN (1 << XV_ALIGN_SHIFT)
|
||||
+#define XV_ALIGN_MASK (XV_ALIGN - 1)
|
||||
+
|
||||
+/* This must be greater than sizeof(link_free) */
|
||||
+#define XV_MIN_ALLOC_SIZE 32
|
||||
+#define XV_MAX_ALLOC_SIZE (PAGE_SIZE - XV_ALIGN)
|
||||
+
|
||||
+/* Free lists are separated by FL_DELTA bytes */
|
||||
+#define FL_DELTA_SHIFT 3
|
||||
+#define FL_DELTA (1 << FL_DELTA_SHIFT)
|
||||
+#define FL_DELTA_MASK (FL_DELTA - 1)
|
||||
+#define NUM_FREE_LISTS ((XV_MAX_ALLOC_SIZE - XV_MIN_ALLOC_SIZE) \
|
||||
+ / FL_DELTA + 1)
|
||||
+
|
||||
+#define MAX_FLI DIV_ROUND_UP(NUM_FREE_LISTS, BITS_PER_LONG)
|
||||
+
|
||||
+/* End of user params */
|
||||
+
|
||||
+enum blockflags {
|
||||
+ BLOCK_FREE,
|
||||
+ PREV_FREE,
|
||||
+ __NR_BLOCKFLAGS,
|
||||
+};
|
||||
+
|
||||
+#define FLAGS_MASK XV_ALIGN_MASK
|
||||
+#define PREV_MASK (~FLAGS_MASK)
|
||||
+
|
||||
+struct freelist_entry {
|
||||
+ struct page *page;
|
||||
+ u16 offset;
|
||||
+ u16 pad;
|
||||
+};
|
||||
+
|
||||
+struct link_free {
|
||||
+ struct page *prev_page;
|
||||
+ struct page *next_page;
|
||||
+ u16 prev_offset;
|
||||
+ u16 next_offset;
|
||||
+};
|
||||
+
|
||||
+struct block_header {
|
||||
+ union {
|
||||
+ /* This common header must be ALIGN bytes */
|
||||
+ u8 common[XV_ALIGN];
|
||||
+ struct {
|
||||
+ u16 size;
|
||||
+ u16 prev;
|
||||
+ };
|
||||
+ };
|
||||
+ struct link_free link;
|
||||
+};
|
||||
+
|
||||
+struct xv_pool {
|
||||
+ ulong flbitmap;
|
||||
+ ulong slbitmap[MAX_FLI];
|
||||
+ spinlock_t lock;
|
||||
+
|
||||
+ struct freelist_entry freelist[NUM_FREE_LISTS];
|
||||
+
|
||||
+ /* stats */
|
||||
+ u64 total_pages;
|
||||
+};
|
||||
+
|
||||
+#endif
|
||||
--
|
||||
1.7.0.3
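For orientation, here is a hypothetical in-kernel usage sketch of the xv_* API declared in xvmalloc.h above. The real consumer is the ramzswap block driver added by ramzswap-add.patch (listed in the series file below); demo_pool, demo_store and the GFP_NOIO choice are illustrative assumptions, not driver code.

/*
 * Editor's sketch, not part of the patch set: store a buffer in an
 * xvmalloc pool and free it again.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/types.h>
#include "xvmalloc.h"

static struct xv_pool *demo_pool;	/* assume xv_create_pool() succeeded at init */

static int demo_store(const void *src, u32 len)
{
	struct page *page;
	u32 offset;
	void *dst;

	/* xvmalloc hands back a <page, offset> pair rather than a pointer. */
	if (xv_malloc(demo_pool, len, &page, &offset, GFP_NOIO))
		return -ENOMEM;

	/* Map the page to reach the object, as ramzswap does for each store. */
	dst = kmap_atomic(page, KM_USER0);
	memcpy((char *)dst + offset, src, len);
	kunmap_atomic(dst, KM_USER0);

	/* A real user would keep <page, offset> around; free immediately here. */
	xv_free(demo_pool, page, offset);
	return 0;
}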
@@ -19,3 +19,8 @@
 - bugfix/all/hrtimer-tune-hrtimer_interrupt-hang-logic.patch
 + features/all/phylib-Support-phy-module-autoloading.patch
 + features/all/phylib-Add-module-table-to-all-existing-phy-drivers.patch
++ features/all/ramzswap/xvmalloc-memory-allocator.patch
++ features/all/ramzswap/ramzswap-add.patch
++ features/all/ramzswap/ramzswap-documentation.patch
++ features/all/ramzswap/ramzswap-add-TODO-file.patch
++ features/all/ramzswap/ramzswap-remove-ARM-specific-d-cache-hack.patch