Diffstat (limited to 'drivers/staging/zram/zram_drv.c')
-rw-r--r--  drivers/staging/zram/zram_drv.c   607
1 file changed, 359 insertions, 248 deletions
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 5258c78..e1a4994 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -32,10 +32,6 @@
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
-#ifdef CONFIG_ZRAM_FOR_ANDROID
-#include <linux/swap.h>
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
-
#include "zram_drv.h"
@@ -44,7 +40,7 @@ static int zram_major;
struct zram *zram_devices;
/* Module params (documentation at end) */
-unsigned int num_devices;
+unsigned int zram_num_devices;
static void zram_stat_inc(u32 *v)
{
@@ -137,22 +133,6 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
zram->disksize &= PAGE_MASK;
}
-#ifdef CONFIG_ZRAM_FOR_ANDROID
-/*
- * Swap header (1st page of swap device) contains information
- * about a swap file/partition. Prepare such a header for the
- * given ramzswap device so that swapon can identify it as a
- * swap partition.
- */
-static void setup_swap_header(struct zram *zram, union swap_header *s)
-{
- s->info.version = 1;
- s->info.last_page = (zram->disksize >> PAGE_SHIFT) - 1;
- s->info.nr_badpages = 0;
- memcpy(s->magic.magic, "SWAPSPACE2", 10);
-}
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
-
static void zram_free_page(struct zram *zram, size_t index)
{
u32 clen;
@@ -197,224 +177,355 @@ out:
zram->table[index].offset = 0;
}
-static void handle_zero_page(struct page *page)
+static void handle_zero_page(struct bio_vec *bvec)
{
+ struct page *page = bvec->bv_page;
void *user_mem;
user_mem = kmap_atomic(page, KM_USER0);
- memset(user_mem, 0, PAGE_SIZE);
+ memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
kunmap_atomic(user_mem, KM_USER0);
flush_dcache_page(page);
}
-static void handle_uncompressed_page(struct zram *zram,
- struct page *page, u32 index)
+static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset)
{
+ struct page *page = bvec->bv_page;
unsigned char *user_mem, *cmem;
user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1);
- memcpy(user_mem, cmem, PAGE_SIZE);
- kunmap_atomic(user_mem, KM_USER0);
+ memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
kunmap_atomic(cmem, KM_USER1);
+ kunmap_atomic(user_mem, KM_USER0);
flush_dcache_page(page);
}
-static void zram_read(struct zram *zram, struct bio *bio)
+static inline int is_partial_io(struct bio_vec *bvec)
{
+ return bvec->bv_len != PAGE_SIZE;
+}
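
This helper is the pivot of the rewrite: zram stores data in whole-page units, so any bio vector shorter than PAGE_SIZE must take a read-modify-write path. A standalone sketch of the predicate (the struct and main() below are illustrative, not from the kernel tree):

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/* minimal stand-in for the kernel's struct bio_vec */
	struct bio_vec { unsigned int bv_len, bv_offset; };

	static inline int is_partial_io(struct bio_vec *bvec)
	{
		return bvec->bv_len != PAGE_SIZE;
	}

	int main(void)
	{
		struct bio_vec full = { 4096, 0 };   /* whole page: fast path */
		struct bio_vec part = { 512, 1024 }; /* sub-page: needs read-modify-write */

		printf("%d %d\n", is_partial_io(&full), is_partial_io(&part)); /* 0 1 */
		return 0;
	}
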
- int i;
- u32 index;
- struct bio_vec *bvec;
-
- zram_stat64_inc(zram, &zram->stats.num_reads);
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
+{
+ int ret;
+ size_t clen;
+ struct page *page;
+ struct zobj_header *zheader;
+ unsigned char *user_mem, *cmem, *uncmem = NULL;
- bio_for_each_segment(bvec, bio, i) {
- int ret;
- size_t clen;
- struct page *page;
- struct zobj_header *zheader;
- unsigned char *user_mem, *cmem;
+ page = bvec->bv_page;
- page = bvec->bv_page;
+ if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+ handle_zero_page(bvec);
+ return 0;
+ }
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- handle_zero_page(page);
- index++;
- continue;
- }
+ /* Requested page is not present in compressed area */
+ if (unlikely(!zram->table[index].page)) {
+ pr_debug("Read before write: sector=%lu, size=%u",
+ (ulong)(bio->bi_sector), bio->bi_size);
+ handle_zero_page(bvec);
+ return 0;
+ }
- /* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].page)) {
- pr_debug("Read before write: sector=%lu, size=%u",
- (ulong)(bio->bi_sector), bio->bi_size);
- handle_zero_page(page);
- index++;
- continue;
- }
+ /* Page is stored uncompressed since it's incompressible */
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ handle_uncompressed_page(zram, bvec, index, offset);
+ return 0;
+ }
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- handle_uncompressed_page(zram, page, index);
- index++;
- continue;
+ if (is_partial_io(bvec)) {
+ /* Use a temporary buffer to decompress the page */
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ return -ENOMEM;
}
+ }
- user_mem = kmap_atomic(page, KM_USER0);
- clen = PAGE_SIZE;
+ user_mem = kmap_atomic(page, KM_USER0);
+ if (!is_partial_io(bvec))
+ uncmem = user_mem;
+ clen = PAGE_SIZE;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ zram->table[index].offset;
- ret = lzo1x_decompress_safe(
- cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
- user_mem, &clen);
+ ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
+ xv_get_object_size(cmem) - sizeof(*zheader),
+ uncmem, &clen);
- kunmap_atomic(user_mem, KM_USER0);
- kunmap_atomic(cmem, KM_USER1);
+ if (is_partial_io(bvec)) {
+ memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+ bvec->bv_len);
+ kfree(uncmem);
+ }
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n",
- ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- goto out;
- }
+ kunmap_atomic(cmem, KM_USER1);
+ kunmap_atomic(user_mem, KM_USER0);
- flush_dcache_page(page);
- index++;
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+ zram_stat64_inc(zram, &zram->stats.failed_reads);
+ return ret;
}
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return;
+ flush_dcache_page(page);
-out:
- bio_io_error(bio);
+ return 0;
}
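
The read path above decompresses a whole page even for a sub-page request, then copies out only the requested slice. A sketch of that flow, with decompress_page() as a hypothetical stand-in for the lzo1x_decompress_safe() call plus table lookup:

	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SIZE 4096u

	/* hypothetical: decompress stored page 'index' into 'out' (PAGE_SIZE bytes) */
	extern int decompress_page(unsigned int index, unsigned char *out);

	static int partial_read(unsigned int index, unsigned char *dst,
				unsigned int offset, unsigned int len)
	{
		unsigned char *scratch = malloc(PAGE_SIZE); /* like the GFP_NOIO kmalloc */
		int ret;

		if (!scratch)
			return -1;                          /* kernel code returns -ENOMEM */
		ret = decompress_page(index, scratch);      /* always a full page */
		if (ret == 0)
			memcpy(dst, scratch + offset, len); /* copy just the slice */
		free(scratch);
		return ret;
	}
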
-static void zram_write(struct zram *zram, struct bio *bio)
+static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
- int i;
- u32 index;
- struct bio_vec *bvec;
+ int ret;
+ size_t clen = PAGE_SIZE;
+ struct zobj_header *zheader;
+ unsigned char *cmem;
- zram_stat64_inc(zram, &zram->stats.num_writes);
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ if (zram_test_flag(zram, index, ZRAM_ZERO) ||
+ !zram->table[index].page) {
+ memset(mem, 0, PAGE_SIZE);
+ return 0;
+ }
- bio_for_each_segment(bvec, bio, i) {
- int ret;
- u32 offset;
- size_t clen;
- struct zobj_header *zheader;
- struct page *page, *page_store;
- unsigned char *user_mem, *cmem, *src;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
+ zram->table[index].offset;
+
+ /* Page is stored uncompressed since it's incompressible */
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ memcpy(mem, cmem, PAGE_SIZE);
+ kunmap_atomic(cmem, KM_USER0);
+ return 0;
+ }
+
+ ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
+ xv_get_object_size(cmem) - sizeof(*zheader),
+ mem, &clen);
+ kunmap_atomic(cmem, KM_USER0);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+ zram_stat64_inc(zram, &zram->stats.failed_reads);
+ return ret;
+ }
+
+ return 0;
+}
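
zram_read_before_write() exists for the mirror problem on the write side: a sub-page write must not clobber the bytes around it, so the old page contents are reconstructed first, patched, and recompressed as a whole. In miniature (read_page()/write_page() are hypothetical stand-ins for this helper and the compress-and-store step):

	#include <string.h>

	extern int read_page(unsigned int index, unsigned char *buf);
	extern int write_page(unsigned int index, const unsigned char *buf);

	static int partial_write(unsigned int index, const unsigned char *src,
				 unsigned int offset, unsigned int len)
	{
		unsigned char page[4096];
		int ret = read_page(index, page);  /* old contents, or zeroes */

		if (ret)
			return ret;
		memcpy(page + offset, src, len);   /* splice in the new bytes */
		return write_page(index, page);    /* recompress the whole page */
	}
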
+
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ int offset)
+{
+ int ret;
+ u32 store_offset;
+ size_t clen;
+ struct zobj_header *zheader;
+ struct page *page, *page_store;
+ unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
- page = bvec->bv_page;
- src = zram->compress_buffer;
+ page = bvec->bv_page;
+ src = zram->compress_buffer;
+ if (is_partial_io(bvec)) {
/*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
+ * This is a partial I/O. We need to read the full page
+ * before writing the changes.
*/
- if (zram->table[index].page ||
- zram_test_flag(zram, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
- mutex_lock(&zram->lock);
-
- user_mem = kmap_atomic(page, KM_USER0);
- if (page_zero_filled(user_mem)) {
- kunmap_atomic(user_mem, KM_USER0);
- mutex_unlock(&zram->lock);
- zram_stat_inc(&zram->stats.pages_zero);
- zram_set_flag(zram, index, ZRAM_ZERO);
- index++;
- continue;
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = zram_read_before_write(zram, uncmem, index);
+ if (ret) {
+ kfree(uncmem);
+ goto out;
}
+ }
+
+ /*
+ * System overwrites unused sectors. Free memory associated
+ * with this sector now.
+ */
+ if (zram->table[index].page ||
+ zram_test_flag(zram, index, ZRAM_ZERO))
+ zram_free_page(zram, index);
+
+ user_mem = kmap_atomic(page, KM_USER0);
- ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
- zram->compress_workmem);
+ if (is_partial_io(bvec))
+ memcpy(uncmem + offset, user_mem + bvec->bv_offset,
+ bvec->bv_len);
+ else
+ uncmem = user_mem;
+ if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem, KM_USER0);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+ zram_stat_inc(&zram->stats.pages_zero);
+ zram_set_flag(zram, index, ZRAM_ZERO);
+ ret = 0;
+ goto out;
+ }
- if (unlikely(ret != LZO_E_OK)) {
- mutex_unlock(&zram->lock);
- pr_err("Compression failed! err=%d\n", ret);
- zram_stat64_inc(zram, &zram->stats.failed_writes);
- goto out;
- }
+ ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
+ zram->compress_workmem);
- /*
- * Page is incompressible. Store it as-is (uncompressed)
- * since we do not want to return too many disk write
- * errors which has side effect of hanging the system.
- */
- if (unlikely(clen > max_zpage_size)) {
- clen = PAGE_SIZE;
- page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page_store)) {
- mutex_unlock(&zram->lock);
- pr_info("Error allocating memory for "
- "incompressible page: %u\n", index);
- zram_stat64_inc(zram,
- &zram->stats.failed_writes);
- goto out;
- }
-
- offset = 0;
- zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_inc(&zram->stats.pages_expand);
- zram->table[index].page = page_store;
- src = kmap_atomic(page, KM_USER0);
- goto memstore;
- }
+ kunmap_atomic(user_mem, KM_USER0);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Compression failed! err=%d\n", ret);
+ goto out;
+ }
- if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
- &zram->table[index].page, &offset,
- GFP_NOIO | __GFP_HIGHMEM)) {
- mutex_unlock(&zram->lock);
- pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
- zram_stat64_inc(zram, &zram->stats.failed_writes);
+ /*
+ * Page is incompressible. Store it as-is (uncompressed)
+ * since we do not want to return too many disk write
+ * errors which has side effect of hanging the system.
+ */
+ if (unlikely(clen > max_zpage_size)) {
+ clen = PAGE_SIZE;
+ page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+ if (unlikely(!page_store)) {
+ pr_info("Error allocating memory for "
+ "incompressible page: %u\n", index);
+ ret = -ENOMEM;
goto out;
}
+ store_offset = 0;
+ zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+ zram_stat_inc(&zram->stats.pages_expand);
+ zram->table[index].page = page_store;
+ src = kmap_atomic(page, KM_USER0);
+ goto memstore;
+ }
+
+ if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
+ &zram->table[index].page, &store_offset,
+ GFP_NOIO | __GFP_HIGHMEM)) {
+ pr_info("Error allocating memory for compressed "
+ "page: %u, size=%zu\n", index, clen);
+ ret = -ENOMEM;
+ goto out;
+ }
+
memstore:
- zram->table[index].offset = offset;
+ zram->table[index].offset = store_offset;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ zram->table[index].offset;
#if 0
- /* Back-reference needed for memory defragmentation */
- if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
- zheader = (struct zobj_header *)cmem;
- zheader->table_idx = index;
- cmem += sizeof(*zheader);
- }
+ /* Back-reference needed for memory defragmentation */
+ if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
+ zheader = (struct zobj_header *)cmem;
+ zheader->table_idx = index;
+ cmem += sizeof(*zheader);
+ }
#endif
- memcpy(cmem, src, clen);
+ memcpy(cmem, src, clen);
- kunmap_atomic(cmem, KM_USER1);
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(cmem, KM_USER1);
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+ kunmap_atomic(src, KM_USER0);
+
+ /* Update stats */
+ zram_stat64_add(zram, &zram->stats.compr_size, clen);
+ zram_stat_inc(&zram->stats.pages_stored);
+ if (clen <= PAGE_SIZE / 2)
+ zram_stat_inc(&zram->stats.good_compress);
+
+ return 0;
+
+out:
+ if (ret)
+ zram_stat64_inc(zram, &zram->stats.failed_writes);
+ return ret;
+}
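
One subtlety worth noting in the write path: a page of all zero bytes is recorded with just the ZRAM_ZERO flag and consumes no compressed storage at all. The scan behind page_zero_filled() amounts to a word-wise comparison; a sketch of the idea (signature and word type assumed, not copied from the tree):

	#include <stddef.h>

	static int page_zero_filled(const void *mem, size_t page_size)
	{
		const unsigned long *word = mem;
		size_t i, nwords = page_size / sizeof(*word);

		for (i = 0; i < nwords; i++)
			if (word[i])
				return 0;  /* any set bit disqualifies the page */
		return 1;                  /* all zero: store only a flag */
	}
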
+
+static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+ int offset, struct bio *bio, int rw)
+{
+ int ret;
+
+ if (rw == READ) {
+ down_read(&zram->lock);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
+ up_read(&zram->lock);
+ } else {
+ down_write(&zram->lock);
+ ret = zram_bvec_write(zram, bvec, index, offset);
+ up_write(&zram->lock);
+ }
+
+ return ret;
+}
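
zram_bvec_rw() also carries the locking change of this patch: zram->lock goes from a mutex to a rw_semaphore, so multiple readers can decompress concurrently while writers remain exclusive. The same shape with POSIX rwlocks standing in for down_read()/down_write() (a model, not the kernel API):

	#include <pthread.h>

	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

	static int bvec_rw(int rw) /* 0 = read, 1 = write */
	{
		int ret = 0;

		if (rw == 0) {
			pthread_rwlock_rdlock(&lock);  /* like down_read() */
			/* ... decompress, may run concurrently ... */
			pthread_rwlock_unlock(&lock);
		} else {
			pthread_rwlock_wrlock(&lock);  /* like down_write() */
			/* ... compress and store, exclusive ... */
			pthread_rwlock_unlock(&lock);
		}
		return ret;
	}
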
+
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+ if (*offset + bvec->bv_len >= PAGE_SIZE)
+ (*index)++;
+ *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+}
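
update_position() advances the (page index, intra-page offset) cursor after each segment. A worked example makes the modulo arithmetic concrete (values are illustrative):

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	static void update_position(unsigned int *index, int *offset,
				    unsigned int bv_len)
	{
		if (*offset + bv_len >= PAGE_SIZE)
			(*index)++;                       /* crossed into the next page */
		*offset = (*offset + bv_len) % PAGE_SIZE;
	}

	int main(void)
	{
		unsigned int index = 7;
		int offset = 3584;

		update_position(&index, &offset, 512);    /* ends exactly on a boundary */
		printf("index=%u offset=%d\n", index, offset); /* index=8 offset=0 */
		return 0;
	}
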
+
+static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+{
+ int i, offset;
+ u32 index;
+ struct bio_vec *bvec;
+
+ switch (rw) {
+ case READ:
+ zram_stat64_inc(zram, &zram->stats.num_reads);
+ break;
+ case WRITE:
+ zram_stat64_inc(zram, &zram->stats.num_writes);
+ break;
+ }
+
+ index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
- /* Update stats */
- zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram_stat_inc(&zram->stats.pages_stored);
- if (clen <= PAGE_SIZE / 2)
- zram_stat_inc(&zram->stats.good_compress);
+ bio_for_each_segment(bvec, bio, i) {
+ int max_transfer_size = PAGE_SIZE - offset;
+
+ if (bvec->bv_len > max_transfer_size) {
+ /*
+ * zram_bvec_rw() can only make operation on a single
+ * zram page. Split the bio vector.
+ */
+ struct bio_vec bv;
+
+ bv.bv_page = bvec->bv_page;
+ bv.bv_len = max_transfer_size;
+ bv.bv_offset = bvec->bv_offset;
+
+ if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
+ goto out;
+
+ bv.bv_len = bvec->bv_len - max_transfer_size;
+ bv.bv_offset += max_transfer_size;
+ if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
+ goto out;
+ } else if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) < 0)
+ goto out;
- mutex_unlock(&zram->lock);
- index++;
+ update_position(&index, &offset, bvec);
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -426,17 +537,24 @@ out:
}
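
The loop above never hands zram_bvec_rw() a segment that spans two zram pages: anything larger than the space left in the current page is split in two. Reduced to its arithmetic (do_rw() is a hypothetical stand-in for zram_bvec_rw()):

	#define PAGE_SIZE 4096u

	extern int do_rw(unsigned int index, int offset, unsigned int len);

	static int split_and_rw(unsigned int index, int offset, unsigned int len)
	{
		unsigned int max = PAGE_SIZE - offset;     /* room left in this page */

		if (len <= max)
			return do_rw(index, offset, len);
		if (do_rw(index, offset, max) < 0)         /* tail of current page */
			return -1;
		return do_rw(index + 1, 0, len - max);     /* head of the next page */
	}
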
/*
- * Check if request is within bounds and page aligned.
+ * Check if request is within bounds and aligned on zram logical blocks.
*/
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
- if (unlikely(
- (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
- (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
- (bio->bi_size & (PAGE_SIZE - 1)))) {
+ u64 start, end, bound;
+
+ /* unaligned request */
+ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ return 0;
+ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ return 0;
+ start = bio->bi_sector;
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+ /* out of range */
+ if (unlikely(start >= bound || end > bound || start > end))
return 0;
- }
/* I/O request is valid */
return 1;
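
The arithmetic above is easy to sanity-check in isolation. SECTOR_SHIFT is 9 (512-byte sectors); ZRAM_LOGICAL_BLOCK_SIZE is assumed here to be 4096, per zram_drv.h of this era. A runnable model:

	#include <stdint.h>

	#define SECTOR_SHIFT       9
	#define LOGICAL_BLOCK_SIZE 4096u  /* assumed value of ZRAM_LOGICAL_BLOCK_SIZE */
	#define SECTORS_PER_LB     (LOGICAL_BLOCK_SIZE >> SECTOR_SHIFT)

	static int valid_io_request(uint64_t disksize, uint64_t sector, uint32_t bytes)
	{
		uint64_t start = sector;
		uint64_t end = start + (bytes >> SECTOR_SHIFT);
		uint64_t bound = disksize >> SECTOR_SHIFT;

		if (sector & (SECTORS_PER_LB - 1))      /* start not block-aligned */
			return 0;
		if (bytes & (LOGICAL_BLOCK_SIZE - 1))   /* size not block-aligned */
			return 0;
		return start < bound && end <= bound;   /* wholly inside the device */
	}
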
@@ -445,39 +563,37 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
-static int zram_make_request(struct request_queue *queue, struct bio *bio)
+static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
+ if (unlikely(!zram->init_done) && zram_init_device(zram))
+ goto error;
+
+ down_read(&zram->init_lock);
+ if (unlikely(!zram->init_done))
+ goto error_unlock;
+
if (!valid_io_request(zram, bio)) {
zram_stat64_inc(zram, &zram->stats.invalid_io);
- bio_io_error(bio);
- return 0;
+ goto error_unlock;
}
- if (unlikely(!zram->init_done) && zram_init_device(zram)) {
- bio_io_error(bio);
- return 0;
- }
+ __zram_make_request(zram, bio, bio_data_dir(bio));
+ up_read(&zram->init_lock);
- switch (bio_data_dir(bio)) {
- case READ:
- zram_read(zram, bio);
- break;
-
- case WRITE:
- zram_write(zram, bio);
- break;
- }
+ return;
- return 0;
+error_unlock:
+ up_read(&zram->init_lock);
+error:
+ bio_io_error(bio);
}
-void zram_reset_device(struct zram *zram)
+void __zram_reset_device(struct zram *zram)
{
size_t index;
- mutex_lock(&zram->init_lock);
zram->init_done = 0;
/* Free various per-device buffers */
@@ -514,22 +630,24 @@ void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- mutex_unlock(&zram->init_lock);
+}
+
+void zram_reset_device(struct zram *zram)
+{
+ down_write(&zram->init_lock);
+ __zram_reset_device(zram);
+ up_write(&zram->init_lock);
}
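
The reset path now follows the common locked/unlocked split: __zram_reset_device() assumes the caller already holds init_lock for writing (which lets the init failure path reuse it), while zram_reset_device() is the self-locking wrapper. The pattern in miniature, with a POSIX rwlock standing in for the rw_semaphore (names are illustrative):

	#include <pthread.h>

	static pthread_rwlock_t init_lock = PTHREAD_RWLOCK_INITIALIZER;

	static void __do_reset(void)
	{
		/* caller must hold init_lock for writing */
		/* ... free buffers, clear stats, disksize = 0 ... */
	}

	static void do_reset(void)
	{
		pthread_rwlock_wrlock(&init_lock);
		__do_reset();
		pthread_rwlock_unlock(&init_lock);
	}
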
int zram_init_device(struct zram *zram)
{
int ret;
size_t num_pages;
-#ifdef CONFIG_ZRAM_FOR_ANDROID
- struct page *page;
- union swap_header *swap_header;
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
- mutex_lock(&zram->init_lock);
+ down_write(&zram->init_lock);
if (zram->init_done) {
- mutex_unlock(&zram->init_lock);
+ up_write(&zram->init_lock);
return 0;
}
@@ -539,39 +657,24 @@ int zram_init_device(struct zram *zram)
if (!zram->compress_workmem) {
pr_err("Error allocating compressor working memory!\n");
ret = -ENOMEM;
- goto fail;
+ goto fail_no_table;
}
zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
if (!zram->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
ret = -ENOMEM;
- goto fail;
+ goto fail_no_table;
}
num_pages = zram->disksize >> PAGE_SHIFT;
zram->table = vzalloc(num_pages * sizeof(*zram->table));
if (!zram->table) {
pr_err("Error allocating zram address table\n");
- /* To prevent accessing table entries during cleanup */
- zram->disksize = 0;
ret = -ENOMEM;
- goto fail;
+ goto fail_no_table;
}
-#ifdef CONFIG_ZRAM_FOR_ANDROID
- page = alloc_page(__GFP_ZERO);
- if (!page) {
- pr_err("Error allocating swap header page\n");
- ret = -ENOMEM;
- goto fail;
- }
- zram->table[0].page = page;
- zram_set_flag(zram, 0, ZRAM_UNCOMPRESSED);
- swap_header = kmap(page);
- setup_swap_header(zram, swap_header);
- kunmap(page);
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
/* zram devices sort of resembles non-rotational disks */
@@ -585,20 +688,23 @@ int zram_init_device(struct zram *zram)
}
zram->init_done = 1;
- mutex_unlock(&zram->init_lock);
+ up_write(&zram->init_lock);
pr_debug("Initialization done!\n");
return 0;
+fail_no_table:
+ /* To prevent accessing table entries during cleanup */
+ zram->disksize = 0;
fail:
- mutex_unlock(&zram->init_lock);
- zram_reset_device(zram);
-
+ __zram_reset_device(zram);
+ up_write(&zram->init_lock);
pr_err("Initialization failed: err=%d\n", ret);
return ret;
}
-void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
+static void zram_slot_free_notify(struct block_device *bdev,
+ unsigned long index)
{
struct zram *zram;
@@ -614,17 +720,16 @@ static const struct block_device_operations zram_devops = {
static int create_device(struct zram *zram, int device_id)
{
- int ret = 0;
+ int ret = -ENOMEM;
- mutex_init(&zram->lock);
- mutex_init(&zram->init_lock);
+ init_rwsem(&zram->lock);
+ init_rwsem(&zram->init_lock);
spin_lock_init(&zram->stat64_lock);
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
- ret = -ENOMEM;
goto out;
}
@@ -634,11 +739,9 @@ static int create_device(struct zram *zram, int device_id)
/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
- blk_cleanup_queue(zram->queue);
pr_warning("Error allocating disk structure for device %d\n",
device_id);
- ret = -ENOMEM;
- goto out;
+ goto out_free_queue;
}
zram->disk->major = zram_major;
@@ -667,11 +770,17 @@ static int create_device(struct zram *zram, int device_id)
&zram_disk_attr_group);
if (ret < 0) {
pr_warning("Error creating sysfs group");
- goto out;
+ goto out_free_disk;
}
zram->init_done = 0;
+ return 0;
+out_free_disk:
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+out_free_queue:
+ blk_cleanup_queue(zram->queue);
out:
return ret;
}
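
create_device() now unwinds with the canonical goto ladder: each failure jumps to the label that releases everything acquired so far, in reverse order of acquisition. The generic shape (malloc()/free() stand in for blk_alloc_queue()/alloc_disk() and their cleanup calls):

	#include <stdlib.h>

	static int create(void)
	{
		void *queue, *disk;
		int ret = -1;           /* plays the role of -ENOMEM */

		queue = malloc(64);     /* like blk_alloc_queue() */
		if (!queue)
			goto out;
		disk = malloc(64);      /* like alloc_disk() */
		if (!disk)
			goto out_free_queue;

		return 0;               /* success: both resources kept */

	out_free_queue:
		free(queue);            /* like blk_cleanup_queue() */
	out:
		return ret;
	}
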
@@ -694,9 +803,9 @@ static int __init zram_init(void)
{
int ret, dev_id;
- if (num_devices > max_num_devices) {
+ if (zram_num_devices > max_num_devices) {
pr_warning("Invalid value for num_devices: %u\n",
- num_devices);
+ zram_num_devices);
ret = -EINVAL;
goto out;
}
@@ -708,20 +817,20 @@ static int __init zram_init(void)
goto out;
}
- if (!num_devices) {
+ if (!zram_num_devices) {
pr_info("num_devices not specified. Using default: 1\n");
- num_devices = 1;
+ zram_num_devices = 1;
}
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", num_devices);
- zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
+ pr_info("Creating %u devices ...\n", zram_num_devices);
+ zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
goto unregister;
}
- for (dev_id = 0; dev_id < num_devices; dev_id++) {
+ for (dev_id = 0; dev_id < zram_num_devices; dev_id++) {
ret = create_device(&zram_devices[dev_id], dev_id);
if (ret)
goto free_devices;
@@ -744,12 +853,14 @@ static void __exit zram_exit(void)
int i;
struct zram *zram;
- for (i = 0; i < num_devices; i++) {
+ for (i = 0; i < zram_num_devices; i++) {
zram = &zram_devices[i];
+ get_disk(zram->disk);
destroy_device(zram);
if (zram->init_done)
zram_reset_device(zram);
+ put_disk(zram->disk);
}
unregister_blkdev(zram_major, "zram");
@@ -758,8 +869,8 @@ static void __exit zram_exit(void)
pr_debug("Cleanup done!\n");
}
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of zram devices");
+module_param(zram_num_devices, uint, 0);
+MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");
module_init(zram_init);
module_exit(zram_exit);