zram: use common zpool interface
Change ZRAM to use the zpool API. This patch allows any zpool-compatible
allocation backend to be used with ZRAM. It is meant to make no
functional changes to ZRAM.

A zpool-registered backend can be selected via the module parameter or the
kernel boot string; 'zsmalloc' is used by default. (A usage sketch follows
below.)

Signed-off-by: Vitaly Wool <[email protected]>
Signed-off-by: Diab Neiroukh <[email protected]>
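
For illustration only, not part of this patch: with the new 'backend' module
parameter, a different zpool-registered allocator could be selected at load
time, e.g. "modprobe zram backend=z3fold" (assuming a z3fold backend is built
and zpool-registered), or with "zram.backend=z3fold" on the kernel command
line when zram is built in. Since the parameter is registered with S_IRUGO,
the active value can be read back from /sys/module/zram/parameters/backend.

The minimal, hypothetical kernel-module sketch below shows the zpool calls
this conversion switches zram to, in roughly the order the driver uses them
(create, allocate, map/copy/unmap, free, destroy). The module name, pool
name, and payload are illustrative and error handling is trimmed; this is a
sketch of the zpool API as used by the patch, not part of the patch itself.

/* zpool usage sketch -- illustrative only */
#include <linux/module.h>
#include <linux/zpool.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int __init zpool_sketch_init(void)
{
	struct zpool *pool;
	unsigned long handle;
	static const char payload[] = "compressed page data would go here";
	void *dst;
	int ret;

	/* create a pool on a zpool-registered backend, as zram_meta_alloc() now does */
	pool = zpool_create_pool("zsmalloc", "sketch", GFP_NOIO, NULL);
	if (!pool)
		return -ENOMEM;

	/* allocate an object; the handle is returned through the out-parameter */
	ret = zpool_malloc(pool, sizeof(payload),
			   __GFP_KSWAPD_RECLAIM | __GFP_NOWARN |
			   __GFP_HIGHMEM | __GFP_MOVABLE, &handle);
	if (ret) {
		zpool_destroy_pool(pool);
		return ret;
	}

	/* map the object for writing, copy into it, unmap -- mirrors __zram_bvec_write() */
	dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memcpy(dst, payload, sizeof(payload));
	zpool_unmap_handle(pool, handle);

	/* the total size is reported in bytes, unlike zs_get_total_pages() */
	pr_info("zpool sketch: pool size is %llu bytes\n",
		zpool_get_total_size(pool));

	zpool_free(pool, handle);
	zpool_destroy_pool(pool);
	return 0;
}

static void __exit zpool_sketch_exit(void)
{
}

module_init(zpool_sketch_init);
module_exit(zpool_sketch_exit);
MODULE_LICENSE("GPL");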
vwool authored and lzlrd committed Jan 26, 2021
1 parent 80657a6 commit 0410625
Showing 3 changed files with 38 additions and 31 deletions.
3 changes: 2 additions & 1 deletion drivers/block/zram/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
depends on BLOCK && SYSFS && CRYPTO
select CRYPTO_LZO
select ZPOOL
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
Pages written to these disks are compressed and stored in memory
62 changes: 34 additions & 28 deletions drivers/block/zram/zram_drv.c
@@ -44,6 +44,9 @@ static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = "lzo-rle";

#define BACKEND_PAR_BUF_SIZE 32
static char backend_par_buf[BACKEND_PAR_BUF_SIZE];

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
@@ -284,7 +287,7 @@ static ssize_t mem_used_max_store(struct device *dev,
down_read(&zram->init_lock);
if (init_done(zram)) {
atomic_long_set(&zram->stats.max_used_pages,
zs_get_total_pages(zram->mem_pool));
zpool_get_total_size(zram->mem_pool) >> PAGE_SHIFT);
}
up_read(&zram->init_lock);

@@ -1026,7 +1029,7 @@ static ssize_t compact_store(struct device *dev,
return -EINVAL;
}

zs_compact(zram->mem_pool);
zpool_compact(zram->mem_pool);
up_read(&zram->init_lock);

return len;
@@ -1054,17 +1057,14 @@ static ssize_t mm_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
struct zs_pool_stats pool_stats;
u64 orig_size, mem_used = 0;
long max_used;
long max_used, num_compacted = 0;
ssize_t ret;

memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

down_read(&zram->init_lock);
if (init_done(zram)) {
mem_used = zs_get_total_pages(zram->mem_pool);
zs_pool_stats(zram->mem_pool, &pool_stats);
mem_used = zpool_get_total_size(zram->mem_pool);
num_compacted = zpool_get_num_compacted(zram->mem_pool);
}

orig_size = atomic64_read(&zram->stats.pages_stored);
@@ -1074,11 +1074,11 @@ static ssize_t mm_stat_show(struct device *dev,
"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
mem_used << PAGE_SHIFT,
mem_used,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.same_pages),
pool_stats.pages_compacted,
num_compacted,
(u64)atomic64_read(&zram->stats.huge_pages));
up_read(&zram->init_lock);

@@ -1139,27 +1139,30 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
for (index = 0; index < num_pages; index++)
zram_free_page(zram, index);

zs_destroy_pool(zram->mem_pool);
zpool_destroy_pool(zram->mem_pool);
vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
size_t num_pages;
char *backend;

num_pages = disksize >> PAGE_SHIFT;
zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
if (!zram->table)
return false;

zram->mem_pool = zs_create_pool(zram->disk->disk_name);
backend = strlen(backend_par_buf) ? backend_par_buf : "zsmalloc";
zram->mem_pool = zpool_create_pool(backend, zram->disk->disk_name,
GFP_NOIO, NULL);
if (!zram->mem_pool) {
vfree(zram->table);
return false;
}

if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
huge_class_size = zpool_huge_class_size(zram->mem_pool);
return true;
}

@@ -1203,7 +1206,7 @@ static void zram_free_page(struct zram *zram, size_t index)
if (!handle)
return;

zs_free(zram->mem_pool, handle);
zpool_free(zram->mem_pool, handle);

atomic64_sub(zram_get_obj_size(zram, index),
&zram->stats.compr_data_size);
@@ -1256,7 +1259,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
if (size != PAGE_SIZE)
zstrm = zcomp_stream_get(zram->comp);

src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
src = zpool_map_handle(zram->mem_pool, handle, ZPOOL_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
memcpy(dst, src, PAGE_SIZE);
@@ -1268,7 +1271,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
kunmap_atomic(dst);
zcomp_stream_put(zram->comp);
}
zs_unmap_object(zram->mem_pool, handle);
zpool_unmap_handle(zram->mem_pool, handle);
zram_slot_unlock(zram, index);

/* Should NEVER happen. Return bio error if it does. */
@@ -1343,7 +1346,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
zs_free(zram->mem_pool, handle);
zpool_free(zram->mem_pool, handle);
return ret;
}

@@ -1362,33 +1365,34 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
* if we have a 'non-null' handle here then we are coming
* from the slow path and handle has already been allocated.
*/
if (!handle)
handle = zs_malloc(zram->mem_pool, comp_len,
if (handle == 0)
ret = zpool_malloc(zram->mem_pool, comp_len,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE);
if (!handle) {
__GFP_MOVABLE,
&handle);
if (ret) {
zcomp_stream_put(zram->comp);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
ret = zpool_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
if (handle)
__GFP_MOVABLE, &handle);
if (ret == 0)
goto compress_again;
return -ENOMEM;
}

alloced_pages = zs_get_total_pages(zram->mem_pool);
alloced_pages = zpool_get_total_size(zram->mem_pool) >> PAGE_SHIFT;
update_used_max(zram, alloced_pages);

if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zcomp_stream_put(zram->comp);
zs_free(zram->mem_pool, handle);
zpool_free(zram->mem_pool, handle);
return -ENOMEM;
}

dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
dst = zpool_map_handle(zram->mem_pool, handle, ZPOOL_MM_WO);

src = zstrm->buffer;
if (comp_len == PAGE_SIZE)
@@ -1398,7 +1402,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
kunmap_atomic(src);

zcomp_stream_put(zram->comp);
zs_unmap_object(zram->mem_pool, handle);
zpool_unmap_handle(zram->mem_pool, handle);
atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
/*
@@ -2145,6 +2149,8 @@ module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
module_param_string(backend, backend_par_buf, BACKEND_PAR_BUF_SIZE, S_IRUGO);
MODULE_PARM_DESC(backend, "Compression storage (backend) name");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <[email protected]>");
4 changes: 2 additions & 2 deletions drivers/block/zram/zram_drv.h
@@ -16,7 +16,7 @@
#define _ZRAM_DRV_H_

#include <linux/rwsem.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/crypto.h>

#include "zcomp.h"
@@ -91,7 +91,7 @@ struct zram_stats {

struct zram {
struct zram_table_entry *table;
struct zs_pool *mem_pool;
struct zpool *mem_pool;
struct zcomp *comp;
struct gendisk *disk;
/* Prevent concurrent execution of device init */
