Revert "zram: make deduplication feature optional"

This reverts commit be0c36ce98.

Reason for revert: revert non-upstream code
Bug: 153969530
Test: memory stress test
Signed-off-by: Martin Liu <liumartin@google.com>
Change-Id: I65a52eae96fead55d48a70ec51f842940ddf08a7
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
This commit is contained in:
Martin Liu 2020-04-14 22:22:58 +08:00 committed by spakkkk
parent fc8e375ee8
commit 186ff90d2f
8 changed files with 13 additions and 145 deletions

View File

@ -137,13 +137,3 @@ Description:
The writeback_limit file is read-write and specifies the maximum
amount of writeback ZRAM can do. The limit could be changed
in run time.
What: /sys/block/zram<id>/use_dedup
Date: March 2017
Contact: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Description:
The use_dedup file is read-write and specifies whether the
deduplication feature is used. If enabled, duplicated data is
managed by reference count and will not be stored in memory
twice. The benefit of this feature largely depends on the
workload, so pay attention when using it.

View File

@ -173,7 +173,7 @@ compact WO trigger memory compaction
debug_stat RO this file is used for zram debugging purposes
backing_dev RW set up backend storage for zram to write out
idle WO mark allocated slot as idle
use_dedup RW show and set deduplication feature
User space is advised to use the following files to read the device statistics.

View File

@ -15,20 +15,6 @@ config ZRAM
See Documentation/blockdev/zram.txt for more information.
config ZRAM_DEDUP
bool "Deduplication support for ZRAM data"
depends on ZRAM
default n
help
Deduplicate ZRAM data to reduce amount of memory consumption.
Advantage largely depends on the workload. In some cases, this
option reduces memory usage by half. However, if there is no
duplicated data, the amount of memory consumption would be
increased due to additional metadata usage. And, there is
computation time trade-off. Please check the benefit before
enabling this option. Experiment shows the positive effect when
the zram is used as blockdev and is used to store build output.
config ZRAM_WRITEBACK
bool "Write back incompressible or idle page to backing device"
depends on ZRAM

View File

@ -1,4 +1,3 @@
zram-y := zcomp.o zram_drv.o
zram-$(CONFIG_ZRAM_DEDUP) += zram_dedup.o
zram-y := zcomp.o zram_drv.o zram_dedup.o
obj-$(CONFIG_ZRAM) += zram.o

View File

@ -41,9 +41,6 @@ void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
struct rb_node **rb_node, *parent = NULL;
struct zram_entry *entry;
if (!zram_dedup_enabled(zram))
return;
new->checksum = checksum;
hash = &zram->hash[checksum % zram->hash_size];
rb_root = &hash->rb_root;
@ -151,9 +148,6 @@ struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
void *mem;
struct zram_entry *entry;
if (!zram_dedup_enabled(zram))
return NULL;
mem = kmap_atomic(page);
*checksum = zram_dedup_checksum(mem);
@ -166,9 +160,6 @@ struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
unsigned long handle, unsigned int len)
{
if (!zram_dedup_enabled(zram))
return;
entry->handle = handle;
entry->refcount = 1;
entry->len = len;
@ -176,9 +167,6 @@ void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry)
{
if (!zram_dedup_enabled(zram))
return true;
if (zram_dedup_put(zram, entry))
return false;
@ -190,9 +178,6 @@ int zram_dedup_init(struct zram *zram, size_t num_pages)
int i;
struct zram_hash *hash;
if (!zram_dedup_enabled(zram))
return 0;
zram->hash_size = num_pages >> ZRAM_HASH_SHIFT;
zram->hash_size = min_t(size_t, ZRAM_HASH_SIZE_MAX, zram->hash_size);
zram->hash_size = max_t(size_t, ZRAM_HASH_SIZE_MIN, zram->hash_size);

View File

@ -4,8 +4,6 @@
struct zram;
struct zram_entry;
#ifdef CONFIG_ZRAM_DEDUP
u64 zram_dedup_dup_size(struct zram *zram);
u64 zram_dedup_meta_size(struct zram *zram);
@ -20,26 +18,5 @@ bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry);
int zram_dedup_init(struct zram *zram, size_t num_pages);
void zram_dedup_fini(struct zram *zram);
#else
static inline u64 zram_dedup_dup_size(struct zram *zram) { return 0; }
static inline u64 zram_dedup_meta_size(struct zram *zram) { return 0; }
static inline void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
u32 checksum) { }
static inline struct zram_entry *zram_dedup_find(struct zram *zram,
struct page *page, u32 *checksum) { return NULL; }
static inline void zram_dedup_init_entry(struct zram *zram,
struct zram_entry *entry, unsigned long handle,
unsigned int len) { }
static inline bool zram_dedup_put_entry(struct zram *zram,
struct zram_entry *entry) { return true; }
static inline int zram_dedup_init(struct zram *zram,
size_t num_pages) { return 0; }
static inline void zram_dedup_fini(struct zram *zram) { }
#endif
#endif /* _ZRAM_DEDUP_H_ */

View File

@ -1031,41 +1031,6 @@ static ssize_t comp_algorithm_store(struct device *dev,
return len;
}
static ssize_t use_dedup_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
bool val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
val = zram->use_dedup;
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", (int)val);
}
#ifdef CONFIG_ZRAM_DEDUP
static ssize_t use_dedup_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int val;
struct zram *zram = dev_to_zram(dev);
if (kstrtoint(buf, 10, &val) || (val != 0 && val != 1))
return -EINVAL;
down_write(&zram->init_lock);
if (init_done(zram)) {
up_write(&zram->init_lock);
pr_info("Can't change dedup usage for initialized device\n");
return -EBUSY;
}
zram->use_dedup = val;
up_write(&zram->init_lock);
return len;
}
#endif
static ssize_t compact_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
@ -1183,32 +1148,20 @@ static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);
static unsigned long zram_entry_handle(struct zram *zram,
struct zram_entry *entry)
{
if (zram_dedup_enabled(zram))
return entry->handle;
else
return (unsigned long)entry;
}
static struct zram_entry *zram_entry_alloc(struct zram *zram,
unsigned int len, gfp_t flags)
{
struct zram_entry *entry;
unsigned long handle;
handle = zs_malloc(zram->mem_pool, len, flags);
if (!handle)
return NULL;
if (!zram_dedup_enabled(zram))
return (struct zram_entry *)handle;
entry = kzalloc(sizeof(*entry),
flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
if (!entry) {
zs_free(zram->mem_pool, handle);
if (!entry)
return NULL;
handle = zs_malloc(zram->mem_pool, len, flags);
if (!handle) {
kfree(entry);
return NULL;
}
@ -1223,11 +1176,7 @@ void zram_entry_free(struct zram *zram, struct zram_entry *entry)
if (!zram_dedup_put_entry(zram, entry))
return;
zs_free(zram->mem_pool, zram_entry_handle(zram, entry));
if (!zram_dedup_enabled(zram))
return;
zs_free(zram->mem_pool, entry->handle);
kfree(entry);
atomic64_sub(sizeof(*entry), &zram->stats.meta_data_size);
@ -1363,8 +1312,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
size = zram_get_obj_size(zram, index);
src = zs_map_object(zram->mem_pool,
zram_entry_handle(zram, entry), ZS_MM_RO);
src = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
memcpy(dst, src, PAGE_SIZE);
@ -1378,7 +1326,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
kunmap_atomic(dst);
zcomp_stream_put(zram->comp);
}
zs_unmap_object(zram->mem_pool, zram_entry_handle(zram, entry));
zs_unmap_object(zram->mem_pool, entry->handle);
zram_slot_unlock(zram, index);
/* Should NEVER happen. Return bio error if it does. */
@ -1507,8 +1455,7 @@ compress_again:
return -ENOMEM;
}
dst = zs_map_object(zram->mem_pool,
zram_entry_handle(zram, entry), ZS_MM_WO);
dst = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_WO);
src = zstrm->buffer;
if (comp_len == PAGE_SIZE)
@ -1518,7 +1465,7 @@ compress_again:
kunmap_atomic(src);
zcomp_stream_put(zram->comp);
zs_unmap_object(zram->mem_pool, zram_entry_handle(zram, entry));
zs_unmap_object(zram->mem_pool, entry->handle);
atomic64_add(comp_len, &zram->stats.compr_data_size);
zram_dedup_insert(zram, entry, checksum);
out:
@ -1962,11 +1909,6 @@ static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
#ifdef CONFIG_ZRAM_DEDUP
static DEVICE_ATTR_RW(use_dedup);
#else
static DEVICE_ATTR_RO(use_dedup);
#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@ -1984,7 +1926,6 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_writeback_limit.attr,
&dev_attr_writeback_limit_enable.attr,
#endif
&dev_attr_use_dedup.attr,
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK

View File

@ -134,7 +134,6 @@ struct zram {
* zram is claimed so open request will be failed
*/
bool claim; /* Protected by bdev->bd_mutex */
bool use_dedup;
struct file *backing_dev;
#ifdef CONFIG_ZRAM_WRITEBACK
spinlock_t wb_limit_lock;
@ -150,14 +149,5 @@ struct zram {
#endif
};
static inline bool zram_dedup_enabled(struct zram *zram)
{
#ifdef CONFIG_ZRAM_DEDUP
return zram->use_dedup;
#else
return false;
#endif
}
void zram_entry_free(struct zram *zram, struct zram_entry *entry);
#endif