ANDROID: Incremental fs: Avoid continually recalculating hashes

With a verified file (created with incfs_perf), read throughput measured with
dd after dropping caches increases from 200M/s to 290M/s.
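The gain comes from reading each page-aligned hash block through the backing
file's page cache and flagging the page with PageChecked once its digest has
been verified, so a later read of the same verified file can stop walking the
tree as soon as it reaches an already-checked page. A minimal sketch of that
fetch-and-check step, assuming a page-aligned offset (the helper name and its
arguments are illustrative, not incfs code):

#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/fs.h>

/*
 * Illustrative helper (hypothetical, not part of incfs): read one
 * page-aligned hash block of the backing file through the page cache and
 * report whether an earlier walk already verified it.  PageChecked is
 * reused as a "digest already validated" marker, as in the diff below.
 */
static struct page *fetch_hash_block(struct file *bf, loff_t hash_block_off,
				     bool *already_checked)
{
	struct page *page = read_mapping_page(bf->f_inode->i_mapping,
					      hash_block_off >> PAGE_SHIFT,
					      NULL);

	if (IS_ERR(page))
		return page;

	*already_checked = PageChecked(page);
	return page;
}

The patch itself indexes the page cache with
hash_block_off / INCFS_DATA_FILE_BLOCK_SIZE, which matches the >> PAGE_SHIFT
above only when that block size equals PAGE_SIZE (4096 on the usual Android
targets). Hash blocks at unaligned offsets still go through incfs_kread()
into a scratch buffer and are neither cached nor flagged.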

Test: incfs_test passes
Bug: 155996534
Signed-off-by: Paul Lawrence <paullawrence@google.com>
Change-Id: I7abb5ad92e4167f82f3452acc9db322fec8307dd
Author: Paul Lawrence <paullawrence@google.com>
Date:   2020-05-07 09:26:27 -07:00
parent cf38436cb4
commit adb33b84e3

@@ -8,6 +8,7 @@
 #include <linux/file.h>
 #include <linux/ktime.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/lz4.h>
 #include <linux/crc32.h>
@@ -366,7 +367,8 @@ static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
 }
 
 static int validate_hash_tree(struct file *bf, struct data_file *df,
-			      int block_index, struct mem_range data, u8 *buf)
+			      int block_index, struct mem_range data,
+			      u8 *tmp_buf)
 {
 	u8 digest[INCFS_MAX_HASH_SIZE] = {};
 	struct mtree *tree = NULL;
@@ -379,6 +381,7 @@ static int validate_hash_tree(struct file *bf, struct data_file *df,
 	int hash_per_block;
 	int lvl = 0;
 	int res;
+	struct page *saved_page = NULL;
 
 	tree = df->df_hash_tree;
 	sig = df->df_signature;
@@ -400,17 +403,39 @@ static int validate_hash_tree(struct file *bf, struct data_file *df,
 				INCFS_DATA_FILE_BLOCK_SIZE);
 		size_t hash_off_in_block = hash_block_index * digest_size
 			% INCFS_DATA_FILE_BLOCK_SIZE;
-		struct mem_range buf_range = range(buf,
-					INCFS_DATA_FILE_BLOCK_SIZE);
-		ssize_t read_res = incfs_kread(bf, buf,
-				INCFS_DATA_FILE_BLOCK_SIZE, hash_block_off);
+		struct mem_range buf_range;
+		struct page *page = NULL;
+		bool aligned = (hash_block_off &
+				(INCFS_DATA_FILE_BLOCK_SIZE - 1)) == 0;
+		u8 *actual_buf;
 
-		if (read_res < 0)
-			return read_res;
-		if (read_res != INCFS_DATA_FILE_BLOCK_SIZE)
-			return -EIO;
-
-		saved_digest_rng = range(buf + hash_off_in_block, digest_size);
+		if (aligned) {
+			page = read_mapping_page(
+				bf->f_inode->i_mapping,
+				hash_block_off / INCFS_DATA_FILE_BLOCK_SIZE,
+				NULL);
+
+			if (IS_ERR(page))
+				return PTR_ERR(page);
+
+			actual_buf = page_address(page);
+		} else {
+			size_t read_res =
+				incfs_kread(bf, tmp_buf,
+					    INCFS_DATA_FILE_BLOCK_SIZE,
+					    hash_block_off);
+
+			if (read_res < 0)
+				return read_res;
+			if (read_res != INCFS_DATA_FILE_BLOCK_SIZE)
+				return -EIO;
+
+			actual_buf = tmp_buf;
+		}
+
+		buf_range = range(actual_buf, INCFS_DATA_FILE_BLOCK_SIZE);
+		saved_digest_rng =
+			range(actual_buf + hash_off_in_block, digest_size);
 		if (!incfs_equal_ranges(calc_digest_rng, saved_digest_rng)) {
 			int i;
 			bool zero = true;
@@ -425,9 +450,37 @@ static int validate_hash_tree(struct file *bf, struct data_file *df,
 			if (zero)
 				pr_debug("incfs: Note saved_digest all zero - did you forget to load the hashes?\n");
 
+			if (saved_page)
+				put_page(saved_page);
+			if (page)
+				put_page(page);
+
 			return -EBADMSG;
 		}
 
+		if (saved_page) {
+			/*
+			 * This is something of a kludge. The PageChecked flag
+			 * is reserved for the file system, but we are setting
+			 * this on the pages belonging to the underlying file
+			 * system. incfs is only going to be used on f2fs and
+			 * ext4 which only use this flag when fs-verity is being
+			 * used, so this is safe for now, however a better
+			 * mechanism needs to be found.
+			 */
+			SetPageChecked(saved_page);
+			put_page(saved_page);
+			saved_page = NULL;
+		}
+
+		if (page && PageChecked(page)) {
+			put_page(page);
+			return 0;
+		}
+
+		saved_page = page;
+		page = NULL;
+
 		res = incfs_calc_digest(tree->alg, buf_range, calc_digest_rng);
 		if (res)
 			return res;
@@ -437,8 +490,15 @@ static int validate_hash_tree(struct file *bf, struct data_file *df,
 	root_hash_rng = range(tree->root_hash, digest_size);
 	if (!incfs_equal_ranges(calc_digest_rng, root_hash_rng)) {
 		pr_debug("incfs: Root hash mismatch blk:%d\n", block_index);
+		if (saved_page)
+			put_page(saved_page);
 		return -EBADMSG;
 	}
 
+	if (saved_page) {
+		SetPageChecked(saved_page);
+		put_page(saved_page);
+	}
+
 	return 0;
 }
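For readers piecing the hunks together, the shape of the new per-level walk
can be summarised in one self-contained sketch. This is a hypothetical model
rather than incfs code: walk_hash_tree_model() and verify_level() do not
exist in the driver, the digest comparison and incfs_calc_digest() steps are
collapsed into the callback, and error handling is simplified.

#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/err.h>

/* Hypothetical model of the walk in validate_hash_tree() after this patch. */
static int walk_hash_tree_model(struct address_space *mapping,
				const pgoff_t *level_page, int depth,
				int (*verify_level)(struct page *page, int lvl))
{
	struct page *saved_page = NULL;
	int lvl;

	for (lvl = 0; lvl < depth; lvl++) {
		/* One page-cache lookup per tree level. */
		struct page *page = read_mapping_page(mapping,
						      level_page[lvl], NULL);
		int err;

		if (IS_ERR(page)) {
			if (saved_page)
				put_page(saved_page);
			return PTR_ERR(page);
		}

		/* Stand-in for the saved-vs-calculated digest comparison. */
		err = verify_level(page, lvl);
		if (err) {
			if (saved_page)
				put_page(saved_page);
			put_page(page);
			return err;
		}

		/* The previous level matched this one, so mark it verified. */
		if (saved_page) {
			SetPageChecked(saved_page);
			put_page(saved_page);
		}

		/* Fast path: this page was already validated up to the root. */
		if (PageChecked(page)) {
			put_page(page);
			return 0;
		}

		saved_page = page;
	}

	/*
	 * The root hash comparison would happen here; on success the last
	 * saved_page is also marked checked, as in the final hunk above.
	 */
	if (saved_page) {
		SetPageChecked(saved_page);
		put_page(saved_page);
	}
	return 0;
}

The early return on PageChecked(page) is what turns repeated reads of a
verified file into a single cached-page lookup per data block instead of a
full hash recomputation, which is where the 200M/s to 290M/s improvement in
the commit message comes from.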