Revert "f2fs: handle decompress only post processing in softirq"

This reverts commit edbfe7380b.
Jaegeuk Kim 2022-08-08 17:16:28 -07:00
parent c4f620ec7f
commit a09cf47c4e
3 changed files with 112 additions and 179 deletions
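The reverted change threaded an in_task flag through the read-completion path so that bios needing only decompression could be post-processed directly in softirq context; this revert restores the original behavior of always deferring decryption/decompression work to the post_read_wq workqueue. Below is a minimal, self-contained C sketch of the two dispatch policies seen in the f2fs_read_end_io() hunk further down; it is an illustration paraphrased from the diff, not kernel code, and the helper names dispatch_softirq()/dispatch_reverted() are invented for the example.

/* Illustrative model only: mirrors the dispatch decision in
 * f2fs_read_end_io() before and after this revert. */
#include <stdbool.h>
#include <stdio.h>

#define STEP_DECRYPT	0x1
#define STEP_DECOMPRESS	0x2

/* Reverted (softirq) behavior: a decompress-only bio was handled inline
 * in interrupt context, unless low-memory mode forced the workqueue. */
static const char *dispatch_softirq(unsigned int enabled_steps, bool low_mem)
{
	unsigned int steps = enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS);

	if (steps == STEP_DECOMPRESS && !low_mem)
		return "decompress inline (softirq context)";
	if (steps)
		return "queue to post_read_wq";
	return "finish bio directly";
}

/* Restored behavior after the revert: any decrypt/decompress step is
 * always deferred to the post_read_wq workqueue (task context). */
static const char *dispatch_reverted(unsigned int enabled_steps)
{
	if (enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))
		return "queue to post_read_wq";
	return "finish bio directly";
}

int main(void)
{
	printf("decompress-only, before revert: %s\n",
	       dispatch_softirq(STEP_DECOMPRESS, false));
	printf("decompress-only, after revert:  %s\n",
	       dispatch_reverted(STEP_DECOMPRESS));
	return 0;
}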

fs/f2fs/compress.c

@@ -699,107 +699,14 @@ out:
return ret;
}
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool end_io)
{
const struct f2fs_compress_ops *cops =
f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
int i;
if (end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode)))
return 0;
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages)
return 1;
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i]) {
dic->tpages[i] = dic->rpages[i];
continue;
}
dic->tpages[i] = f2fs_compress_alloc_page();
if (!dic->tpages[i])
return 1;
}
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
if (!dic->rbuf)
return 1;
dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
if (!dic->cbuf)
return 1;
cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
if (cops->init_decompress_ctx) {
int ret = cops->init_decompress_ctx(dic);
if (ret)
return 1;
}
return 0;
}
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
bool bypass_destroy_callback, bool end_io)
{
const struct f2fs_compress_ops *cops =
f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
if (end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode)))
return;
if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
cops->destroy_decompress_ctx(dic);
if (dic->cbuf)
vm_unmap_ram(dic->cbuf, dic->nr_cpages);
if (dic->rbuf)
vm_unmap_ram(dic->rbuf, dic->cluster_size);
}
static void f2fs_free_dic(struct decompress_io_ctx *dic,
bool bypass_destroy_callback)
{
int i;
f2fs_release_decomp_mem(dic, bypass_destroy_callback, false);
if (dic->tpages) {
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i])
continue;
if (!dic->tpages[i])
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
for (i = 0; i < dic->nr_cpages; i++) {
if (!dic->cpages[i])
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
}
page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
bool bypass_callback = false;
int ret;
int i;
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
dic->cluster_size, fi->i_compress_algorithm);
@@ -809,10 +716,41 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
goto out_end_io;
}
if (f2fs_prepare_decomp_mem(dic, true)) {
bypass_callback = true;
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) {
ret = -ENOMEM;
goto out_release;
goto out_end_io;
}
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i]) {
dic->tpages[i] = dic->rpages[i];
continue;
}
dic->tpages[i] = f2fs_compress_alloc_page();
if (!dic->tpages[i]) {
ret = -ENOMEM;
goto out_end_io;
}
}
if (cops->init_decompress_ctx) {
ret = cops->init_decompress_ctx(dic);
if (ret)
goto out_end_io;
}
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
if (!dic->rbuf) {
ret = -ENOMEM;
goto out_destroy_decompress_ctx;
}
dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
if (!dic->cbuf) {
ret = -ENOMEM;
goto out_vunmap_rbuf;
}
dic->clen = le32_to_cpu(dic->cbuf->clen);
@@ -820,7 +758,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
ret = -EFSCORRUPTED;
goto out_release;
goto out_vunmap_cbuf;
}
ret = cops->decompress_pages(dic);
@@ -841,13 +779,17 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
}
}
out_release:
f2fs_release_decomp_mem(dic, bypass_callback, true);
out_vunmap_cbuf:
vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
if (cops->destroy_decompress_ctx)
cops->destroy_decompress_ctx(dic);
out_end_io:
trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
dic->clen, ret);
f2fs_decompress_end_io(dic, ret, in_task);
f2fs_decompress_end_io(dic, ret);
}
/*
@@ -857,7 +799,7 @@ out_end_io:
* (or in the case of a failure, cleans up without actually decompressing).
*/
void f2fs_end_read_compressed_page(struct page *page, bool failed,
block_t blkaddr, bool in_task)
block_t blkaddr)
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
@@ -867,12 +809,12 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
if (failed)
WRITE_ONCE(dic->failed, true);
else if (blkaddr && in_task)
else if (blkaddr)
f2fs_cache_compressed_page(sbi, page,
dic->inode->i_ino, blkaddr);
if (atomic_dec_and_test(&dic->remaining_pages))
f2fs_decompress_cluster(dic, in_task);
f2fs_decompress_cluster(dic);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -1580,14 +1522,16 @@ destroy_out:
return err;
}
static void f2fs_free_dic(struct decompress_io_ctx *dic);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
struct decompress_io_ctx *dic;
pgoff_t start_idx = start_idx_of_cluster(cc);
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
int i;
dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
false, F2FS_I_SB(cc->inode));
if (!dic)
return ERR_PTR(-ENOMEM);
@@ -1628,43 +1572,52 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->cpages[i] = page;
}
if (f2fs_prepare_decomp_mem(dic, false))
goto out_free;
return dic;
out_free:
f2fs_free_dic(dic, true);
f2fs_free_dic(dic);
return ERR_PTR(-ENOMEM);
}
static void f2fs_late_free_dic(struct work_struct *work)
static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
struct decompress_io_ctx *dic =
container_of(work, struct decompress_io_ctx, free_work);
int i;
f2fs_free_dic(dic, false);
if (dic->tpages) {
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i])
continue;
if (!dic->tpages[i])
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
for (i = 0; i < dic->nr_cpages; i++) {
if (!dic->cpages[i])
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
}
page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}
static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
if (refcount_dec_and_test(&dic->refcnt)) {
if (in_task) {
f2fs_free_dic(dic, false);
} else {
INIT_WORK(&dic->free_work, f2fs_late_free_dic);
queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
&dic->free_work);
}
}
if (refcount_dec_and_test(&dic->refcnt))
f2fs_free_dic(dic);
}
/*
* Update and unlock the cluster's pagecache pages, and release the reference to
* the decompress_io_ctx that was being held for I/O completion.
*/
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task)
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
int i;
@@ -1685,7 +1638,7 @@ static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
unlock_page(rpage);
}
f2fs_put_dic(dic, in_task);
f2fs_put_dic(dic);
}
static void f2fs_verify_cluster(struct work_struct *work)
@@ -1702,15 +1655,14 @@ static void f2fs_verify_cluster(struct work_struct *work)
SetPageError(rpage);
}
__f2fs_decompress_end_io(dic, false, true);
__f2fs_decompress_end_io(dic, false);
}
/*
* This is called when a compressed cluster has been decompressed
* (or failed to be read and/or decompressed).
*/
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task)
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
if (!failed && dic->need_verity) {
/*
@@ -1722,7 +1674,7 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
fsverity_enqueue_verify_work(&dic->verity_work);
} else {
__f2fs_decompress_end_io(dic, failed, in_task);
__f2fs_decompress_end_io(dic, failed);
}
}
@@ -1731,12 +1683,12 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
*
* This is called when the page is no longer needed and can be freed.
*/
void f2fs_put_page_dic(struct page *page, bool in_task)
void f2fs_put_page_dic(struct page *page)
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
f2fs_put_dic(dic, in_task);
f2fs_put_dic(dic);
}
/*
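The compress.c hunks above also drop the deferred teardown that the softirq path required: the reverted f2fs_put_dic() took an in_task flag and, when the final reference was dropped outside task context, queued f2fs_late_free_dic() on post_read_wq, since f2fs_free_dic() ends up in vm_unmap_ram(), which is not safe to call from softirq context. After the revert, dropping the last reference frees the decompress_io_ctx directly. The following is a minimal userspace sketch of that deferred-free pattern; the names dic_model, put_dic() and queue_deferred_free() are stand-ins invented for the example, and the "deferred" free here simply runs inline because the model has no workqueue.

/* Illustrative model only: last-reference teardown that must not run in
 * atomic context is handed off when the caller is not in task context. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct dic_model {
	atomic_int refcnt;
};

static void free_dic(struct dic_model *dic)	/* stands in for f2fs_free_dic() */
{
	free(dic);
}

static void queue_deferred_free(struct dic_model *dic)
{
	/* in the kernel this was a queue_work() on post_read_wq; the real
	 * free then ran later, from task context */
	free_dic(dic);
}

static void put_dic(struct dic_model *dic, bool in_task)
{
	/* atomic_fetch_sub() returns the old value: 1 means this call
	 * dropped the last reference */
	if (atomic_fetch_sub(&dic->refcnt, 1) == 1) {
		if (in_task)
			free_dic(dic);		  /* may sleep: free right away */
		else
			queue_deferred_free(dic); /* softirq: defer to task context */
	}
}

int main(void)
{
	struct dic_model *dic = malloc(sizeof(*dic));

	atomic_init(&dic->refcnt, 2);
	put_dic(dic, false);	/* completion in softirq: just drops a reference */
	put_dic(dic, true);	/* last reference from task context: frees now */
	return 0;
}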

fs/f2fs/data.c

@@ -117,7 +117,7 @@ struct bio_post_read_ctx {
block_t fs_blkaddr;
};
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
static void f2fs_finish_read_bio(struct bio *bio)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
@@ -131,9 +131,8 @@ static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
if (f2fs_is_compressed_page(page)) {
if (bio->bi_status)
f2fs_end_read_compressed_page(page, true, 0,
in_task);
f2fs_put_page_dic(page, in_task);
f2fs_end_read_compressed_page(page, true, 0);
f2fs_put_page_dic(page);
continue;
}
@@ -190,7 +189,7 @@ static void f2fs_verify_bio(struct work_struct *work)
fsverity_verify_bio(bio);
}
f2fs_finish_read_bio(bio, true);
f2fs_finish_read_bio(bio);
}
/*
@@ -202,7 +201,7 @@ static void f2fs_verify_bio(struct work_struct *work)
* can involve reading verity metadata pages from the file, and these verity
* metadata pages may be encrypted and/or compressed.
*/
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -210,7 +209,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
INIT_WORK(&ctx->work, f2fs_verify_bio);
fsverity_enqueue_verify_work(&ctx->work);
} else {
f2fs_finish_read_bio(bio, in_task);
f2fs_finish_read_bio(bio);
}
}
@@ -223,8 +222,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
* that the bio includes at least one compressed page. The actual decompression
* is done on a per-cluster basis, not a per-bio basis.
*/
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
bool in_task)
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
@@ -237,7 +235,7 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
/* PG_error was set if decryption failed. */
if (f2fs_is_compressed_page(page))
f2fs_end_read_compressed_page(page, PageError(page),
blkaddr, in_task);
blkaddr);
else
all_compressed = false;
@@ -262,16 +260,15 @@ static void f2fs_post_read_work(struct work_struct *work)
fscrypt_decrypt_bio(ctx->bio);
if (ctx->enabled_steps & STEP_DECOMPRESS)
f2fs_handle_step_decompress(ctx, true);
f2fs_handle_step_decompress(ctx);
f2fs_verify_and_finish_bio(ctx->bio, true);
f2fs_verify_and_finish_bio(ctx->bio);
}
static void f2fs_read_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
struct bio_post_read_ctx *ctx;
bool intask = in_task();
iostat_update_and_unbind_ctx(bio, 0);
ctx = bio->bi_private;
@@ -282,29 +279,16 @@ static void f2fs_read_end_io(struct bio *bio)
}
if (bio->bi_status) {
f2fs_finish_read_bio(bio, intask);
f2fs_finish_read_bio(bio);
return;
}
if (ctx) {
unsigned int enabled_steps = ctx->enabled_steps &
(STEP_DECRYPT | STEP_DECOMPRESS);
/*
* If we have only decompression step between decompression and
* decrypt, we don't need post processing for this.
*/
if (enabled_steps == STEP_DECOMPRESS &&
!f2fs_low_mem_mode(sbi)) {
f2fs_handle_step_decompress(ctx, intask);
} else if (enabled_steps) {
INIT_WORK(&ctx->work, f2fs_post_read_work);
queue_work(ctx->sbi->post_read_wq, &ctx->work);
return;
}
if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
INIT_WORK(&ctx->work, f2fs_post_read_work);
queue_work(ctx->sbi->post_read_wq, &ctx->work);
} else {
f2fs_verify_and_finish_bio(bio);
}
f2fs_verify_and_finish_bio(bio, intask);
}
static void f2fs_write_end_io(struct bio *bio)
@@ -2263,7 +2247,7 @@ skip_reading_dnode:
if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
if (atomic_dec_and_test(&dic->remaining_pages))
f2fs_decompress_cluster(dic, true);
f2fs_decompress_cluster(dic);
continue;
}
@@ -2280,7 +2264,7 @@ submit_and_realloc:
page->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
f2fs_decompress_end_io(dic, ret, true);
f2fs_decompress_end_io(dic, ret);
f2fs_put_dnode(&dn);
*bio_ret = NULL;
return ret;

fs/f2fs/f2fs.h

@@ -1591,7 +1591,6 @@ struct decompress_io_ctx {
void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */
struct work_struct verity_work; /* work to verify the decompressed pages */
struct work_struct free_work; /* work for late free this structure itself */
};
#define NULL_CLUSTER ((unsigned int)(~0))
@@ -4198,9 +4197,9 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
block_t blkaddr, bool in_task);
block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
@@ -4219,9 +4218,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task);
void f2fs_put_page_dic(struct page *page, bool in_task);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
@@ -4267,14 +4265,13 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
bool in_task) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
bool failed, block_t blkaddr, bool in_task)
bool failed, block_t blkaddr)
{
WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page, bool in_task)
static inline void f2fs_put_page_dic(struct page *page)
{
WARN_ON_ONCE(1);
}