UPSTREAM: erofs: use sync decompression for atomic contexts only

Sync decompression was introduced to get rid of the scheduling overhead of
an additional kworker. But there is no such overhead in non-atomic
contexts, so it is better to turn sync decompression off there and avoid
having the current thread wait in z_erofs_runqueue.

Link: https://lore.kernel.org/r/20210317035448.13921-3-huangjianan@oppo.com
Reviewed-by: Gao Xiang <hsiangkao@redhat.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Huang Jianan <huangjianan@oppo.com>
Signed-off-by: Guo Weichao <guoweichao@oppo.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>

Bug: 190585249
Change-Id: I7e03053d690b9cda4fe78c0ed0e2fbb0ba188d38
(cherry picked from commit 30048cdac4b92f39ee50e2a1344f5899f8e70cb6)
Signed-off-by: Huang Jianan <huangjianan@oppo.com>
3 changed files with 9 additions and 2 deletions

fs/erofs/internal.h
@@ -50,6 +50,8 @@ struct erofs_fs_context {
 #ifdef CONFIG_EROFS_FS_ZIP
 	/* current strategy of how to use managed cache */
 	unsigned char cache_strategy;
+	/* strategy of sync decompression (false - auto, true - force on) */
+	bool readahead_sync_decompress;
 
 	/* threshold for decompression synchronously */
 	unsigned int max_sync_decompress_pages;

fs/erofs/super.c
@@ -201,6 +201,7 @@ static void erofs_default_options(struct erofs_fs_context *ctx)
 #ifdef CONFIG_EROFS_FS_ZIP
 	ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
 	ctx->max_sync_decompress_pages = 3;
+	ctx->readahead_sync_decompress = false;
 #endif
 #ifdef CONFIG_EROFS_FS_XATTR
 	set_opt(ctx, XATTR_USER);

fs/erofs/zdata.c
@@ -713,6 +713,8 @@ static void z_erofs_decompressqueue_work(struct work_struct *work);
 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 				       bool sync, int bios)
 {
+	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
 	/* wake up the caller thread for sync decompression */
 	if (sync) {
 		unsigned long flags;
@@ -726,9 +728,10 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 	if (atomic_add_return(bios, &io->pending_bios))
 		return;
-	/* Use workqueue decompression for atomic contexts only */
+	/* Use workqueue and sync decompression for atomic contexts only */
 	if (in_atomic() || irqs_disabled()) {
 		queue_work(z_erofs_workqueue, &io->u.work);
+		sbi->ctx.readahead_sync_decompress = true;
 		return;
 	}
 	z_erofs_decompressqueue_work(&io->u.work);
 }
@@ -1340,7 +1343,8 @@ static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
 	struct inode *const inode = mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 
-	bool sync = (nr_pages <= sbi->ctx.max_sync_decompress_pages);
+	bool sync = (sbi->ctx.readahead_sync_decompress &&
+		     nr_pages <= sbi->ctx.max_sync_decompress_pages);
 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
 	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 	struct page *head = NULL;
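
Read together, the hunks amount to a small piece of state: readahead asks for
synchronous decompression only after an end-of-I/O kickoff has actually been
seen in atomic context, and only for requests of at most
max_sync_decompress_pages pages. The user-space sketch below condenses that
logic for orientation; erofs_ctx_sketch, kickoff_sketch and want_sync_sketch
are illustrative stand-ins, not kernel symbols.

/*
 * Minimal user-space sketch of the gating logic added by this patch;
 * not kernel code, names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct erofs_ctx_sketch {
	bool readahead_sync_decompress;		/* starts false ("auto") */
	unsigned int max_sync_decompress_pages;	/* 3 in erofs_default_options() */
};

/*
 * End-of-I/O kickoff: atomic contexts must defer to a worker, and that is
 * also the moment sync decompression becomes worth requesting from the
 * readahead side.
 */
static void kickoff_sketch(struct erofs_ctx_sketch *ctx, bool atomic_ctx)
{
	if (atomic_ctx) {
		ctx->readahead_sync_decompress = true;
		printf("atomic context: defer to workqueue\n");
		return;
	}
	printf("non-atomic context: decompress inline, no kworker round-trip\n");
}

/*
 * Readahead side: request sync decompression only once an atomic kickoff
 * has been observed and the request is small enough.
 */
static bool want_sync_sketch(const struct erofs_ctx_sketch *ctx,
			     unsigned int nr_pages)
{
	return ctx->readahead_sync_decompress &&
	       nr_pages <= ctx->max_sync_decompress_pages;
}

int main(void)
{
	struct erofs_ctx_sketch ctx = { false, 3 };

	printf("sync before any atomic kickoff: %d\n", want_sync_sketch(&ctx, 2));
	kickoff_sketch(&ctx, true);	/* e.g. completion in irq context */
	printf("sync after atomic kickoff:      %d\n", want_sync_sketch(&ctx, 2));
	printf("sync for a large request:       %d\n", want_sync_sketch(&ctx, 8));
	return 0;
}

Running the sketch shows sync mode staying off until the first atomic kickoff
flips readahead_sync_decompress, and staying off for requests larger than the
threshold even afterwards.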