Skip to content

Commit bff139b

Browse files
Daeho Jeong authored and Jaegeuk Kim committed
f2fs: handle decompress only post processing in softirq
Now decompression is being handled in a workqueue, which makes read I/O latency non-deterministic because of the non-deterministic scheduling nature of workqueues. So, I made decompression be handled in softirq context when possible — but not on low-memory devices, since this modification keeps decompression-related memory allocated a little longer. Signed-off-by: Daeho Jeong <[email protected]> Reviewed-by: Chao Yu <[email protected]> Signed-off-by: Jaegeuk Kim <[email protected]>
1 parent 90be48b commit bff139b

File tree

3 files changed

+179
-93
lines changed

3 files changed

+179
-93
lines changed

fs/f2fs/compress.c

Lines changed: 135 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -729,14 +729,19 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
729729
return ret;
730730
}
731731

732-
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
732+
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
733+
bool pre_alloc);
734+
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
735+
bool bypass_destroy_callback, bool pre_alloc);
736+
737+
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
733738
{
734739
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
735740
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
736741
const struct f2fs_compress_ops *cops =
737742
f2fs_cops[fi->i_compress_algorithm];
743+
bool bypass_callback = false;
738744
int ret;
739-
int i;
740745

741746
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
742747
dic->cluster_size, fi->i_compress_algorithm);
@@ -746,49 +751,18 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
746751
goto out_end_io;
747752
}
748753

749-
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
750-
if (!dic->tpages) {
751-
ret = -ENOMEM;
752-
goto out_end_io;
753-
}
754-
755-
for (i = 0; i < dic->cluster_size; i++) {
756-
if (dic->rpages[i]) {
757-
dic->tpages[i] = dic->rpages[i];
758-
continue;
759-
}
760-
761-
dic->tpages[i] = f2fs_compress_alloc_page();
762-
if (!dic->tpages[i]) {
763-
ret = -ENOMEM;
764-
goto out_end_io;
765-
}
766-
}
767-
768-
if (cops->init_decompress_ctx) {
769-
ret = cops->init_decompress_ctx(dic);
770-
if (ret)
771-
goto out_end_io;
772-
}
773-
774-
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
775-
if (!dic->rbuf) {
776-
ret = -ENOMEM;
777-
goto out_destroy_decompress_ctx;
778-
}
779-
780-
dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
781-
if (!dic->cbuf) {
782-
ret = -ENOMEM;
783-
goto out_vunmap_rbuf;
754+
ret = f2fs_prepare_decomp_mem(dic, false);
755+
if (ret) {
756+
bypass_callback = true;
757+
goto out_release;
784758
}
785759

786760
dic->clen = le32_to_cpu(dic->cbuf->clen);
787761
dic->rlen = PAGE_SIZE << dic->log_cluster_size;
788762

789763
if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
790764
ret = -EFSCORRUPTED;
791-
goto out_vunmap_cbuf;
765+
goto out_release;
792766
}
793767

794768
ret = cops->decompress_pages(dic);
@@ -809,17 +783,13 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
809783
}
810784
}
811785

812-
out_vunmap_cbuf:
813-
vm_unmap_ram(dic->cbuf, dic->nr_cpages);
814-
out_vunmap_rbuf:
815-
vm_unmap_ram(dic->rbuf, dic->cluster_size);
816-
out_destroy_decompress_ctx:
817-
if (cops->destroy_decompress_ctx)
818-
cops->destroy_decompress_ctx(dic);
786+
out_release:
787+
f2fs_release_decomp_mem(dic, bypass_callback, false);
788+
819789
out_end_io:
820790
trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
821791
dic->clen, ret);
822-
f2fs_decompress_end_io(dic, ret);
792+
f2fs_decompress_end_io(dic, ret, in_task);
823793
}
824794

825795
/*
@@ -829,7 +799,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
829799
* (or in the case of a failure, cleans up without actually decompressing).
830800
*/
831801
void f2fs_end_read_compressed_page(struct page *page, bool failed,
832-
block_t blkaddr)
802+
block_t blkaddr, bool in_task)
833803
{
834804
struct decompress_io_ctx *dic =
835805
(struct decompress_io_ctx *)page_private(page);
@@ -839,12 +809,12 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
839809

840810
if (failed)
841811
WRITE_ONCE(dic->failed, true);
842-
else if (blkaddr)
812+
else if (blkaddr && in_task)
843813
f2fs_cache_compressed_page(sbi, page,
844814
dic->inode->i_ino, blkaddr);
845815

846816
if (atomic_dec_and_test(&dic->remaining_pages))
847-
f2fs_decompress_cluster(dic);
817+
f2fs_decompress_cluster(dic, in_task);
848818
}
849819

850820
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -1552,16 +1522,85 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
15521522
return err;
15531523
}
15541524

1555-
static void f2fs_free_dic(struct decompress_io_ctx *dic);
1525+
static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1526+
bool pre_alloc)
1527+
{
1528+
return pre_alloc ^ f2fs_low_mem_mode(sbi);
1529+
}
1530+
1531+
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1532+
bool pre_alloc)
1533+
{
1534+
const struct f2fs_compress_ops *cops =
1535+
f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1536+
int i;
1537+
1538+
if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1539+
return 0;
1540+
1541+
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1542+
if (!dic->tpages)
1543+
return -ENOMEM;
1544+
1545+
for (i = 0; i < dic->cluster_size; i++) {
1546+
if (dic->rpages[i]) {
1547+
dic->tpages[i] = dic->rpages[i];
1548+
continue;
1549+
}
1550+
1551+
dic->tpages[i] = f2fs_compress_alloc_page();
1552+
if (!dic->tpages[i])
1553+
return -ENOMEM;
1554+
}
1555+
1556+
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1557+
if (!dic->rbuf)
1558+
return -ENOMEM;
1559+
1560+
dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1561+
if (!dic->cbuf)
1562+
return -ENOMEM;
1563+
1564+
if (cops->init_decompress_ctx) {
1565+
int ret = cops->init_decompress_ctx(dic);
1566+
1567+
if (ret)
1568+
return ret;
1569+
}
1570+
1571+
return 0;
1572+
}
1573+
1574+
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1575+
bool bypass_destroy_callback, bool pre_alloc)
1576+
{
1577+
const struct f2fs_compress_ops *cops =
1578+
f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1579+
1580+
if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1581+
return;
1582+
1583+
if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1584+
cops->destroy_decompress_ctx(dic);
1585+
1586+
if (dic->cbuf)
1587+
vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1588+
1589+
if (dic->rbuf)
1590+
vm_unmap_ram(dic->rbuf, dic->cluster_size);
1591+
}
1592+
1593+
static void f2fs_free_dic(struct decompress_io_ctx *dic,
1594+
bool bypass_destroy_callback);
15561595

15571596
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
15581597
{
15591598
struct decompress_io_ctx *dic;
15601599
pgoff_t start_idx = start_idx_of_cluster(cc);
1561-
int i;
1600+
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1601+
int i, ret;
15621602

1563-
dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
1564-
false, F2FS_I_SB(cc->inode));
1603+
dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
15651604
if (!dic)
15661605
return ERR_PTR(-ENOMEM);
15671606

@@ -1587,32 +1626,43 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
15871626
dic->nr_rpages = cc->cluster_size;
15881627

15891628
dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1590-
if (!dic->cpages)
1629+
if (!dic->cpages) {
1630+
ret = -ENOMEM;
15911631
goto out_free;
1632+
}
15921633

15931634
for (i = 0; i < dic->nr_cpages; i++) {
15941635
struct page *page;
15951636

15961637
page = f2fs_compress_alloc_page();
1597-
if (!page)
1638+
if (!page) {
1639+
ret = -ENOMEM;
15981640
goto out_free;
1641+
}
15991642

16001643
f2fs_set_compressed_page(page, cc->inode,
16011644
start_idx + i + 1, dic);
16021645
dic->cpages[i] = page;
16031646
}
16041647

1648+
ret = f2fs_prepare_decomp_mem(dic, true);
1649+
if (ret)
1650+
goto out_free;
1651+
16051652
return dic;
16061653

16071654
out_free:
1608-
f2fs_free_dic(dic);
1609-
return ERR_PTR(-ENOMEM);
1655+
f2fs_free_dic(dic, true);
1656+
return ERR_PTR(ret);
16101657
}
16111658

1612-
static void f2fs_free_dic(struct decompress_io_ctx *dic)
1659+
static void f2fs_free_dic(struct decompress_io_ctx *dic,
1660+
bool bypass_destroy_callback)
16131661
{
16141662
int i;
16151663

1664+
f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1665+
16161666
if (dic->tpages) {
16171667
for (i = 0; i < dic->cluster_size; i++) {
16181668
if (dic->rpages[i])
@@ -1637,17 +1687,33 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
16371687
kmem_cache_free(dic_entry_slab, dic);
16381688
}
16391689

1640-
static void f2fs_put_dic(struct decompress_io_ctx *dic)
1690+
static void f2fs_late_free_dic(struct work_struct *work)
1691+
{
1692+
struct decompress_io_ctx *dic =
1693+
container_of(work, struct decompress_io_ctx, free_work);
1694+
1695+
f2fs_free_dic(dic, false);
1696+
}
1697+
1698+
static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
16411699
{
1642-
if (refcount_dec_and_test(&dic->refcnt))
1643-
f2fs_free_dic(dic);
1700+
if (refcount_dec_and_test(&dic->refcnt)) {
1701+
if (in_task) {
1702+
f2fs_free_dic(dic, false);
1703+
} else {
1704+
INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1705+
queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1706+
&dic->free_work);
1707+
}
1708+
}
16441709
}
16451710

16461711
/*
16471712
* Update and unlock the cluster's pagecache pages, and release the reference to
16481713
* the decompress_io_ctx that was being held for I/O completion.
16491714
*/
1650-
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1715+
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1716+
bool in_task)
16511717
{
16521718
int i;
16531719

@@ -1668,7 +1734,7 @@ static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
16681734
unlock_page(rpage);
16691735
}
16701736

1671-
f2fs_put_dic(dic);
1737+
f2fs_put_dic(dic, in_task);
16721738
}
16731739

16741740
static void f2fs_verify_cluster(struct work_struct *work)
@@ -1685,14 +1751,15 @@ static void f2fs_verify_cluster(struct work_struct *work)
16851751
SetPageError(rpage);
16861752
}
16871753

1688-
__f2fs_decompress_end_io(dic, false);
1754+
__f2fs_decompress_end_io(dic, false, true);
16891755
}
16901756

16911757
/*
16921758
* This is called when a compressed cluster has been decompressed
16931759
* (or failed to be read and/or decompressed).
16941760
*/
1695-
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1761+
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1762+
bool in_task)
16961763
{
16971764
if (!failed && dic->need_verity) {
16981765
/*
@@ -1704,7 +1771,7 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
17041771
INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
17051772
fsverity_enqueue_verify_work(&dic->verity_work);
17061773
} else {
1707-
__f2fs_decompress_end_io(dic, failed);
1774+
__f2fs_decompress_end_io(dic, failed, in_task);
17081775
}
17091776
}
17101777

@@ -1713,12 +1780,12 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
17131780
*
17141781
* This is called when the page is no longer needed and can be freed.
17151782
*/
1716-
void f2fs_put_page_dic(struct page *page)
1783+
void f2fs_put_page_dic(struct page *page, bool in_task)
17171784
{
17181785
struct decompress_io_ctx *dic =
17191786
(struct decompress_io_ctx *)page_private(page);
17201787

1721-
f2fs_put_dic(dic);
1788+
f2fs_put_dic(dic, in_task);
17221789
}
17231790

17241791
/*

0 commit comments

Comments
 (0)