git.baikalelectronics.ru Git - kernel.git/commitdiff
f2fs: add a way to limit roll forward recovery time
authorJaegeuk Kim <jaegeuk@kernel.org>
Thu, 27 Jan 2022 21:31:43 +0000 (13:31 -0800)
committerJaegeuk Kim <jaegeuk@kernel.org>
Sat, 12 Feb 2022 13:58:18 +0000 (05:58 -0800)
This adds a sysfs entry to call checkpoint during fsync() in order to avoid
long elapsed time to run roll-forward recovery when booting the device.
The default value doesn't enforce the limitation, which is the same behavior as before.

Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Documentation/ABI/testing/sysfs-fs-f2fs
fs/f2fs/checkpoint.c
fs/f2fs/debug.c
fs/f2fs/f2fs.h
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/super.c
fs/f2fs/sysfs.c

index 7b50bf82f14dd044ede405f1d64c37200f4c2fe8..58bf0dc83712dbdca184995a6e33ae8a1fd5d0bc 100644 (file)
@@ -568,3 +568,9 @@ Contact:    "Daeho Jeong" <daehojeong@google.com>
 Description:   You can set the trial count limit for GC urgent high mode with this value.
                If GC thread gets to the limit, the mode will turn back to GC normal mode.
                By default, the value is zero, which means there is no limit like before.
+
+What:          /sys/fs/f2fs/<disk>/max_roll_forward_node_blocks
+Date:          January 2022
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:   Controls max # of node block writes to be used for roll forward
+               recovery. This can limit the roll forward recovery time.
index a13b6b4af220a0282d1eb37e33e0fdd9c3a1608d..203a1577942d3e23f5c295860f413d782487904f 100644 (file)
@@ -1547,6 +1547,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+       percpu_counter_set(&sbi->rf_node_block_count, 0);
 
        /* Here, we have one bio having CP pack except cp pack 2 page */
        f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
index 8c50518475a99a32e7ac9c4f724a982c745ad46b..9a13902c770267a2e3cd202e1331d3b0dcb475d1 100644 (file)
@@ -532,6 +532,9 @@ static int stat_show(struct seq_file *s, void *v)
                           si->ndirty_meta, si->meta_pages);
                seq_printf(s, "  - imeta: %4d\n",
                           si->ndirty_imeta);
+               seq_printf(s, "  - fsync mark: %4lld\n",
+                          percpu_counter_sum_positive(
+                                       &si->sbi->rf_node_block_count));
                seq_printf(s, "  - NATs: %9d/%9d\n  - SITs: %9d/%9d\n",
                           si->dirty_nats, si->nats, si->dirty_sits, si->sits);
                seq_printf(s, "  - free_nids: %9d/%9d\n  - alloc_nids: %9d\n",
index 3b4bf1c3f1ed64fa2fbee09ffe9f03f2857d4431..c9515c3c54fd563c4109f3b5fb7ca9ad707fcc5d 100644 (file)
@@ -917,6 +917,7 @@ struct f2fs_nm_info {
        nid_t max_nid;                  /* maximum possible node ids */
        nid_t available_nids;           /* # of available node ids */
        nid_t next_scan_nid;            /* the next nid to be scanned */
+       nid_t max_rf_node_blocks;       /* max # of nodes for recovery */
        unsigned int ram_thresh;        /* control the memory footprint */
        unsigned int ra_nid_pages;      /* # of nid pages to be readaheaded */
        unsigned int dirty_nats_ratio;  /* control dirty nats ratio threshold */
@@ -1688,6 +1689,8 @@ struct f2fs_sb_info {
        atomic_t nr_pages[NR_COUNT_TYPE];
        /* # of allocated blocks */
        struct percpu_counter alloc_valid_block_count;
+       /* # of node block writes as roll forward recovery */
+       struct percpu_counter rf_node_block_count;
 
        /* writeback control */
        atomic_t wb_sync_req[META];     /* count # of WB_SYNC threads */
index 93512f8859d5da24128dfc788d62727ec07164f8..0d988345757977f138379f0e3d3c46423e1dfe69 100644 (file)
@@ -1782,6 +1782,7 @@ continue_unlock:
 
                        if (!atomic || page == last_page) {
                                set_fsync_mark(page, 1);
+                               percpu_counter_inc(&sbi->rf_node_block_count);
                                if (IS_INODE(page)) {
                                        if (is_inode_flag_set(inode,
                                                                FI_DIRTY_INODE))
@@ -3218,6 +3219,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        nm_i->ram_thresh = DEF_RAM_THRESHOLD;
        nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
        nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
+       nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
 
        INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
        INIT_LIST_HEAD(&nm_i->free_nid_list);
index 18b98cf0465b843076a8e0b2dc531a55e05f0e97..4c1d34bfea7811e3bbb20dd5fc57d34d5802fcec 100644 (file)
@@ -31,6 +31,9 @@
 /* control total # of nats */
 #define DEF_NAT_CACHE_THRESHOLD                        100000
 
+/* control total # of node writes used for roll-forward recovery */
+#define DEF_RF_NODE_BLOCKS                     0
+
 /* vector size for gang look-up from nat cache that consists of radix tree */
 #define NATVEC_SIZE    64
 #define SETVEC_SIZE    32
index 2af503f75b4fa0bf4d693b7aa686eb0f28108153..ab33e474af07020b80baba0e0ee00ac7e179aa26 100644 (file)
@@ -56,6 +56,10 @@ bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
 
        if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
                return false;
+       if (NM_I(sbi)->max_rf_node_blocks &&
+               percpu_counter_sum_positive(&sbi->rf_node_block_count) >=
+                                               NM_I(sbi)->max_rf_node_blocks)
+               return false;
        return true;
 }
 
index 806836184ebc9b2e80ec18ec1fa02dd3a960d161..f816d7d1987d9a36516ee625d7bc668ab2da0b1f 100644 (file)
@@ -1501,8 +1501,9 @@ static void f2fs_free_inode(struct inode *inode)
 
 static void destroy_percpu_info(struct f2fs_sb_info *sbi)
 {
-       percpu_counter_destroy(&sbi->alloc_valid_block_count);
        percpu_counter_destroy(&sbi->total_valid_inode_count);
+       percpu_counter_destroy(&sbi->rf_node_block_count);
+       percpu_counter_destroy(&sbi->alloc_valid_block_count);
 }
 
 static void destroy_device_list(struct f2fs_sb_info *sbi)
@@ -3619,11 +3620,20 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
        if (err)
                return err;
 
+       err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
+       if (err)
+               goto err_valid_block;
+
        err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
                                                                GFP_KERNEL);
        if (err)
-               percpu_counter_destroy(&sbi->alloc_valid_block_count);
+               goto err_node_block;
+       return 0;
 
+err_node_block:
+       percpu_counter_destroy(&sbi->rf_node_block_count);
+err_valid_block:
+       percpu_counter_destroy(&sbi->alloc_valid_block_count);
        return err;
 }
 
index 281bc0133ee6fba9f61e9c306912dc1198825f27..47efcf233afdd79caaf402961cca00e3c0df7315 100644 (file)
@@ -732,6 +732,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, max_roll_forward_node_blocks, max_rf_node_blocks);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
@@ -855,6 +856,7 @@ static struct attribute *f2fs_attrs[] = {
        ATTR_LIST(ram_thresh),
        ATTR_LIST(ra_nid_pages),
        ATTR_LIST(dirty_nats_ratio),
+       ATTR_LIST(max_roll_forward_node_blocks),
        ATTR_LIST(cp_interval),
        ATTR_LIST(idle_interval),
        ATTR_LIST(discard_idle_interval),