block: throttle split bio in case of iops limit
Author:     Ming Lei <ming.lei@redhat.com>
AuthorDate: Wed, 16 Feb 2022 04:45:12 +0000 (12:45 +0800)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Thu, 17 Feb 2022 02:42:28 +0000 (19:42 -0700)
Commit 790c89a6d34a ("block-throttle: avoid double charge") marks a bio as
BIO_THROTTLED unconditionally once __blk_throtl_bio() has been called on it,
so the bio never enters __blk_throtl_bio() again. This is done to avoid a
double charge when a bio is split. That is reasonable for the read/write
throughput (bps) limit, but not for the IOPS limit, because the block layer
accounts each split bio as a separate IO.

Chunguang Xu already observed this issue and addressed it in commit
c349bd4a2046 ("blk-throtl: optimize IOPS throttle for large IO scenarios").
However, that patch only covers the bio splitting done in __blk_queue_split();
bios are also split in other ways, for example via bio_split() followed by
submit_bio_noacct(), as sketched below.
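
As an illustration only (hypothetical driver-style code, not part of this
patch), such a split might look like the following. The remainder is the
original struct bio, so it still carries BIO_THROTTLED from its first pass
through __blk_throtl_bio(), and before this change it was never charged
against the iops limit again:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical example, not code from this patch: split a bio with
 * bio_split() and re-submit both halves via submit_bio_noacct().
 */
static void example_split_and_resubmit(struct bio *bio, int sectors,
				       struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	bio_chain(split, bio);		/* chain split's completion to the remainder */
	submit_bio_noacct(split);	/* front part: a freshly cloned bio */
	submit_bio_noacct(bio);		/* remainder: still flagged BIO_THROTTLED */
}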

This patch fixes the issue in a generic way by always charging the bio
against the iops limit in blk_throtl_bio(). This is reasonable: a
re-submitted or fast-cloned bio is charged if it is submitted to the same
disk/queue, and BIO_THROTTLED is cleared if bio->bi_bdev changes.
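
As a condensed sketch (not literal tree code; it combines the blk-throttle.c
hunks below), the resulting charging rule is: bytes are charged only on the
bio's first pass, while one IO is charged on every pass:

/*
 * Sketch of the behaviour after this patch: skip the bps charge for bios
 * that were already accounted, but charge iops on every pass through
 * blk_throtl_bio().
 */
static void throtl_charge_bio_sketch(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	if (!bio_flagged(bio, BIO_THROTTLED)) {
		/* first submission: charge bytes for the bps limit */
		tg->bytes_disp[rw] += bio_size;
		tg->last_bytes_disp[rw] += bio_size;
	}

	/* every submission, split or not: charge one IO for the iops limit */
	tg->io_disp[rw]++;
	tg->last_io_disp[rw]++;
}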

This new approach yields a much smoother and more stable iops limit than
commit c349bd4a2046 ("blk-throtl: optimize IOPS throttle for large IO
scenarios"), since that commit cannot actually throttle the split bios of
the current submission.

This approach also introduces no new double iops charge for bios dispatched
from blk_throtl_dispatch_work_fn(), since blk_throtl_bio() is not called on
them again.

Reported-by: Ning Li <lining2020x@163.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Chunguang Xu <brookxu@tencent.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220216044514.2903784-7-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-merge.c
block/blk-throttle.c
block/blk-throttle.h

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4de34a332c9fdd7ba5a95632e15885032d13d065..f5255991b773ca9ac4415e91a6d73b018fb0749a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -368,8 +368,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
                trace_block_split(split, (*bio)->bi_iter.bi_sector);
                submit_bio_noacct(*bio);
                *bio = split;
-
-               blk_throtl_charge_bio_split(*bio);
        }
 }
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8770768f1000952f4e58dfc7bc64fe43b9ba6d6d..c7aa26d52e84583357921337f1702d87e3ed6a6c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -807,7 +807,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        unsigned int bio_size = throtl_bio_data_size(bio);
 
-       if (bps_limit == U64_MAX) {
+       /* no need to throttle if this bio's bytes have been accounted */
+       if (bps_limit == U64_MAX || bio_flagged(bio, BIO_THROTTLED)) {
                if (wait)
                        *wait = 0;
                return true;
@@ -919,9 +920,12 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
        unsigned int bio_size = throtl_bio_data_size(bio);
 
        /* Charge the bio to the group */
-       tg->bytes_disp[rw] += bio_size;
+       if (!bio_flagged(bio, BIO_THROTTLED)) {
+               tg->bytes_disp[rw] += bio_size;
+               tg->last_bytes_disp[rw] += bio_size;
+       }
+
        tg->io_disp[rw]++;
-       tg->last_bytes_disp[rw] += bio_size;
        tg->last_io_disp[rw]++;
 
        /*
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 175f03abd9e41673a7e7a8287945057ab8f73dfb..cb43f4417d6ea0014778a45bb403b08f035e7c97 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -170,8 +170,6 @@ static inline bool blk_throtl_bio(struct bio *bio)
 {
        struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
 
-       if (bio_flagged(bio, BIO_THROTTLED))
-               return false;
        if (!tg->has_rules[bio_data_dir(bio)])
                return false;