git.baikalelectronics.ru Git - kernel.git/commitdiff
block: move blk_account_io_{start,done} to blk-mq.c
author Christoph Hellwig <hch@lst.de>
Wed, 17 Nov 2021 06:14:01 +0000 (07:14 +0100)
committer Jens Axboe <axboe@kernel.dk>
Mon, 29 Nov 2021 13:34:51 +0000 (06:34 -0700)
These are only used for request based I/O, so move them where they are
used.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
block/blk.h

index 65891f058f3db2b07a192c1891dec384377c8a7f..29c4db03742b425af138d7598ed69fe3b9bbf084 100644 (file)
@@ -1064,8 +1064,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 }
 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
 
-static void update_io_ticks(struct block_device *part, unsigned long now,
-               bool end)
+void update_io_ticks(struct block_device *part, unsigned long now, bool end)
 {
        unsigned long stamp;
 again:
@@ -1080,30 +1079,6 @@ again:
        }
 }
 
-void __blk_account_io_done(struct request *req, u64 now)
-{
-       const int sgrp = op_stat_group(req_op(req));
-
-       part_stat_lock();
-       update_io_ticks(req->part, jiffies, true);
-       part_stat_inc(req->part, ios[sgrp]);
-       part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
-       part_stat_unlock();
-}
-
-void __blk_account_io_start(struct request *rq)
-{
-       /* passthrough requests can hold bios that do not have ->bi_bdev set */
-       if (rq->bio && rq->bio->bi_bdev)
-               rq->part = rq->bio->bi_bdev;
-       else
-               rq->part = rq->rq_disk->part0;
-
-       part_stat_lock();
-       update_io_ticks(rq->part, jiffies, false);
-       part_stat_unlock();
-}
-
 static unsigned long __part_start_io_acct(struct block_device *part,
                                          unsigned int sectors, unsigned int op)
 {
index cd5f31c4d2fd6ecb081b738fc7aef5f9c3fe8580..3921c7cfd64c4a26efbb8ba8dd2742de5458eafe 100644 (file)
@@ -809,6 +809,48 @@ bool blk_update_request(struct request *req, blk_status_t error,
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
+static void __blk_account_io_done(struct request *req, u64 now)
+{
+       const int sgrp = op_stat_group(req_op(req));
+
+       part_stat_lock();
+       update_io_ticks(req->part, jiffies, true);
+       part_stat_inc(req->part, ios[sgrp]);
+       part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+       part_stat_unlock();
+}
+
+static inline void blk_account_io_done(struct request *req, u64 now)
+{
+       /*
+        * Account IO completion.  flush_rq isn't accounted as a
+        * normal IO on queueing nor completion.  Accounting the
+        * containing request is enough.
+        */
+       if (blk_do_io_stat(req) && req->part &&
+           !(req->rq_flags & RQF_FLUSH_SEQ))
+               __blk_account_io_done(req, now);
+}
+
+static void __blk_account_io_start(struct request *rq)
+{
+       /* passthrough requests can hold bios that do not have ->bi_bdev set */
+       if (rq->bio && rq->bio->bi_bdev)
+               rq->part = rq->bio->bi_bdev;
+       else
+               rq->part = rq->rq_disk->part0;
+
+       part_stat_lock();
+       update_io_ticks(rq->part, jiffies, false);
+       part_stat_unlock();
+}
+
+static inline void blk_account_io_start(struct request *req)
+{
+       if (blk_do_io_stat(req))
+               __blk_account_io_start(req);
+}
+
 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
 {
        if (rq->rq_flags & RQF_STATS) {
index 8a3761b6dc33d41a42812a76aea6fb3091234d31..50aae8c0e03ce266e37b68dcaf06254ec6b72ab0 100644 (file)
@@ -257,9 +257,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
                        struct bio *bio, unsigned int nr_segs);
 
-void __blk_account_io_start(struct request *req);
-void __blk_account_io_done(struct request *req, u64 now);
-
 /*
  * Plug flush limits
  */
@@ -350,23 +347,7 @@ static inline bool blk_do_io_stat(struct request *rq)
        return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
 }
 
-static inline void blk_account_io_done(struct request *req, u64 now)
-{
-       /*
-        * Account IO completion.  flush_rq isn't accounted as a
-        * normal IO on queueing nor completion.  Accounting the
-        * containing request is enough.
-        */
-       if (blk_do_io_stat(req) && req->part &&
-           !(req->rq_flags & RQF_FLUSH_SEQ))
-               __blk_account_io_done(req, now);
-}
-
-static inline void blk_account_io_start(struct request *req)
-{
-       if (blk_do_io_stat(req))
-               __blk_account_io_start(req);
-}
+void update_io_ticks(struct block_device *part, unsigned long now, bool end);
 
 static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 {