block: factor out a blk_try_enter_queue helper
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Wed, 29 Sep 2021 07:12:38 +0000 (09:12 +0200)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Sat, 16 Oct 2021 03:02:44 +0000 (21:02 -0600)
Factor out the code that tries to get q_usage_counter without blocking
into a separate helper.  This both improves code readability and
prepares for splitting bio_queue_enter from blk_queue_enter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20210929071241.934472-3-hch@lst.de
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
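
For context, a minimal sketch of the kind of caller this prepares for: a
bio-based enter path that can use blk_try_enter_queue() directly instead of
going through blk_queue_enter().  This is illustrative only, not the actual
follow-up patch; the function name, the REQ_NOWAIT handling, and the
simplified wait condition are all assumptions:

	/*
	 * Illustrative sketch only: roughly how a bio-based caller could
	 * use blk_try_enter_queue() once bio_queue_enter() is split off.
	 * The REQ_NOWAIT handling and the simplified wait condition are
	 * assumptions, not the actual follow-up change.
	 */
	static int example_bio_queue_enter(struct request_queue *q,
					   struct bio *bio)
	{
		while (!blk_try_enter_queue(q, false)) {
			if (bio->bi_opf & REQ_NOWAIT)
				return -EBUSY;	/* caller errors out the bio */

			/* see the barrier comment in blk_queue_enter() below */
			smp_rmb();
			wait_event(q->mq_freeze_wq,
				   !q->mq_freeze_depth || blk_queue_dying(q));
			if (blk_queue_dying(q))
				return -ENODEV;
		}
		return 0;
	}
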
diff --git a/block/blk-core.c b/block/blk-core.c
index c071f1a90b1048a9f8c092d2437a38421568b4ab..7e9eadacf2dea2d53ac732faa1400f2a26afebec 100644
@@ -416,6 +416,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+       rcu_read_lock();
+       if (!percpu_ref_tryget_live(&q->q_usage_counter))
+               goto fail;
+
+       /*
+        * The code that increments the pm_only counter must ensure that the
+        * counter is globally visible before the queue is unfrozen.
+        */
+       if (blk_queue_pm_only(q) &&
+           (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+               goto fail_put;
+
+       rcu_read_unlock();
+       return true;
+
+fail_put:
+       percpu_ref_put(&q->q_usage_counter);
+fail:
+       rcu_read_unlock();
+       return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -425,40 +449,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
        const bool pm = flags & BLK_MQ_REQ_PM;
 
-       while (true) {
-               bool success = false;
-
-               rcu_read_lock();
-               if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-                       /*
-                        * The code that increments the pm_only counter is
-                        * responsible for ensuring that that counter is
-                        * globally visible before the queue is unfrozen.
-                        */
-                       if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-                           !blk_queue_pm_only(q)) {
-                               success = true;
-                       } else {
-                               percpu_ref_put(&q->q_usage_counter);
-                       }
-               }
-               rcu_read_unlock();
-
-               if (success)
-                       return 0;
-
+       while (!blk_try_enter_queue(q, pm)) {
                if (flags & BLK_MQ_REQ_NOWAIT)
                        return -EBUSY;
 
                /*
-                * read pair of barrier in blk_freeze_queue_start(),
-                * we need to order reading __PERCPU_REF_DEAD flag of
-                * .q_usage_counter and reading .mq_freeze_depth or
-                * queue dying flag, otherwise the following wait may
-                * never return if the two reads are reordered.
+                * read pair of barrier in blk_freeze_queue_start(), we need to
+                * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+                * reading .mq_freeze_depth or queue dying flag, otherwise the
+                * following wait may never return if the two reads are
+                * reordered.
                 */
                smp_rmb();
-
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
                            blk_pm_resume_queue(pm, q)) ||
@@ -466,6 +468,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                if (blk_queue_dying(q))
                        return -ENODEV;
        }
+
+       return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
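
For callers, nothing changes with this patch: blk_queue_enter() still returns
0 with a q_usage_counter reference held, which blk_queue_exit() drops.  A
minimal hypothetical caller (the function name and body are illustrative):

	/* Hypothetical caller, for illustration only. */
	static int example_submit(struct request_queue *q)
	{
		int ret;

		/* fail with -EBUSY instead of sleeping if the queue is frozen */
		ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
		if (ret)
			return ret;	/* -EBUSY with BLK_MQ_REQ_NOWAIT */

		/* q_usage_counter is held; the queue cannot be torn down here */

		blk_queue_exit(q);	/* drop the blk_queue_enter() reference */
		return 0;
	}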