dm mpath: simplify __must_push_back
author     Mike Snitzer <snitzer@redhat.com>
           Tue, 26 May 2020 20:06:56 +0000 (16:06 -0400)
committer  Mike Snitzer <snitzer@redhat.com>
           Fri, 5 Jun 2020 18:59:55 +0000 (14:59 -0400)
Remove the micro-optimization that infers the device is between presuspend
and resume (it was done purely to avoid a call to dm_noflush_suspending(),
which isn't expensive anyway).

Remove the flags argument since the flags are no longer checked.

Also remove must_push_back_bio() since it was simply a wrapper around
__must_push_back().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
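
For quick reference, a sketch of the two remaining helpers as they read after
this change (reconstructed from the diff below, not an authoritative copy of
the file):

    static bool __must_push_back(struct multipath *m)
    {
            /* Push bios back to the DM core only while a noflush suspend is in progress. */
            return dm_noflush_suspending(m->ti);
    }

    static bool must_push_back_rq(struct multipath *m)
    {
            /* Requests are additionally pushed back whenever queue_if_no_path is set. */
            return test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m);
    }

The bio paths (__multipath_map_bio() and multipath_end_io_bio()) now call
__must_push_back() directly, as the diff shows.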
drivers/md/dm-mpath.c

index 95f16d816585061bab60a6eff04814462f3bfd1b..4c34d037aa35246f19be802fd9af3ea876651709 100644
@@ -457,33 +457,15 @@ do {                                                                      \
 /*
  * Check whether bios must be queued in the device-mapper core rather
  * than here in the target.
- *
- * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
- * the same value then we are not between multipath_presuspend()
- * and multipath_resume() calls and we have no need to check
- * for the DMF_NOFLUSH_SUSPENDING flag.
  */
-static bool __must_push_back(struct multipath *m, unsigned long flags)
+static bool __must_push_back(struct multipath *m)
 {
-       return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
-                test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
-               dm_noflush_suspending(m->ti));
+       return dm_noflush_suspending(m->ti);
 }
 
-/*
- * Following functions use READ_ONCE to get atomic access to
- * all m->flags to avoid taking spinlock
- */
 static bool must_push_back_rq(struct multipath *m)
 {
-       unsigned long flags = READ_ONCE(m->flags);
-       return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
-}
-
-static bool must_push_back_bio(struct multipath *m)
-{
-       unsigned long flags = READ_ONCE(m->flags);
-       return __must_push_back(m, flags);
+       return test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m);
 }
 
 /*
@@ -620,7 +602,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
                return DM_MAPIO_SUBMITTED;
 
        if (!pgpath) {
-               if (must_push_back_bio(m))
+               if (__must_push_back(m))
                        return DM_MAPIO_REQUEUE;
                dm_report_EIO(m);
                return DM_MAPIO_KILL;
@@ -1642,7 +1624,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
 
        if (atomic_read(&m->nr_valid_paths) == 0 &&
            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-               if (must_push_back_bio(m)) {
+               if (__must_push_back(m)) {
                        r = DM_ENDIO_REQUEUE;
                } else {
                        dm_report_EIO(m);