dm cache policy smq: stop preemptively demoting blocks
author    Joe Thornber <ejt@redhat.com>
          Thu, 11 May 2017 11:48:18 +0000 (07:48 -0400)
committer Mike Snitzer <snitzer@redhat.com>
          Mon, 15 May 2017 01:54:33 +0000 (21:54 -0400)
Preemptive demotion causes a lot of churn when the working set's size is
close to the fast device's size.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
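
To see the churn the message describes, consider the free-target
arithmetic in the patched file. A minimal standalone sketch, assuming an
illustrative free target of 25% and plain integers in place of the
kernel's cblock types (values and the FREE_TARGET figure are assumptions,
not taken from the file):

	#include <stdio.h>

	#define FREE_TARGET 25u	/* percent; illustrative value */

	static unsigned percent_to_target(unsigned cache_size, unsigned p)
	{
		return cache_size * p / 100u;
	}

	int main(void)
	{
		unsigned cache_size = 10000;	/* blocks on the fast device */
		unsigned working_set = 9800;	/* hot blocks the workload touches */
		unsigned target_free = percent_to_target(cache_size, FREE_TARGET);

		/* Hot blocks the old idle-time demotion would evict anyway: */
		unsigned churned = 0;
		if (working_set > cache_size - target_free)
			churned = working_set - (cache_size - target_free);

		printf("free target: %u blocks, hot blocks churned: %u\n",
		       target_free, churned);
		return 0;
	}

With a 10000-block cache and a 9800-block working set, the sketch reports
2300 hot blocks beyond the 7500 the old code was willing to keep resident
while demoting down to the free target: data that would be evicted during
idle periods and promoted straight back under load.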
drivers/md/dm-cache-policy-smq.c

index 54421a846a0c50737ccfc492b15a2c0ec700af0d..758480a1893dc46eb0b084f909a7087abe833d26 100644
@@ -1134,13 +1134,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
                percent_to_target(mq, CLEAN_TARGET);
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
        unsigned nr_free;
 
-       if (!idle)
-               return true;
-
        nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
        return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
                percent_to_target(mq, FREE_TARGET);
@@ -1220,7 +1217,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
                 * We always claim to be 'idle' to ensure some demotions happen
                 * with continuous loads.
                 */
-               if (!free_target_met(mq, true))
+               if (!free_target_met(mq))
                        queue_demotion(mq);
                return;
        }
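
These two hunks leave queue_promotion() as the only caller of
free_target_met(): demotion now happens on demand, when a promotion
actually finds the allocator exhausted, rather than preemptively while
idle. A compilable userspace sketch of the surviving path; the struct,
the _sketch suffixes, and the plain-integer fields are stand-ins for the
real smq types, not the kernel API:

	#include <stdbool.h>

	struct sketch_policy {
		unsigned cache_size;		/* total cblocks on the fast device */
		unsigned nr_allocated;		/* cblocks currently in use */
		unsigned demotions_queued;	/* background demotions in flight */
		unsigned target_free;		/* percent_to_target(mq, FREE_TARGET) */
	};

	/* Mirrors free_target_met() above: demotions already queued count
	 * as free-to-be, so repeated calls do not over-queue work. */
	static bool free_target_met_sketch(struct sketch_policy *mq)
	{
		unsigned nr_free = mq->cache_size - mq->nr_allocated;

		return nr_free + mq->demotions_queued >= mq->target_free;
	}

	static void queue_demotion_sketch(struct sketch_policy *mq)
	{
		mq->demotions_queued++;	/* real code picks a victim block */
	}

	/* On-demand path: only an exhausted allocator with an unmet free
	 * target queues a demotion; nothing happens preemptively. */
	static void queue_promotion_sketch(struct sketch_policy *mq)
	{
		if (mq->nr_allocated == mq->cache_size) {	/* allocator empty */
			if (!free_target_met_sketch(mq))
				queue_demotion_sketch(mq);
			return;
		}
		mq->nr_allocated++;	/* real code queues the promotion work */
	}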
@@ -1421,14 +1418,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
        spin_lock_irqsave(&mq->lock, flags);
        r = btracker_issue(mq->bg_work, result);
        if (r == -ENODATA) {
-               /* find some writeback work to do */
-               if (mq->migrations_allowed && !free_target_met(mq, idle))
-                       queue_demotion(mq);
-
-               else if (!clean_target_met(mq, idle))
+               if (!clean_target_met(mq, idle)) {
                        queue_writeback(mq);
-
-               r = btracker_issue(mq->bg_work, result);
+                       r = btracker_issue(mq->bg_work, result);
+               }
        }
        spin_unlock_irqrestore(&mq->lock, flags);
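
The reshaped tail of smq_get_background_work() keeps the
issue/queue/retry-once pattern: if the tracker has nothing queued
(-ENODATA), an unmet clean target is now the only source of new
background work, and the issue is retried exactly once. A hedged
userspace restatement with a stub tracker and illustrative names, locking
elided:

	#include <errno.h>
	#include <stdbool.h>

	struct work_item { int kind; };

	static unsigned tracker_pending;	/* stub background tracker */
	static struct work_item the_work;

	static int btracker_issue_sketch(struct work_item **result)
	{
		if (!tracker_pending)
			return -ENODATA;	/* nothing queued */
		tracker_pending--;
		*result = &the_work;
		return 0;
	}

	static void queue_writeback_sketch(void)
	{
		tracker_pending++;	/* real code queues a writeback item */
	}

	/* Mirrors the patched control flow of smq_get_background_work(). */
	static int get_background_work_sketch(bool clean_target_met,
					      struct work_item **result)
	{
		int r = btracker_issue_sketch(result);

		if (r == -ENODATA) {
			if (!clean_target_met) {
				queue_writeback_sketch();
				r = btracker_issue_sketch(result);	/* retry once */
			}
		}
		return r;
	}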