mm/swap: add helper swap_offset_available()
author    Miaohe Lin <linmiaohe@huawei.com>
          Thu, 19 May 2022 21:08:52 +0000 (14:08 -0700)
committer akpm <akpm@linux-foundation.org>
          Thu, 19 May 2022 21:08:52 +0000 (14:08 -0700)
Add helper swap_offset_available() to remove some duplicated code.  Minor
readability improvement.  (A standalone sketch of the helper's locking
contract follows the sign-off trailers below.)

[akpm@linux-foundation.org: s/swap_offset_available/swap_offset_available_and_locked/, per Neil]
Link: https://lkml.kernel.org/r/20220509131416.17553-12-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: NeilBrown <neilb@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
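
For readers outside mm/, here is a hedged, standalone model of the new
helper's contract: check a slot locklessly and, when it looks usable,
return with the lock held so the caller can revalidate under the lock.
The names below (swap_info_model, slot_available_and_locked) are
illustrative stand-ins, not kernel code; SWAP_HAS_CACHE (0x40) matches
the kernel's definition in include/linux/swap.h.

/*
 * Editorial model of the helper's contract (not part of the patch):
 * a slot is worth taking when it is unused, or when swap is nearly
 * full and the slot holds only a reclaimable swap-cache entry.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SWAP_HAS_CACHE 0x40	/* same value as the kernel's definition */

struct swap_info_model {
	pthread_mutex_t lock;
	unsigned char swap_map[128];	/* per-slot usage counts */
	bool swap_full;			/* stand-in for vm_swap_full() */
};

/* Mirrors swap_offset_available_and_locked(): the reads are lockless,
 * so a true return only means "looks usable"; the caller must
 * revalidate the slot after the lock is acquired. */
static bool slot_available_and_locked(struct swap_info_model *si,
				      unsigned long offset)
{
	if (!si->swap_map[offset] ||
	    (si->swap_full && si->swap_map[offset] == SWAP_HAS_CACHE)) {
		pthread_mutex_lock(&si->lock);
		return true;	/* caller now owns si->lock */
	}
	return false;
}

int main(void)
{
	struct swap_info_model si = { .lock = PTHREAD_MUTEX_INITIALIZER };

	si.swap_map[3] = SWAP_HAS_CACHE;	/* swap-cache-only entry */
	si.swap_full = true;

	for (unsigned long off = 0; off < 8; off++) {
		if (slot_available_and_locked(&si, off)) {
			/* the kernel's "checks:" label revalidates here */
			printf("slot %lu looks usable\n", off);
			pthread_mutex_unlock(&si.lock);
		}
	}
	return 0;
}

In the kernel version, data_race() and READ_ONCE() annotate these
lockless reads (for KCSAN, and to avoid torn or refetched loads); the
revalidation happens at scan_swap_map_slots()'s "checks" label once
si->lock is held, as the diff below shows.
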
mm/swapfile.c

index f64b298f1034a1ab67186af076cc2bfe09c6440e..331aa0cc5b9e8e76f52ab03137cf827c6ec016c8 100644
@@ -775,6 +775,22 @@ static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
        this_cpu_write(*si->cluster_next_cpu, next);
 }
 
+static bool swap_offset_available_and_locked(struct swap_info_struct *si,
+                                            unsigned long offset)
+{
+       if (data_race(!si->swap_map[offset])) {
+               spin_lock(&si->lock);
+               return true;
+       }
+
+       if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
+               spin_lock(&si->lock);
+               return true;
+       }
+
+       return false;
+}
+
 static int scan_swap_map_slots(struct swap_info_struct *si,
                               unsigned char usage, int nr,
                               swp_entry_t slots[])
@@ -952,15 +968,8 @@ done:
 scan:
        spin_unlock(&si->lock);
        while (++offset <= READ_ONCE(si->highest_bit)) {
-               if (data_race(!si->swap_map[offset])) {
-                       spin_lock(&si->lock);
+               if (swap_offset_available_and_locked(si, offset))
                        goto checks;
-               }
-               if (vm_swap_full() &&
-                   READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-                       spin_lock(&si->lock);
-                       goto checks;
-               }
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
@@ -969,15 +978,8 @@ scan:
        }
        offset = si->lowest_bit;
        while (offset < scan_base) {
-               if (data_race(!si->swap_map[offset])) {
-                       spin_lock(&si->lock);
+               if (swap_offset_available_and_locked(si, offset))
                        goto checks;
-               }
-               if (vm_swap_full() &&
-                   READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-                       spin_lock(&si->lock);
-                       goto checks;
-               }
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;