mm/swapfile.c: put_swap_page: share more between huge/normal code path
author: Huang Ying <ying.huang@intel.com>
Wed, 22 Aug 2018 04:52:29 +0000 (21:52 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Wed, 22 Aug 2018 17:52:44 +0000 (10:52 -0700)
In this patch, locking-related code is shared between the huge and normal
code paths in put_swap_page() to reduce code duplication.  The
`free_entries == 0` case is merged into the more general
`free_entries != SWAPFILE_CLUSTER` case, because the new locking method
makes this straightforward.
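
As a rough illustration of the pattern (not the kernel code itself), the
user-space sketch below models the unified flow, with pthread mutexes
standing in for the cluster lock and the swap_info lock; every name and
constant in it is a simplified stand-in for the real swapfile.c API:

/*
 * User-space sketch only: pthread mutexes stand in for the kernel's
 * cluster lock and swap_info lock, and the helpers below are simplified
 * stand-ins for lock_cluster_or_swap_info()/__swap_entry_free_locked().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SWAPFILE_CLUSTER 4		/* stand-in; the real value is larger */
#define SWAP_HAS_CACHE   0x40

struct swap_info {
	pthread_mutex_t lock;		/* coarse per-device lock */
	pthread_mutex_t cluster_lock;	/* stand-in for the per-cluster lock */
	bool have_cluster_info;		/* false mimics !si->cluster_info */
	unsigned char swap_map[SWAPFILE_CLUSTER];
};

/* Take the cluster lock when cluster info exists, else the device lock. */
static pthread_mutex_t *lock_cluster_or_swap_info(struct swap_info *si)
{
	pthread_mutex_t *m = si->have_cluster_info ? &si->cluster_lock : &si->lock;

	pthread_mutex_lock(m);
	return m;
}

static void unlock_cluster_or_swap_info(pthread_mutex_t *m)
{
	pthread_mutex_unlock(m);
}

/* Drop SWAP_HAS_CACHE for one entry; return the remaining usage count. */
static unsigned char swap_entry_free_locked(struct swap_info *si, int i)
{
	si->swap_map[i] &= ~SWAP_HAS_CACHE;
	return si->swap_map[i];
}

/* The shape of the unified path: one lock/unlock pair brackets the loop. */
static void put_entries(struct swap_info *si, int size)
{
	pthread_mutex_t *m = lock_cluster_or_swap_info(si);

	for (int i = 0; i < size; i++) {
		if (!swap_entry_free_locked(si, i)) {
			/* Slot is fully free: drop the lock to hand it back. */
			unlock_cluster_or_swap_info(m);
			printf("entry %d freed\n", i);
			if (i == size - 1)
				return;
			m = lock_cluster_or_swap_info(si);
		}
	}
	unlock_cluster_or_swap_info(m);
}

int main(void)
{
	struct swap_info si = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cluster_lock = PTHREAD_MUTEX_INITIALIZER,
		.have_cluster_info = true,
	};

	for (int i = 0; i < SWAPFILE_CLUSTER; i++)
		si.swap_map[i] = SWAP_HAS_CACHE;

	put_entries(&si, SWAPFILE_CLUSTER);
	return 0;
}

In the real put_swap_page(), the huge (size == SWAPFILE_CLUSTER)
pre-processing runs under the same lock before this loop, which is exactly
the sharing this patch introduces.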

The number of added lines is the same as the number of removed lines, but
the code size increases when CONFIG_TRANSPARENT_HUGEPAGE=n.

             text    data     bss     dec     hex filename
base:       24123    2004     340   26467    6763 mm/swapfile.o
unified:    24485    2004     340   26829    68cd mm/swapfile.o

Digging one step deeper with `size -A mm/swapfile.o` for the base and
unified kernels and comparing the results yields:

  -.text                                17723      0
  +.text                                17835      0
  -.orc_unwind_ip                        1380      0
  +.orc_unwind_ip                        1480      0
  -.orc_unwind                           2070      0
  +.orc_unwind                           2220      0
  -Total                                26686
  +Total                                27048

The total difference is the same (27048 - 26686 = 362 bytes, matching
26829 - 26467 above).  The text segment difference is much smaller:
17835 - 17723 = 112 bytes.  The rest comes from the ORC unwinder sections:
(1480 + 2220) - (1380 + 2070) = 250 bytes.  If the frame pointer unwinder
is used instead, that part costs nothing.

Link: http://lkml.kernel.org/r/20180720071845.17920-9-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/swapfile.c

index d44b2d60a66a6502a0b364af5381d92ef5e939f9..d954b71c4f9c2e842e142713e1a921addb6a4c9d 100644 (file)
@@ -1223,8 +1223,8 @@ void put_swap_page(struct page *page, swp_entry_t entry)
        if (!si)
                return;
 
+       ci = lock_cluster_or_swap_info(si, offset);
        if (size == SWAPFILE_CLUSTER) {
-               ci = lock_cluster(si, offset);
                VM_BUG_ON(!cluster_is_huge(ci));
                map = si->swap_map + offset;
                for (i = 0; i < SWAPFILE_CLUSTER; i++) {
@@ -1233,13 +1233,9 @@ void put_swap_page(struct page *page, swp_entry_t entry)
                        if (val == SWAP_HAS_CACHE)
                                free_entries++;
                }
-               if (!free_entries) {
-                       for (i = 0; i < SWAPFILE_CLUSTER; i++)
-                               map[i] &= ~SWAP_HAS_CACHE;
-               }
                cluster_clear_huge(ci);
-               unlock_cluster(ci);
                if (free_entries == SWAPFILE_CLUSTER) {
+                       unlock_cluster_or_swap_info(si, ci);
                        spin_lock(&si->lock);
                        ci = lock_cluster(si, offset);
                        memset(map, 0, SWAPFILE_CLUSTER);
@@ -1250,12 +1246,16 @@ void put_swap_page(struct page *page, swp_entry_t entry)
                        return;
                }
        }
-       if (size == 1 || free_entries) {
-               for (i = 0; i < size; i++, entry.val++) {
-                       if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
-                               free_swap_slot(entry);
+       for (i = 0; i < size; i++, entry.val++) {
+               if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
+                       unlock_cluster_or_swap_info(si, ci);
+                       free_swap_slot(entry);
+                       if (i == size - 1)
+                               return;
+                       lock_cluster_or_swap_info(si, offset);
                }
        }
+       unlock_cluster_or_swap_info(si, ci);
 }
 
 #ifdef CONFIG_THP_SWAP