x86/mm: Clean up the TLB flushing code
author Dave Hansen <dave.hansen@linux.intel.com>
Thu, 31 Jul 2014 15:40:54 +0000 (08:40 -0700)
committer H. Peter Anvin <hpa@linux.intel.com>
Thu, 31 Jul 2014 15:48:50 +0000 (08:48 -0700)
The

if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)

line of code is not exactly the easiest to audit, especially when
it ends up at two different indentation levels.  This eliminates
one of the copy-n-paste versions.  It also gives us a unified
exit point for each path through this function.  We need this in
a minute for our tracepoint.
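
For reference, the check being consolidated asks whether any CPU other
than the current one has this mm in its cpumask and therefore needs a
flush_tlb_others() IPI.  A minimal sketch of the same condition, wrapped
in a helper whose name is hypothetical and not part of this patch:

        static inline bool other_cpus_need_flush(struct mm_struct *mm)
        {
                /*
                 * Hypothetical helper, for illustration only.
                 * cpumask_any_but() returns some CPU set in the mask
                 * other than the one passed in, or a value >= nr_cpu_ids
                 * when no such CPU exists, so "< nr_cpu_ids" reads as
                 * "at least one other CPU has this mm in its cpumask".
                 */
                return cpumask_any_but(mm_cpumask(mm),
                                       smp_processor_id()) < nr_cpu_ids;
        }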

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: http://lkml.kernel.org/r/20140731154054.44F1CDDC@viggo.jf.intel.com
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/mm/tlb.c

index dd8dda167a242621515c901a3a5d62b4fcadf37b..378fbef279d27dabc7a3b1128340d4bfb0600542 100644
@@ -161,23 +161,24 @@ void flush_tlb_current_task(void)
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
 {
+       bool need_flush_others_all = true;
        unsigned long addr;
        unsigned act_entries, tlb_entries = 0;
        unsigned long nr_base_pages;
 
        preempt_disable();
        if (current->active_mm != mm)
-               goto flush_all;
+               goto out;
 
        if (!current->mm) {
                leave_mm(smp_processor_id());
-               goto flush_all;
+               goto out;
        }
 
        if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
                                        || vmflag & VM_HUGETLB) {
                local_flush_tlb();
-               goto flush_all;
+               goto out;
        }
 
        /* In modern CPU, last level tlb used for both data/ins */
@@ -196,22 +197,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
+               need_flush_others_all = false;
                /* flush range by one by one 'invlpg' */
                for (addr = start; addr < end;  addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
-
-               if (cpumask_any_but(mm_cpumask(mm),
-                               smp_processor_id()) < nr_cpu_ids)
-                       flush_tlb_others(mm_cpumask(mm), mm, start, end);
-               preempt_enable();
-               return;
        }
-
-flush_all:
+out:
+       if (need_flush_others_all) {
+               start = 0UL;
+               end = TLB_FLUSH_ALL;
+       }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-               flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+               flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
 }
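
Taken together, the hunks above leave flush_tlb_mm_range() with a single
exit label: every path now funnels through the same
cpumask_any_but()/flush_tlb_others() sequence.  The sketch below is
reconstructed from the diff for illustration only; the TLB-sizing code
between the two hunks is not part of this patch and is elided, and the
full-flush threshold test is shown as a named placeholder:

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
{
        bool need_flush_others_all = true;
        unsigned long addr;

        preempt_disable();
        if (current->active_mm != mm)
                goto out;

        if (!current->mm) {
                leave_mm(smp_processor_id());
                goto out;
        }

        if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
                                        || vmflag & VM_HUGETLB) {
                local_flush_tlb();
                goto out;
        }

        /* ... TLB sizing elided; not part of this diff ... */

        if (flush_whole_tlb_locally) {  /* placeholder for the elided threshold test */
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                need_flush_others_all = false;
                /* flush range by one by one 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
out:
        if (need_flush_others_all) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}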