x86/vdso: Replace the clockid switch case
author Thomas Gleixner <tglx@linutronix.de>
Mon, 17 Sep 2018 12:45:41 +0000 (14:45 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 4 Oct 2018 21:00:26 +0000 (23:00 +0200)
Now that the time getter functions use the clockid as an index into the
storage array for the base time access, the switch case can be replaced.

- Check for clockid >= MAX_CLOCKS and for negative clockid (CPU/FD) first
  and call the fallback function right away.

- After establishing that clockid is < MAX_CLOCKS, convert the clockid to a
  bitmask.

- Check for the supported high resolution and coarse functions by ANDing
  the bitmask with the masks of supported clocks and checking whether a bit
  is set.

This completely avoids jump tables, reduces the number of conditionals and
makes the VDSO extensible for other clock ids.
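
The VGTOD_HRES and VGTOD_COARSE masks used in the new code are defined
elsewhere in this series and are not part of the hunk below. As a rough
sketch only, assuming the kernel's BIT() helper, the per-clock masks would
look something like:

	/* Illustrative sketch, not part of this patch */
	#define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC))
	#define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))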

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Matt Rickard <matt@softrans.com.au>
Cc: Stephen Boyd <sboyd@kernel.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: devel@linuxdriverproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Juergen Gross <jgross@suse.com>
Link: https://lkml.kernel.org/r/20180917130707.574315796@linutronix.de
arch/x86/entry/vdso/vclock_gettime.c

index b27dea0e23af058d599466849235e03ad36a3231..672e50e35d6c277d889915f1fe0ca2ea2b5dc563 100644 (file)
@@ -241,29 +241,27 @@ notrace static void do_coarse(clockid_t clk, struct timespec *ts)
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-       switch (clock) {
-       case CLOCK_REALTIME:
-               if (do_hres(CLOCK_REALTIME, ts) == VCLOCK_NONE)
-                       goto fallback;
-               break;
-       case CLOCK_MONOTONIC:
-               if (do_hres(CLOCK_MONOTONIC, ts) == VCLOCK_NONE)
-                       goto fallback;
-               break;
-       case CLOCK_REALTIME_COARSE:
-               do_coarse(CLOCK_REALTIME_COARSE, ts);
-               break;
-       case CLOCK_MONOTONIC_COARSE:
-               do_coarse(CLOCK_MONOTONIC_COARSE, ts);
-               break;
-       default:
-               goto fallback;
-       }
+       unsigned int msk;
 
-       return 0;
-fallback:
+       /* Sort out negative (CPU/FD) and invalid clocks */
+       if (unlikely((unsigned int) clock >= MAX_CLOCKS))
+               return vdso_fallback_gettime(clock, ts);
+
+       /*
+        * Convert the clockid to a bitmask and use it to check which
+        * clocks are handled in the VDSO directly.
+        */
+       msk = 1U << clock;
+       if (likely(msk & VGTOD_HRES)) {
+               if (do_hres(clock, ts) != VCLOCK_NONE)
+                       return 0;
+       } else if (msk & VGTOD_COARSE) {
+               do_coarse(clock, ts);
+               return 0;
+       }
        return vdso_fallback_gettime(clock, ts);
 }
+
 int clock_gettime(clockid_t, struct timespec *)
         __attribute__((weak, alias("__vdso_clock_gettime")));
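
For reference, a minimal user-space sketch (not part of the commit, the
output format is purely illustrative) that exercises this path through the
weakly aliased clock_gettime() entry point:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/*
		 * CLOCK_MONOTONIC is covered by VGTOD_HRES, so libc can
		 * resolve this via the vDSO without entering the kernel;
		 * clock ids the vDSO does not handle take the
		 * vdso_fallback_gettime() syscall path instead.
		 */
		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
			printf("%lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
		return 0;
	}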