	force_successful_syscall_return();
	return addr;
}
+
+asmlinkage long
+ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
+{
+	/*
+	 * ia64's clock_gettime() syscall is implemented as a vdso call
+	 * fsys_clock_gettime(). Currently it handles only
+	 * CLOCK_REALTIME and CLOCK_MONOTONIC. Both are based on the
+	 * 'ar.itc' counter, which is incremented at a constant
+	 * frequency. It's usually 400MHz, roughly 2.5x slower than the
+	 * CPU clock frequency, which makes it almost a 1ns hrtimer,
+	 * but not quite.
+	 *
+	 * Let's special-case these timers to report correct precision
+	 * based on ITC frequency and not HZ frequency for supported
+	 * clocks.
+	 */
+	switch (which_clock) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+		s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
+		struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
+
+		return put_timespec64(&rtn_tp, tp);
+	}
+
+	return sys_clock_getres(which_clock, tp);
+}
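
For a typical 400MHz ITC the function above reports a 3ns resolution: the tick period is 2.5ns and DIV_ROUND_UP() rounds it up to whole nanoseconds, instead of a HZ-derived value. A minimal user-space sketch of the same arithmetic (illustrative only, not part of the patch; the 400MHz figure is an assumption taken from the comment above):

#include <stdio.h>

/* Local stand-ins for the kernel's NSEC_PER_SEC and DIV_ROUND_UP(). */
#define NSEC_PER_SEC		1000000000LL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	long long itc_freq = 400000000LL;	/* assumed 400MHz ITC */
	long long tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, itc_freq);

	/* Prints 3: a 2.5ns tick period rounded up to whole nanoseconds. */
	printf("reported resolution: %lld ns\n", tick_ns);
	return 0;
}
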
228	common	timer_delete			sys_timer_delete
229	common	clock_settime			sys_clock_settime
230	common	clock_gettime			sys_clock_gettime
-231	common	clock_getres			sys_clock_getres
+231	common	clock_getres			ia64_clock_getres
232	common	clock_nanosleep			sys_clock_nanosleep
233	common	fstatfs64			sys_fstatfs64
234	common	statfs64			sys_statfs64
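
With the table entry for clock_getres now pointing at ia64_clock_getres(), a small user-space check (a sketch, not part of the patch) shows what resolution the kernel reports for the two special-cased clocks; on a patched kernel with a 400MHz ITC both lines should print 0.000000003 s:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec res;

	if (clock_getres(CLOCK_REALTIME, &res) == 0)
		printf("CLOCK_REALTIME  resolution: %ld.%09ld s\n",
		       (long)res.tv_sec, res.tv_nsec);

	if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
		printf("CLOCK_MONOTONIC resolution: %ld.%09ld s\n",
		       (long)res.tv_sec, res.tv_nsec);

	return 0;
}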