ANDROID: clock_gettime(CLOCK_BOOTTIME,) slows down >20x

clock_gettime(CLOCK_BOOTTIME,) slows down after a significant
accumulation of suspend time, which creates a large offset between it
and CLOCK_MONOTONIC time.  __iter_div_u64_rem() is intended only for
adding a few seconds+nanoseconds values, saving cycles on the more
expensive remainder and division operations, but it iterates one second
at a time.  This quickly goes out of scale in CLOCK_BOOTTIME's case,
since the boot-time offset was specified in nanoseconds only.

The fix is to split off seconds from the boot time and cap the
nanoseconds so that __iter_div_u64_rem does not iterate.

Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Bug: 72406285
Change-Id: Ia647ef1e76b7ba3b0c003028d4b3b955635adabb
Signed-off-by: khusika <khusikadhamar@gmail.com>
Signed-off-by: Carlos Jimenez (JavaShin-X) <javashin1986@gmail.com>
This commit is contained in:
Mark Salyzyn 2018-01-24 14:00:19 -08:00 committed by Cyber Knight
parent 12bb7b9cff
commit 8ee7b54c29
No known key found for this signature in database
GPG Key ID: 23BD4CCD326E9D64
6 changed files with 17 additions and 6 deletions

View File

@ -64,7 +64,8 @@ struct vdso_data {
u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
u32 tz_dsttime;
u64 btm_nsec; /* monotonic to boot time */
u32 btm_sec; /* monotonic to boot time */
u32 btm_nsec;
/* Raw clocksource multipler */
u32 cs_raw_mult;
/* Raw time */

View File

@ -340,6 +340,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
if (!vdso_data->use_syscall) {
struct timespec btm = ktime_to_timespec(tk->offs_boot);
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
@ -350,7 +352,8 @@ void update_vsyscall(struct timekeeper *tk)
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
vdso_data->cs_mask = tk->tkr_mono.mask;
vdso_data->btm_nsec = ktime_to_ns(tk->offs_boot);
vdso_data->btm_sec = btm.tv_sec;
vdso_data->btm_nsec = btm.tv_nsec;
}
vdso_write_end(vdso_data);

View File

@ -45,7 +45,8 @@ struct vdso_data {
__u64 xtime_coarse_nsec;
__u64 wtm_clock_sec; /* Wall to monotonic time */
vdso_wtm_clock_nsec_t wtm_clock_nsec;
__u64 btm_nsec; /* monotonic to boot time */
__u32 btm_sec; /* monotonic to boot time */
__u32 btm_nsec;
__u32 tb_seq_count; /* Timebase sequence counter */
/* cs_* members must be adjacent and in this order (ldp accesses) */
__u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */

View File

@ -337,6 +337,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
struct timespec btm = ktime_to_timespec(tk->offs_boot);
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
@ -347,7 +349,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
vdso_data->btm_nsec = ktime_to_ns(tk->offs_boot);
vdso_data->btm_sec = btm.tv_sec;
vdso_data->btm_nsec = btm.tv_nsec;
}
smp_wmb();

View File

@ -23,6 +23,8 @@
#ifndef __VDSO_COMPILER_H
#define __VDSO_COMPILER_H
#include <generated/autoconf.h>
#undef CONFIG_64BIT
#include <asm/barrier.h> /* for isb() & dmb() */
#include <asm/param.h> /* for HZ */
#include <asm/unistd32.h>

View File

@ -252,7 +252,8 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd,
static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
{
u32 seq, mult, shift;
u64 nsec, cycle_last, wtm_nsec;
u64 nsec, cycle_last;
vdso_wtm_clock_nsec_t wtm_nsec;
#ifdef ARCH_CLOCK_FIXED_MASK
static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
@ -277,7 +278,7 @@ static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
sec = vd->xtime_clock_sec;
nsec = vd->xtime_clock_snsec;
sec += vd->wtm_clock_sec;
sec += vd->wtm_clock_sec + vd->btm_sec;
wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec;
} while (unlikely(vdso_read_retry(vd, seq)));