// SPDX-License-Identifier: GPL-2.0
/*
 * NTP state machine interfaces and logic.
 *
 * This code was mainly moved from kernel/timer.c and kernel/time.c
 * Please see those files for relevant copyright info and historical
 * changelogs.
 */
#include <linux/capability.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/audit.h>

#include "ntp_internal.h"
#include "timekeeping_internal.h"

/**
 * struct ntp_data - Structure holding all NTP related state
 * @tick_usec:          USER_HZ period in microseconds
 * @tick_length:        Adjusted tick length
 * @tick_length_base:   Base value for @tick_length
 * @time_state:         State of the clock synchronization
 * @time_status:        Clock status bits
 * @time_offset:        Time adjustment in nanoseconds
 * @time_constant:      PLL time constant
 * @time_maxerror:      Maximum error in microseconds holding the NTP sync distance
 *                      (NTP dispersion + delay / 2)
 * @time_esterror:      Estimated error in microseconds holding NTP dispersion
 * @time_freq:          Frequency offset scaled nsecs/secs
 * @time_reftime:       Time at last adjustment in seconds
 * @time_adjust:        Adjustment value
 * @ntp_tick_adj:       Constant boot-param configurable NTP tick adjustment (upscaled)
 *
 * Protected by the timekeeping locks.
 */
struct ntp_data {
        unsigned long           tick_usec;
        u64                     tick_length;
        u64                     tick_length_base;
        int                     time_state;
        int                     time_status;
        s64                     time_offset;
        long                    time_constant;
        long                    time_maxerror;
        long                    time_esterror;
        s64                     time_freq;
        time64_t                time_reftime;
        long                    time_adjust;
        s64                     ntp_tick_adj;
};

static struct ntp_data tk_ntp_data = {
        .tick_usec              = USER_TICK_USEC,
        .time_state             = TIME_OK,
        .time_status            = STA_UNSYNC,
        .time_constant          = 2,
        .time_maxerror          = NTP_PHASE_LIMIT,
        .time_esterror          = NTP_PHASE_LIMIT,
};

#define SECS_PER_DAY            86400
#define MAX_TICKADJ             500LL           /* usecs */
#define MAX_TICKADJ_SCALED \
        (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
#define MAX_TAI_OFFSET          100000

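/*
 * A note on the fixed point units used below: "scaled" quantities
 * (tick_length, time_offset, time_freq, MAX_TICKADJ_SCALED, ...) are
 * nanoseconds shifted left by NTP_SCALE_SHIFT, i.e. they carry a binary
 * fraction for sub-nanosecond resolution.
 *
 * Worked example: MAX_TICKADJ_SCALED is the 500 usec adjtime() limit in
 * this unit: 500 * NSEC_PER_USEC = 500000 ns, up-scaled and spread over
 * NTP_INTERVAL_FREQ intervals. second_overflow() adds at most that much
 * to the tick length once per second, so adjtime() slews the clock by no
 * more than 500 usec per second.
 */
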
/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
static time64_t ntp_next_leap_sec = TIME64_MAX;

#ifdef CONFIG_NTP_PPS

/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID       10      /* PPS signal watchdog max (s) */
#define PPS_POPCORN     4       /* popcorn spike threshold (shift) */
#define PPS_INTMIN      2       /* min freq interval (s) (shift) */
#define PPS_INTMAX      8       /* max freq interval (s) (shift) */
#define PPS_INTCOUNT    4       /* number of consecutive good intervals to
                                   increase pps_shift or consecutive bad
                                   intervals to decrease it */
#define PPS_MAXWANDER   100000  /* max PPS freq wander (ns/s) */

static int pps_valid;           /* signal watchdog counter */
static long pps_tf[3];          /* phase median filter */
static long pps_jitter;         /* current jitter (ns) */
static struct timespec64 pps_fbase; /* beginning of the last freq interval */
static int pps_shift;           /* current interval duration (s) (shift) */
static int pps_intcnt;          /* interval counter */
static s64 pps_freq;            /* frequency offset (scaled ns/s) */
static long pps_stabil;         /* current stability (scaled ns/s) */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;         /* calibration intervals */
static long pps_jitcnt;         /* jitter limit exceeded */
static long pps_stbcnt;         /* stability limit exceeded */
static long pps_errcnt;         /* calibration errors */


/*
 * PPS kernel consumer compensates the whole phase error immediately.
 * Otherwise, reduce the offset by a fixed factor times the time constant.
 */
static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{
        if (ntpdata->time_status & STA_PPSTIME && ntpdata->time_status & STA_PPSSIGNAL)
                return offset;
        else
                return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}

static inline void pps_reset_freq_interval(void)
{
        /* The PPS calibration interval may end surprisingly early */
        pps_shift = PPS_INTMIN;
        pps_intcnt = 0;
}

/**
 * pps_clear - Clears the PPS state variables
 */
static inline void pps_clear(void)
{
        pps_reset_freq_interval();
        pps_tf[0] = 0;
        pps_tf[1] = 0;
        pps_tf[2] = 0;
        pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
        pps_freq = 0;
}

/*
 * Decrease pps_valid to indicate that another second has passed since the
 * last PPS signal. When it reaches 0, indicate that the PPS signal is missing.
 */
static inline void pps_dec_valid(struct ntp_data *ntpdata)
{
        if (pps_valid > 0)
                pps_valid--;
        else {
                ntpdata->time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                          STA_PPSWANDER | STA_PPSERROR);
                pps_clear();
        }
}

static inline void pps_set_freq(s64 freq)
{
        pps_freq = freq;
}

static inline bool is_error_status(int status)
{
        return (status & (STA_UNSYNC|STA_CLOCKERR))
                /*
                 * PPS signal lost when either PPS time or PPS frequency
                 * synchronization requested
                 */
                || ((status & (STA_PPSFREQ|STA_PPSTIME))
                        && !(status & STA_PPSSIGNAL))
                /*
                 * PPS jitter exceeded when PPS time synchronization
                 * requested
                 */
                || ((status & (STA_PPSTIME|STA_PPSJITTER))
                        == (STA_PPSTIME|STA_PPSJITTER))
                /*
                 * PPS wander exceeded or calibration error when PPS
                 * frequency synchronization requested
                 */
                || ((status & STA_PPSFREQ)
                        && (status & (STA_PPSWANDER|STA_PPSERROR)));
}

static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc)
{
        txc->ppsfreq    = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
                                      PPM_SCALE_INV, NTP_SCALE_SHIFT);
        txc->jitter     = pps_jitter;
        if (!(ntpdata->time_status & STA_NANO))
                txc->jitter = pps_jitter / NSEC_PER_USEC;
        txc->shift      = pps_shift;
        txc->stabil     = pps_stabil;
        txc->jitcnt     = pps_jitcnt;
        txc->calcnt     = pps_calcnt;
        txc->errcnt     = pps_errcnt;
        txc->stbcnt     = pps_stbcnt;
}

#else /* !CONFIG_NTP_PPS */

static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{
        return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}

static inline void pps_reset_freq_interval(void) {}
static inline void pps_clear(void) {}
static inline void pps_dec_valid(struct ntp_data *ntpdata) {}
static inline void pps_set_freq(s64 freq) {}

static inline bool is_error_status(int status)
{
        return status & (STA_UNSYNC|STA_CLOCKERR);
}

static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc)
{
        /* PPS is not implemented, so these are zero */
        txc->ppsfreq    = 0;
        txc->jitter     = 0;
        txc->shift      = 0;
        txc->stabil     = 0;
        txc->jitcnt     = 0;
        txc->calcnt     = 0;
        txc->errcnt     = 0;
        txc->stbcnt     = 0;
}

#endif /* CONFIG_NTP_PPS */

/*
 * Update tick_length and tick_length_base, based on tick_usec, ntp_tick_adj and
 * time_freq:
 */
static void ntp_update_frequency(struct ntp_data *ntpdata)
{
        u64 second_length, new_base, tick_usec = (u64)ntpdata->tick_usec;

        second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << NTP_SCALE_SHIFT;

        second_length += ntpdata->ntp_tick_adj;
        second_length += ntpdata->time_freq;

        new_base = div_u64(second_length, NTP_INTERVAL_FREQ);

        /*
         * Don't wait for the next second_overflow, apply the change to the
         * tick length immediately:
         */
        ntpdata->tick_length += new_base - ntpdata->tick_length_base;
        ntpdata->tick_length_base = new_base;
}

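/*
 * Worked example for ntp_update_frequency(), assuming USER_HZ == 100 and
 * NTP_INTERVAL_FREQ == 1000: tick_usec defaults to USER_TICK_USEC (10000),
 * so the nominal second_length is 10000 * 1000 * 100 = 1e9 ns in scaled
 * form. A time_freq of +1 ppm contributes another 1000 scaled ns, giving a
 * tick_length_base of 1000001 ns (scaled) per interval. The difference to
 * the previous base is folded into tick_length right away, so the current
 * interval already runs at the new rate.
 */
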
static inline s64 ntp_update_offset_fll(struct ntp_data *ntpdata, s64 offset64, long secs)
{
        ntpdata->time_status &= ~STA_MODE;

        if (secs < MINSEC)
                return 0;

        if (!(ntpdata->time_status & STA_FLL) && (secs <= MAXSEC))
                return 0;

        ntpdata->time_status |= STA_MODE;

        return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}

static void ntp_update_offset(struct ntp_data *ntpdata, long offset)
{
        s64 freq_adj, offset64;
        long secs, real_secs;

        if (!(ntpdata->time_status & STA_PLL))
                return;

        if (!(ntpdata->time_status & STA_NANO)) {
                /* Make sure the multiplication below won't overflow */
                offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
                offset *= NSEC_PER_USEC;
        }

        /* Scale the phase adjustment and clamp to the operating range. */
        offset = clamp(offset, -MAXPHASE, MAXPHASE);

        /*
         * Select how the frequency is to be controlled
         * and in which mode (PLL or FLL).
         */
        real_secs = __ktime_get_real_seconds();
        secs = (long)(real_secs - ntpdata->time_reftime);
        if (unlikely(ntpdata->time_status & STA_FREQHOLD))
                secs = 0;

        ntpdata->time_reftime = real_secs;

        offset64 = offset;
        freq_adj = ntp_update_offset_fll(ntpdata, offset64, secs);

        /*
         * Clamp update interval to reduce PLL gain with low
         * sampling rate (e.g. intermittent network connection)
         * to avoid instability.
         */
        if (unlikely(secs > 1 << (SHIFT_PLL + 1 + ntpdata->time_constant)))
                secs = 1 << (SHIFT_PLL + 1 + ntpdata->time_constant);

        freq_adj += (offset64 * secs) <<
                        (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + ntpdata->time_constant));

        freq_adj = min(freq_adj + ntpdata->time_freq, MAXFREQ_SCALED);

        ntpdata->time_freq = max(freq_adj, -MAXFREQ_SCALED);

        ntpdata->time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}

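/*
 * The offset stored above is not applied in one go: second_overflow()
 * calls ntp_offset_chunk() once per second and folds only
 * time_offset >> (SHIFT_PLL + time_constant) of the remaining error into
 * the tick length (unless a valid PPS signal takes over). With the default
 * time_constant of 2 that is 1/2^(SHIFT_PLL + 2) of the residual phase
 * error per second, i.e. the error decays exponentially instead of being
 * slewed all at once.
 */
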
static void __ntp_clear(struct ntp_data *ntpdata)
{
        /* Stop active adjtime() */
        ntpdata->time_adjust = 0;
        ntpdata->time_status |= STA_UNSYNC;
        ntpdata->time_maxerror = NTP_PHASE_LIMIT;
        ntpdata->time_esterror = NTP_PHASE_LIMIT;

        ntp_update_frequency(ntpdata);

        ntpdata->tick_length = ntpdata->tick_length_base;
        ntpdata->time_offset = 0;

        ntp_next_leap_sec = TIME64_MAX;
        /* Clear PPS state variables */
        pps_clear();
}

/**
 * ntp_clear - Clears the NTP state variables
 */
void ntp_clear(void)
{
        __ntp_clear(&tk_ntp_data);
}


u64 ntp_tick_length(void)
{
        return tk_ntp_data.tick_length;
}

/**
 * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
 *
 * Provides the time of the next leapsecond against CLOCK_REALTIME in
 * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
 */
ktime_t ntp_get_next_leap(void)
{
        struct ntp_data *ntpdata = &tk_ntp_data;
        ktime_t ret;

        if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS))
                return ktime_set(ntp_next_leap_sec, 0);
        ret = KTIME_MAX;
        return ret;
}

/*
 * This routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills ([email protected]) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * Also handles leap second processing, and returns leap offset
 */
int second_overflow(time64_t secs)
{
        struct ntp_data *ntpdata = &tk_ntp_data;
        s64 delta;
        int leap = 0;
        s32 rem;

        /*
         * Leap second processing. If in leap-insert state at the end of the
         * day, the system clock is set back one second; if in leap-delete
         * state, the system clock is set ahead one second.
         */
        switch (ntpdata->time_state) {
        case TIME_OK:
                if (ntpdata->time_status & STA_INS) {
                        ntpdata->time_state = TIME_INS;
                        div_s64_rem(secs, SECS_PER_DAY, &rem);
                        ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
                } else if (ntpdata->time_status & STA_DEL) {
                        ntpdata->time_state = TIME_DEL;
                        div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
                        ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
                }
                break;
        case TIME_INS:
                if (!(ntpdata->time_status & STA_INS)) {
                        ntp_next_leap_sec = TIME64_MAX;
                        ntpdata->time_state = TIME_OK;
                } else if (secs == ntp_next_leap_sec) {
                        leap = -1;
                        ntpdata->time_state = TIME_OOP;
                        pr_notice("Clock: inserting leap second 23:59:60 UTC\n");
                }
                break;
        case TIME_DEL:
                if (!(ntpdata->time_status & STA_DEL)) {
                        ntp_next_leap_sec = TIME64_MAX;
                        ntpdata->time_state = TIME_OK;
                } else if (secs == ntp_next_leap_sec) {
                        leap = 1;
                        ntp_next_leap_sec = TIME64_MAX;
                        ntpdata->time_state = TIME_WAIT;
                        pr_notice("Clock: deleting leap second 23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
                ntp_next_leap_sec = TIME64_MAX;
                ntpdata->time_state = TIME_WAIT;
                break;
        case TIME_WAIT:
                if (!(ntpdata->time_status & (STA_INS | STA_DEL)))
                        ntpdata->time_state = TIME_OK;
                break;
        }

        /* Bump the maxerror field */
        ntpdata->time_maxerror += MAXFREQ / NSEC_PER_USEC;
        if (ntpdata->time_maxerror > NTP_PHASE_LIMIT) {
                ntpdata->time_maxerror = NTP_PHASE_LIMIT;
                ntpdata->time_status |= STA_UNSYNC;
        }

        /* Compute the phase adjustment for the next second */
        ntpdata->tick_length = ntpdata->tick_length_base;

        delta = ntp_offset_chunk(ntpdata, ntpdata->time_offset);
        ntpdata->time_offset -= delta;
        ntpdata->tick_length += delta;

        /* Check PPS signal */
        pps_dec_valid(ntpdata);

        if (!ntpdata->time_adjust)
                goto out;

        if (ntpdata->time_adjust > MAX_TICKADJ) {
                ntpdata->time_adjust -= MAX_TICKADJ;
                ntpdata->tick_length += MAX_TICKADJ_SCALED;
                goto out;
        }

        if (ntpdata->time_adjust < -MAX_TICKADJ) {
                ntpdata->time_adjust += MAX_TICKADJ;
                ntpdata->tick_length -= MAX_TICKADJ_SCALED;
                goto out;
        }

        ntpdata->tick_length += (s64)(ntpdata->time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
                                << NTP_SCALE_SHIFT;
        ntpdata->time_adjust = 0;

out:
        return leap;
}

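/*
 * Example of the state machine above: when NTP sets STA_INS ahead of a
 * leap second insertion (e.g. 2016-12-31), the next call moves the state
 * from TIME_OK to TIME_INS and latches ntp_next_leap_sec to the upcoming
 * UTC midnight. The call for exactly that second returns -1 and enters
 * TIME_OOP; the timekeeping core applies the returned value, so
 * CLOCK_REALTIME effectively repeats the last second of the day. One
 * second later the state advances to TIME_WAIT and, once STA_INS is
 * cleared again, back to TIME_OK.
 */
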
#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)

static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
        queue_work(system_freezable_power_efficient_wq, &sync_work);

        return HRTIMER_NORESTART;
}

static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
        ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);

        if (retry)
                exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
        else
                exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);

        hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
}

/*
 * Check whether @now is correct versus the required time to update the RTC
 * and calculate the value which needs to be written to the RTC so that the
 * next seconds increment of the RTC after the write is aligned with the next
 * seconds increment of clock REALTIME.
 *
 * tsched     t1 write(t2.tv_sec - 1sec)      t2 RTC increments seconds
 *
 * t2.tv_nsec == 0
 * tsched = t2 - set_offset_nsec
 * newval = t2 - NSEC_PER_SEC
 *
 * ==> newval = tsched + set_offset_nsec - NSEC_PER_SEC
 *
 * As the execution of this code is not guaranteed to happen exactly at
 * tsched this allows it to happen within a fuzzy region:
 *
 *      abs(now - tsched) < FUZZ
 *
 * If @now is not inside the allowed window the function returns false.
 */
static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
                                  struct timespec64 *to_set,
                                  const struct timespec64 *now)
{
        /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
        const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
        struct timespec64 delay = {.tv_sec = -1,
                                   .tv_nsec = set_offset_nsec};

        *to_set = timespec64_add(*now, delay);

        if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
                to_set->tv_nsec = 0;
                return true;
        }

        if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
                to_set->tv_sec++;
                to_set->tv_nsec = 0;
                return true;
        }
        return false;
}

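/*
 * Numeric example: with set_offset_nsec = 500 ms (the MC146818 default
 * used below) and now = 41.500000123, delay is {-1 s, +500 ms} and the sum
 * is 41.000000123. tv_nsec lies inside the fuzz window, so to_set becomes
 * 41.000000000 and the function returns true: writing 41 now makes the
 * RTC's next seconds increment (to 42) line up with CLOCK_REALTIME
 * reaching 42.0. Had now been 41.2, the sum would be 40.7, outside the
 * window, and sync_hw_clock() retries a couple of seconds later at the
 * right phase.
 */
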
#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock64(struct timespec64 now64)
{
        return -ENODEV;
}
#else
static inline int update_persistent_clock64(struct timespec64 now64)
{
        return -ENODEV;
}
#endif

#ifdef CONFIG_RTC_SYSTOHC
/* Save NTP synchronized time to the RTC */
static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
        struct rtc_device *rtc;
        struct rtc_time tm;
        int err = -ENODEV;

        rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
        if (!rtc)
                return -ENODEV;

        if (!rtc->ops || !rtc->ops->set_time)
                goto out_close;

        /* First call might not have the correct offset */
        if (*offset_nsec == rtc->set_offset_nsec) {
                rtc_time64_to_tm(to_set->tv_sec, &tm);
                err = rtc_set_time(rtc, &tm);
        } else {
                /* Store the update offset and let the caller try again */
                *offset_nsec = rtc->set_offset_nsec;
                err = -EAGAIN;
        }
out_close:
        rtc_class_close(rtc);
        return err;
}
#else
static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
        return -ENODEV;
}
#endif

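/*
 * Note on the offset handshake above: the caller starts out with a guessed
 * lead time of 500 ms. If the RTC class device declares a different
 * rtc->set_offset_nsec, the first update_rtc() call does not write
 * anything; it merely reports the device's real offset and returns
 * -EAGAIN, so that sync_hw_clock() can reschedule itself with the correct
 * lead time and perform the actual write on the next attempt.
 */
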
/**
 * ntp_synced - Tells whether the NTP status is not UNSYNC
 * Returns: true if not UNSYNC, false otherwise
 */
static inline bool ntp_synced(void)
{
        return !(tk_ntp_data.time_status & STA_UNSYNC);
}

/*
 * If we have an externally synchronized Linux clock, then update RTC clock
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges the call to the
 * RTC set routine at the correct moment to phase synchronize the RTC second
 * tick with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
        /*
         * The default synchronization offset is 500ms for the deprecated
         * update_persistent_clock64() under the assumption that it uses
         * the infamous CMOS clock (MC146818).
         */
        static unsigned long offset_nsec = NSEC_PER_SEC / 2;
        struct timespec64 now, to_set;
        int res = -EAGAIN;

        /*
         * Don't update if STA_UNSYNC is set, or if ntp_notify_cmos_timer()
         * managed to schedule the work between the timer firing and the
         * work being able to rearm the timer. Wait for the timer to expire.
         */
        if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
                return;

        ktime_get_real_ts64(&now);
        /* If @now is not in the allowed window, try again */
        if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
                goto rearm;

        /* Take timezone adjusted RTCs into account */
        if (persistent_clock_is_local)
                to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

        /* Try the legacy RTC first. */
        res = update_persistent_clock64(to_set);
        if (res != -ENODEV)
                goto rearm;

        /* Try the RTC class */
        res = update_rtc(&to_set, &offset_nsec);
        if (res == -ENODEV)
                return;
rearm:
        sched_sync_hw_clock(offset_nsec, res != 0);
}

void ntp_notify_cmos_timer(bool offset_set)
{
        /*
         * If the time was jumped (using ADJ_SETOFFSET), cancel the sync
         * timer, which may have been running if the time was synchronized
         * prior to the ADJ_SETOFFSET call.
         */
        if (offset_set)
                hrtimer_cancel(&sync_hrtimer);

        /*
         * If the work is currently executing but has not yet rearmed the
         * timer, this queues the work again immediately. Not a big issue,
         * just a pointlessly scheduled work item.
         */
        if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
                queue_work(system_freezable_power_efficient_wq, &sync_work);
}

static void __init ntp_init_cmos_sync(void)
{
        hrtimer_init(&sync_hrtimer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        sync_hrtimer.function = sync_timer_callback;
}
#else  /* !(CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC) */
static inline void __init ntp_init_cmos_sync(void) { }
#endif /* CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC */

/*
 * Propagate a new txc->status value into the NTP state:
 */
static inline void process_adj_status(struct ntp_data *ntpdata, const struct __kernel_timex *txc)
{
        if ((ntpdata->time_status & STA_PLL) && !(txc->status & STA_PLL)) {
                ntpdata->time_state = TIME_OK;
                ntpdata->time_status = STA_UNSYNC;
                ntp_next_leap_sec = TIME64_MAX;
                /* Restart PPS frequency calibration */
                pps_reset_freq_interval();
        }

        /*
         * If we turn on PLL adjustments then reset the
         * reference time to current time.
         */
        if (!(ntpdata->time_status & STA_PLL) && (txc->status & STA_PLL))
                ntpdata->time_reftime = __ktime_get_real_seconds();

        /* Only set allowed bits */
        ntpdata->time_status &= STA_RONLY;
        ntpdata->time_status |= txc->status & ~STA_RONLY;
}

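/*
 * The STA_RONLY masking above means userspace can only flip the writable
 * status bits (STA_PLL, STA_FLL, STA_INS, STA_DEL, STA_UNSYNC,
 * STA_FREQHOLD, ...); read-only bits such as STA_PPSSIGNAL, STA_PPSJITTER,
 * STA_PPSWANDER, STA_PPSERROR, STA_NANO, STA_MODE and STA_CLOCKERR are
 * preserved from the kernel's own view of the clock.
 */
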
static inline void process_adjtimex_modes(struct ntp_data *ntpdata, const struct __kernel_timex *txc,
                                          s32 *time_tai)
{
        if (txc->modes & ADJ_STATUS)
                process_adj_status(ntpdata, txc);

        if (txc->modes & ADJ_NANO)
                ntpdata->time_status |= STA_NANO;

        if (txc->modes & ADJ_MICRO)
                ntpdata->time_status &= ~STA_NANO;

        if (txc->modes & ADJ_FREQUENCY) {
                ntpdata->time_freq = txc->freq * PPM_SCALE;
                ntpdata->time_freq = min(ntpdata->time_freq, MAXFREQ_SCALED);
                ntpdata->time_freq = max(ntpdata->time_freq, -MAXFREQ_SCALED);
                /* Update pps_freq */
                pps_set_freq(ntpdata->time_freq);
        }

        if (txc->modes & ADJ_MAXERROR)
                ntpdata->time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT);

        if (txc->modes & ADJ_ESTERROR)
                ntpdata->time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT);

        if (txc->modes & ADJ_TIMECONST) {
                ntpdata->time_constant = clamp(txc->constant, 0, MAXTC);
                if (!(ntpdata->time_status & STA_NANO))
                        ntpdata->time_constant += 4;
                ntpdata->time_constant = clamp(ntpdata->time_constant, 0, MAXTC);
        }

        if (txc->modes & ADJ_TAI && txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
                *time_tai = txc->constant;

        if (txc->modes & ADJ_OFFSET)
                ntp_update_offset(ntpdata, txc->offset);

        if (txc->modes & ADJ_TICK)
                ntpdata->tick_usec = txc->tick;

        if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
                ntp_update_frequency(ntpdata);
}

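/*
 * Example of the ADJ_FREQUENCY conversion above: txc->freq is specified in
 * ppm with a 16 bit binary fraction (units of 2^-16 ppm). A value of 65536
 * therefore requests a +1 ppm correction; multiplied by PPM_SCALE this
 * becomes 1000 ns/s in scaled form in time_freq, i.e. the clock is sped up
 * by 1000 ns every second. The MAXFREQ_SCALED clamp keeps the result
 * within +/-500 ppm.
 */
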
/*
 * adjtimex() mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. Used by xntpd.
 */
int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
                  s32 *time_tai, struct audit_ntp_data *ad)
{
        struct ntp_data *ntpdata = &tk_ntp_data;
        int result;

        if (txc->modes & ADJ_ADJTIME) {
                long save_adjust = ntpdata->time_adjust;

                if (!(txc->modes & ADJ_OFFSET_READONLY)) {
                        /* adjtime() is independent from ntp_adjtime() */
                        ntpdata->time_adjust = txc->offset;
                        ntp_update_frequency(ntpdata);

                        audit_ntp_set_old(ad, AUDIT_NTP_ADJUST, save_adjust);
                        audit_ntp_set_new(ad, AUDIT_NTP_ADJUST, ntpdata->time_adjust);
                }
                txc->offset = save_adjust;
        } else {
                /* If there are input parameters, then process them: */
                if (txc->modes) {
                        audit_ntp_set_old(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset);
                        audit_ntp_set_old(ad, AUDIT_NTP_FREQ, ntpdata->time_freq);
                        audit_ntp_set_old(ad, AUDIT_NTP_STATUS, ntpdata->time_status);
                        audit_ntp_set_old(ad, AUDIT_NTP_TAI, *time_tai);
                        audit_ntp_set_old(ad, AUDIT_NTP_TICK, ntpdata->tick_usec);

                        process_adjtimex_modes(ntpdata, txc, time_tai);

                        audit_ntp_set_new(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset);
                        audit_ntp_set_new(ad, AUDIT_NTP_FREQ, ntpdata->time_freq);
                        audit_ntp_set_new(ad, AUDIT_NTP_STATUS, ntpdata->time_status);
                        audit_ntp_set_new(ad, AUDIT_NTP_TAI, *time_tai);
                        audit_ntp_set_new(ad, AUDIT_NTP_TICK, ntpdata->tick_usec);
                }

                txc->offset = shift_right(ntpdata->time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT);
                if (!(ntpdata->time_status & STA_NANO))
                        txc->offset = (u32)txc->offset / NSEC_PER_USEC;
        }

        result = ntpdata->time_state;
        if (is_error_status(ntpdata->time_status))
                result = TIME_ERROR;

        txc->freq       = shift_right((ntpdata->time_freq >> PPM_SCALE_INV_SHIFT) *
                                      PPM_SCALE_INV, NTP_SCALE_SHIFT);
        txc->maxerror   = ntpdata->time_maxerror;
        txc->esterror   = ntpdata->time_esterror;
        txc->status     = ntpdata->time_status;
        txc->constant   = ntpdata->time_constant;
        txc->precision  = 1;
        txc->tolerance  = MAXFREQ_SCALED / PPM_SCALE;
        txc->tick       = ntpdata->tick_usec;
        txc->tai        = *time_tai;

        /* Fill PPS status fields */
        pps_fill_timex(ntpdata, txc);

        txc->time.tv_sec = ts->tv_sec;
        txc->time.tv_usec = ts->tv_nsec;
        if (!(ntpdata->time_status & STA_NANO))
                txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;

        /* Handle leapsec adjustments */
        if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
                if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS)) {
                        result = TIME_OOP;
                        txc->tai++;
                        txc->time.tv_sec--;
                }
                if ((ntpdata->time_state == TIME_DEL) && (ntpdata->time_status & STA_DEL)) {
                        result = TIME_WAIT;
                        txc->tai--;
                        txc->time.tv_sec++;
                }
                if ((ntpdata->time_state == TIME_OOP) && (ts->tv_sec == ntp_next_leap_sec))
                        result = TIME_WAIT;
        }

        return result;
}

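/*
 * Note on the call site and units: the timekeeping core (do_adjtimex())
 * invokes __do_adjtimex() with the timekeeping locks held, see the
 * struct ntp_data comment above. The txc->offset reported in the read-back
 * path is the remaining, not yet applied phase error: time_offset is
 * stored as (offset << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ by
 * ntp_update_offset(), so multiplying by NTP_INTERVAL_FREQ and shifting
 * right by NTP_SCALE_SHIFT converts it back to nanoseconds (further
 * reduced to microseconds when STA_NANO is not set).
 */
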
#ifdef CONFIG_NTP_PPS

/*
 * struct pps_normtime is basically a struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC)
 */
struct pps_normtime {
        s64             sec;    /* seconds */
        long            nsec;   /* nanoseconds */
};

/*
 * Normalize the timestamp so that nsec is in the
 * [ -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval
 */
static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
{
        struct pps_normtime norm = {
                .sec = ts.tv_sec,
                .nsec = ts.tv_nsec
        };

        if (norm.nsec > (NSEC_PER_SEC >> 1)) {
                norm.nsec -= NSEC_PER_SEC;
                norm.sec++;
        }

        return norm;
}

/* Get current phase correction and jitter */
static inline long pps_phase_filter_get(long *jitter)
{
        *jitter = pps_tf[0] - pps_tf[1];
        if (*jitter < 0)
                *jitter = -*jitter;

        /* TODO: test various filters */
        return pps_tf[0];
}

/* Add the sample to the phase filter */
static inline void pps_phase_filter_add(long err)
{
        pps_tf[2] = pps_tf[1];
        pps_tf[1] = pps_tf[0];
        pps_tf[0] = err;
}

/*
 * Decrease frequency calibration interval length. It is halved after four
 * consecutive unstable intervals.
 */
static inline void pps_dec_freq_interval(void)
{
        if (--pps_intcnt <= -PPS_INTCOUNT) {
                pps_intcnt = -PPS_INTCOUNT;
                if (pps_shift > PPS_INTMIN) {
                        pps_shift--;
                        pps_intcnt = 0;
                }
        }
}

/*
 * Increase frequency calibration interval length. It is doubled after
 * four consecutive stable intervals.
 */
static inline void pps_inc_freq_interval(void)
{
        if (++pps_intcnt >= PPS_INTCOUNT) {
                pps_intcnt = PPS_INTCOUNT;
                if (pps_shift < PPS_INTMAX) {
                        pps_shift++;
                        pps_intcnt = 0;
                }
        }
}

/*
 * Update clock frequency based on MONOTONIC_RAW clock PPS signal
 * timestamps
 *
 * At the end of the calibration interval the difference between the
 * first and last MONOTONIC_RAW clock timestamps divided by the length
 * of the interval becomes the frequency update. If the interval was
 * too long, the data are discarded.
 * Returns the difference between old and new frequency values.
 */
static long hardpps_update_freq(struct ntp_data *ntpdata, struct pps_normtime freq_norm)
{
        long delta, delta_mod;
        s64 ftemp;

        /* Check if the frequency interval was too long */
        if (freq_norm.sec > (2 << pps_shift)) {
                ntpdata->time_status |= STA_PPSERROR;
                pps_errcnt++;
                pps_dec_freq_interval();
                printk_deferred(KERN_ERR "hardpps: PPSERROR: interval too long - %lld s\n",
                                freq_norm.sec);
                return 0;
        }

        /*
         * Here the raw frequency offset and wander (stability) are
         * calculated. If the wander is less than the wander threshold the
         * interval is increased; otherwise it is decreased.
         */
        ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
                        freq_norm.sec);
        delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
        pps_freq = ftemp;
        if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
                printk_deferred(KERN_WARNING "hardpps: PPSWANDER: change=%ld\n", delta);
                ntpdata->time_status |= STA_PPSWANDER;
                pps_stbcnt++;
                pps_dec_freq_interval();
        } else {
                /* Good sample */
                pps_inc_freq_interval();
        }

        /*
         * The stability metric is calculated as the average of recent
         * frequency changes, but is used only for performance monitoring
         */
        delta_mod = delta;
        if (delta_mod < 0)
                delta_mod = -delta_mod;
        pps_stabil += (div_s64(((s64)delta_mod) << (NTP_SCALE_SHIFT - SHIFT_USEC),
                               NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;

        /* If enabled, the system clock frequency is updated */
        if ((ntpdata->time_status & STA_PPSFREQ) && !(ntpdata->time_status & STA_FREQHOLD)) {
                ntpdata->time_freq = pps_freq;
                ntp_update_frequency(ntpdata);
        }

        return delta;
}

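/*
 * Worked example: with pps_shift == 4 the calibration interval is 16 s.
 * If the MONOTONIC_RAW clock accumulated +800 ns against the PPS edges
 * over that interval (freq_norm.nsec == 800), ftemp works out to
 * -800/16 = -50 ns/s in scaled form, i.e. the raw clock was running about
 * 50 ppb fast and pps_freq is pulled down accordingly. The change versus
 * the previous pps_freq (delta) is then checked against PPS_MAXWANDER
 * (100000 ns/s) to catch implausible jumps.
 */
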
/* Correct REALTIME clock phase error against PPS signal */
static void hardpps_update_phase(struct ntp_data *ntpdata, long error)
{
        long correction = -error;
        long jitter;

        /* Add the sample to the median filter */
        pps_phase_filter_add(correction);
        correction = pps_phase_filter_get(&jitter);

        /*
         * Nominal jitter is due to PPS signal noise. If it exceeds the
         * threshold, the sample is discarded; otherwise, if so enabled,
         * the time offset is updated.
         */
        if (jitter > (pps_jitter << PPS_POPCORN)) {
                printk_deferred(KERN_WARNING "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
                                jitter, (pps_jitter << PPS_POPCORN));
                ntpdata->time_status |= STA_PPSJITTER;
                pps_jitcnt++;
        } else if (ntpdata->time_status & STA_PPSTIME) {
                /* Correct the time using the phase offset */
                ntpdata->time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
                                               NTP_INTERVAL_FREQ);
                /* Cancel running adjtime() */
                ntpdata->time_adjust = 0;
        }
        /* Update jitter */
        pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
}

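/*
 * Example of the popcorn filter above: with PPS_POPCORN == 4 a sample is
 * rejected when its jitter exceeds 16 times the smoothed pps_jitter.
 * Accepted samples (with STA_PPSTIME set) load the filtered phase error
 * into time_offset in the same scaled per-interval unit that
 * ntp_update_offset() uses, so it is consumed gradually by
 * second_overflow() rather than stepping the clock. pps_jitter itself is
 * an exponential average updated with 1/2^PPS_INTMIN (i.e. 1/4) weight.
 */
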
/*
 * __hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS signal arrival in order to
 * discipline the CPU clock oscillator to the PPS signal. It takes two
 * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
 * is used to correct clock phase error and the latter is used to
 * correct the frequency.
 *
 * This code is based on David Mills's reference nanokernel
 * implementation. It was mostly rewritten but keeps the same idea.
 */
void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
        struct pps_normtime pts_norm, freq_norm;
        struct ntp_data *ntpdata = &tk_ntp_data;

        pts_norm = pps_normalize_ts(*phase_ts);

        /* Clear the error bits, they will be set again if needed */
        ntpdata->time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

        /* indicate signal presence */
        ntpdata->time_status |= STA_PPSSIGNAL;
        pps_valid = PPS_VALID;

        /*
         * When called for the first time, just start the frequency
         * interval
         */
        if (unlikely(pps_fbase.tv_sec == 0)) {
                pps_fbase = *raw_ts;
                return;
        }

        /* Ok, now we have a base for frequency calculation */
        freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));

        /*
         * Check that the signal is in the range
         * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it
         */
        if ((freq_norm.sec == 0) || (freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
            (freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
                ntpdata->time_status |= STA_PPSJITTER;
                /* Restart the frequency calibration interval */
                pps_fbase = *raw_ts;
                printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
                return;
        }

        /* Signal is ok. Check if the current frequency interval is finished */
        if (freq_norm.sec >= (1 << pps_shift)) {
                pps_calcnt++;
                /* Restart the frequency calibration interval */
                pps_fbase = *raw_ts;
                hardpps_update_freq(ntpdata, freq_norm);
        }

        hardpps_update_phase(ntpdata, pts_norm.nsec);

}
#endif  /* CONFIG_NTP_PPS */

static int __init ntp_tick_adj_setup(char *str)
{
        int rc = kstrtos64(str, 0, &tk_ntp_data.ntp_tick_adj);
        if (rc)
                return rc;

        tk_ntp_data.ntp_tick_adj <<= NTP_SCALE_SHIFT;
        return 1;
}

__setup("ntp_tick_adj=", ntp_tick_adj_setup);

void __init ntp_init(void)
{
        ntp_clear();
        ntp_init_cmos_sync();
}