/*	$NetBSD: evutil_time.c,v 1.5 2016/01/08 21:35:40 christos Exp $	*/

/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef EVENT__HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef EVENT__HAVE_GETTIMEOFDAY
#include <sys/timeb.h>
#endif
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
	!defined(_WIN32)
#include <sys/select.h>
#endif
#include <time.h>
#include <sys/stat.h>
#include <string.h>

#include "event2/util.h"
#include "util-internal.h"
#include "log-internal.h"
#include "mm-internal.h"

#ifndef EVENT__HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be Windows. */
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl.  GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the unix epoch. */
		return -1;
	}
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
#endif
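
/* A worked example of the conversion above (a sketch only; it reuses the
 * EPOCH_BIAS/UNITS_* macros defined in evutil_gettimeofday(), and the
 * #if 0 keeps it out of the build): */
#if 0
static void
example_filetime_conversion(void)
{
	/* One second after the Unix epoch, in FILETIME units
	 * (100-ns intervals since 1601-01-01 UTC): */
	ev_uint64_t ft_64 = EPOCH_BIAS + UNITS_PER_SEC;
	long sec, usec;

	ft_64 -= EPOCH_BIAS;
	sec = (long)(ft_64 / UNITS_PER_SEC);			/* == 1 */
	usec = (long)((ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);	/* == 0 */
	(void)sec;
	(void)usec;
}
#endif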

#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

long
evutil_tv_to_msec_(const struct timeval *tv)
{
	if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
		return -1;

	return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
}
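
/* A quick illustration of the rounding above (an illustrative sketch,
 * not part of the library): fractional milliseconds are rounded up, so
 * even a 1-usec remainder costs a whole millisecond. */
#if 0
static void
example_tv_to_msec(void)
{
	struct timeval tv;
	long msec;

	tv.tv_sec = 1;
	tv.tv_usec = 1;
	/* (1 * 1000) + ((1 + 999) / 1000) == 1001 */
	msec = evutil_tv_to_msec_(&tv);
	(void)msec;
}
#endif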

/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed
  to be any more fine-grained than 1 msec.
 */
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		long msec = evutil_tv_to_msec_(tv);
		if (msec < 0) {
			/* The conversion overflowed; sleep for as long as
			 * we can.  (Passing (DWORD)-1 would mean INFINITE.) */
			msec = LONG_MAX;
		}
		Sleep((DWORD)msec);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	select(0, NULL, NULL, NULL, tv);
#endif
}
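
/* Example use of evutil_usleep_() (a sketch): sleep for 1.5 seconds,
 * whichever back end the platform provides. */
#if 0
static void
example_usleep(void)
{
	struct timeval tv;
	tv.tv_sec = 1;
	tv.tv_usec = 500000;
	evutil_usleep_(&tv);
}
#endif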

/*
   This function assumes it's called repeatedly with a
   not-actually-so-monotonic time source whose outputs are in 'tv'. It
   implements a trivial ratcheting mechanism so that the values never go
   backwards.
 */
static void
adjust_monotonic_time(struct evutil_monotonic_timer *base,
    struct timeval *tv)
{
	evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);

	if (evutil_timercmp(tv, &base->last_time, <)) {
		/* Guess it wasn't monotonic after all. */
		struct timeval adjust;
		evutil_timersub(&base->last_time, tv, &adjust);
		evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
		    &base->adjust_monotonic_clock);
		*tv = base->last_time;
	}
	base->last_time = *tv;
}
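
/* A small sketch of the ratchet above (a hypothetical driver, not built):
 * feed it a reading that steps backwards and watch it get clamped. */
#if 0
static void
example_ratchet(void)
{
	struct evutil_monotonic_timer base;
	struct timeval tv;
	memset(&base, 0, sizeof(base));

	tv.tv_sec = 10; tv.tv_usec = 500000;
	adjust_monotonic_time(&base, &tv);	/* reports 10.5 */

	tv.tv_sec = 10; tv.tv_usec = 200000;	/* source stepped back 0.3s */
	adjust_monotonic_time(&base, &tv);	/* reports 10.5 again; every
						 * later reading is shifted
						 * forward by 0.3s */
}
#endif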

/*
   Allocate a new struct evutil_monotonic_timer
 */
struct evutil_monotonic_timer *
evutil_monotonic_timer_new(void)
{
	struct evutil_monotonic_timer *p = NULL;

	p = mm_malloc(sizeof(*p));
	if (!p)
		goto done;

	memset(p, 0, sizeof(*p));

 done:
	return p;
}

/*
   Free a struct evutil_monotonic_timer
 */
void
evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
{
	if (timer) {
		mm_free(timer);
	}
}

/*
   Set up a struct evutil_monotonic_timer for initial use
 */
int
evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
    int flags)
{
	return evutil_configure_monotonic_time_(timer, flags);
}

/*
   Query the current monotonic time
 */
int
evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
    struct timeval *tp)
{
	return evutil_gettime_monotonic_(timer, tp);
}
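
/* Typical lifecycle of the public API above (a sketch, with minimal
 * error handling; flags of 0 requests the default clock): */
#if 0
static void
example_timer_lifecycle(void)
{
	struct timeval tv;
	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();

	if (!timer)
		return;
	if (evutil_configure_monotonic_time(timer, 0) == 0 &&
	    evutil_gettime_monotonic(timer, &tv) == 0) {
		/* tv now holds a monotonic timestamp. */
	}
	evutil_monotonic_timer_free(timer);
}
#endif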

#if defined(HAVE_POSIX_MONOTONIC)
/* =====
   The POSIX clock_gettime() interface provides a few ways to get at a
   monotonic clock.  CLOCK_MONOTONIC is most widely supported.  Linux also
   provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.

   On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
   Platforms don't agree about whether it should jump on a sleep/resume.
 */
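
/* For reference, the bare POSIX call described above looks like this
 * (a standalone sketch, independent of the structures in this file): */
#if 0
static int
example_posix_monotonic(struct timeval *tp)
{
	struct timespec ts;
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;	/* nsec -> usec */
	return 0;
}
#endif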

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.  You need to
	 * check for it at runtime, because some older kernel versions won't
	 * have it working. */
#ifdef CLOCK_MONOTONIC_COARSE
	const int precise = flags & EV_MONOT_PRECISE;
#endif
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct timespec	ts;

#ifdef CLOCK_MONOTONIC_COARSE
	if (CLOCK_MONOTONIC_COARSE < 0) {
		/* Technically speaking, nothing keeps CLOCK_* from being
		 * negative (as far as I know). This check and the one below
		 * make sure that it's safe for us to use -1 as an "unset"
		 * value. */
		event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
	}
	if (! precise && ! fallback) {
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
			base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			return 0;
		}
	}
#endif
	if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		base->monotonic_clock = CLOCK_MONOTONIC;
		return 0;
	}

	if (CLOCK_MONOTONIC < 0) {
		event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
	}

	base->monotonic_clock = -1;
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	struct timespec ts;

	if (base->monotonic_clock < 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	if (clock_gettime(base->monotonic_clock, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;

	return 0;
}
#endif

#if defined(HAVE_MACH_MONOTONIC)
/* ======
   Apple is a little late to the POSIX party.  And why not?  Instead of
   clock_gettime(), they provide mach_absolute_time().  Its units are not
   fixed; we need to use mach_timebase_info() to get the right functions to
   convert its units into nanoseconds.

   To all appearances, mach_absolute_time() seems to be honest-to-goodness
   monotonic.  Whether it stops during sleep or not is unspecified in
   principle, and dependent on CPU architecture in practice.
 */
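
/* The unit conversion described above, in isolation (a sketch; the real
 * conversion, pre-scaled to microseconds, lives in
 * evutil_gettime_monotonic_() below): ticks times numer/denom yields
 * nanoseconds. */
#if 0
static ev_uint64_t
example_mach_ticks_to_nsec(ev_uint64_t abstime)
{
	struct mach_timebase_info mi;
	if (mach_timebase_info(&mi) != 0)
		return 0;
	return abstime * mi.numer / mi.denom;
}
#endif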

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
#endif

#if defined(HAVE_WIN32_MONOTONIC)
/* =====
   Turn we now to Windows.  Want monotonic time on Windows?

   Windows has QueryPerformanceCounter(), which gives the highest-
   resolution time available.  It's a pity it's not so monotonic in
   practice; it's also got some fun bugs, especially with older Windowses,
   under virtualization, with funny hardware, on multiprocessor systems,
   and so on.  PEP418 [1] has a nice roundup of the issues here.

   There's GetTickCount64() on Vista and later, which gives a number of 1-msec
   ticks since startup.  The accuracy here might be as bad as 10-20 msec, I
   hear.  There's an undocumented function (NtSetTimerResolution) that
   allegedly increases the accuracy. Good luck!

   There's also GetTickCount(), which is only 32 bits, but seems to be
   supported on pre-Vista versions of Windows.  Apparently, you can coax
   another 14 bits out of it, giving you 2231 years before rollover.

   The less said about timeGetTime() the better.

   "We don't care.  We don't have to.  We're the Phone Company."
            -- Lily Tomlin, SNL

   Our strategy, if precise timers are turned off, is to just use the best
   GetTickCount equivalent available.  If we've been asked for precise timing,
   then we mostly[2] assume that GetTickCount is monotonic, and correct
   QueryPerformanceCounter to approximate it.

   [1] http://www.python.org/dev/peps/pep-0418
   [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
       anyway, just in case it isn't.

 */
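
/*
   (Arithmetic behind the "2231 years" figure above: 32 bits of 1-msec
   ticks plus 14 coaxed-out high bits gives 46 bits of milliseconds;
   2^46 msec is about 7.04e13 msec, or roughly 2231 years, before the
   combined counter wraps.)
 */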
/*
    Parts of our logic in the win32 timer code here are closely based on
    BitTorrent's libUTP library.  That code is subject to the following
    license:

      Copyright (c) 2010 BitTorrent, Inc.

      Permission is hereby granted, free of charge, to any person obtaining a
      copy of this software and associated documentation files (the
      "Software"), to deal in the Software without restriction, including
      without limitation the rights to use, copy, modify, merge, publish,
      distribute, sublicense, and/or sell copies of the Software, and to
      permit persons to whom the Software is furnished to do so, subject to
      the following conditions:

      The above copyright notice and this permission notice shall be included
      in all copies or substantial portions of the Software.

      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
      WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us.  He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so.  */
		DWORD ticks = GetTickCount();
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}

static inline ev_int64_t
abs64(ev_int64_t i)
{
	return i < 0 ? -i : i;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation.  We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from the
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
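
/* Worked example of the resync above (illustrative numbers): if
 * ticks_elapsed is 5000 (i.e. 5,000,000 usec) but counter_usec_elapsed
 * works out to 6,200,000 usec, the 1,200,000-usec gap exceeds the
 * one-second threshold, so counter_usec_elapsed is snapped to 5,000,000
 * and first_counter is rebased to match. */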
#endif

#if defined(HAVE_FALLBACK_MONOTONIC)
/* =====
   And if none of the other options work, let's just use gettimeofday(), and
   ratchet it forward so that it acts like a monotonic timer, whether it
   wants to or not.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	memset(base, 0, sizeof(*base));
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	if (evutil_gettimeofday(tp, NULL) < 0)
		return -1;
	adjust_monotonic_time(base, tp);
	return 0;
}
#endif
541