/*	$NetBSD: evutil_time.c,v 1.1.1.2 2021/04/07 02:43:13 christos Exp $	*/
/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: evutil_time.c,v 1.1.1.2 2021/04/07 02:43:13 christos Exp $");
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef EVENT__HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef EVENT__HAVE_GETTIMEOFDAY
#include <sys/timeb.h>
#endif
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
	!defined(_WIN32)
#include <sys/select.h>
#endif
#include <time.h>
#include <sys/stat.h>
#include <string.h>

/** evutil_usleep_() */
#if defined(_WIN32)
#elif defined(EVENT__HAVE_NANOSLEEP)
#elif defined(EVENT__HAVE_USLEEP)
#include <unistd.h>
#endif

#include "event2/util.h"
#include "util-internal.h"
#include "log-internal.h"
#include "mm-internal.h"

#ifndef EVENT__HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be windows. */

typedef void (WINAPI *GetSystemTimePreciseAsFileTime_fn_t) (LPFILETIME);
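/* For reference: EPOCH_BIAS below is the distance between the FILETIME
 * epoch (1 January 1601 UTC) and the Unix epoch (1 January 1970 UTC),
 * expressed in 100-nanosecond units: 134774 days * 86400 seconds/day
 * = 11644473600 seconds, times 10000000 units per second
 * = 116444736000000000. */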
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl.  GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	static GetSystemTimePreciseAsFileTime_fn_t GetSystemTimePreciseAsFileTime_fn = NULL;
	static int check_precise = 1;

	if (EVUTIL_UNLIKELY(check_precise)) {
		HMODULE h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
		if (h != NULL)
			GetSystemTimePreciseAsFileTime_fn =
				(GetSystemTimePreciseAsFileTime_fn_t)
					GetProcAddress(h, "GetSystemTimePreciseAsFileTime");
		check_precise = 0;
	}

	if (GetSystemTimePreciseAsFileTime_fn != NULL)
		GetSystemTimePreciseAsFileTime_fn(&ft.ft_ft);
	else
		GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the unix epoch. */
		return -1;
	}
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
#endif

#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

long
evutil_tv_to_msec_(const struct timeval *tv)
{
	if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
		return -1;

	return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
}

/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed to
  be any more fine-grained than 1 msec.
 */
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		__int64 usec;
		LARGE_INTEGER li;
		HANDLE timer;

		usec = tv->tv_sec * 1000000LL + tv->tv_usec;
		if (!usec)
			return;

		li.QuadPart = -10LL * usec;
		timer = CreateWaitableTimer(NULL, TRUE, NULL);
		if (!timer)
			return;

		SetWaitableTimer(timer, &li, 0, NULL, NULL, 0);
		WaitForSingleObject(timer, INFINITE);
		CloseHandle(timer);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	{
		struct timeval tv2 = *tv;
		select(0, NULL, NULL, NULL, &tv2);
	}
#endif
}
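/* Illustrative usage sketch (hypothetical caller, not part of the original
 * code): evutil_tv_to_msec_() rounds the microsecond part up, so {0, 1}
 * maps to 1 msec and {1, 500000} maps to 1500 msec.  A caller wanting a
 * roughly 250 msec pause could do:
 *
 *	struct timeval tv = { 0, 250 * 1000 };
 *	evutil_usleep_(&tv);
 */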
int
evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm)
{
	static const char *DAYS[] =
		{ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
	static const char *MONTHS[] =
		{ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };

	time_t t = time(NULL);

#if defined(EVENT__HAVE__GMTIME64_S) || !defined(_WIN32)
	struct tm sys;
#endif

	/* If `tm` is NULL, use the system's current time. */
	if (tm == NULL) {
#if !defined(_WIN32)
		gmtime_r(&t, &sys);
		tm = &sys;
		/** detect _gmtime64()/_gmtime64_s() */
#elif defined(EVENT__HAVE__GMTIME64_S)
		errno_t err;
		err = _gmtime64_s(&sys, &t);
		if (err) {
			event_errx(1, "Invalid argument to _gmtime64_s");
		} else {
			tm = &sys;
		}
#elif defined(EVENT__HAVE__GMTIME64)
		tm = _gmtime64(&t);
#else
		tm = gmtime(&t);
#endif
	}

	return evutil_snprintf(
		date, datelen, "%s, %02d %s %4d %02d:%02d:%02d GMT",
		DAYS[tm->tm_wday], tm->tm_mday, MONTHS[tm->tm_mon],
		1900 + tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec);
}

/*
   This function assumes it's called repeatedly with a
   not-actually-so-monotonic time source whose outputs are in 'tv'. It
   implements a trivial ratcheting mechanism so that the values never go
   backwards.
 */
static void
adjust_monotonic_time(struct evutil_monotonic_timer *base,
    struct timeval *tv)
{
	evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);

	if (evutil_timercmp(tv, &base->last_time, <)) {
		/* Guess it wasn't monotonic after all. */
		struct timeval adjust;
		evutil_timersub(&base->last_time, tv, &adjust);
		evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
		    &base->adjust_monotonic_clock);
		*tv = base->last_time;
	}
	base->last_time = *tv;
}

/*
   Allocate a new struct evutil_monotonic_timer
 */
struct evutil_monotonic_timer *
evutil_monotonic_timer_new(void)
{
	struct evutil_monotonic_timer *p = NULL;

	p = mm_malloc(sizeof(*p));
	if (!p) goto done;

	memset(p, 0, sizeof(*p));

done:
	return p;
}

/*
   Free a struct evutil_monotonic_timer
 */
void
evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
{
	if (timer) {
		mm_free(timer);
	}
}

/*
   Set up a struct evutil_monotonic_timer for initial use
 */
int
evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
    int flags)
{
	return evutil_configure_monotonic_time_(timer, flags);
}

/*
   Query the current monotonic time
 */
int
evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
    struct timeval *tp)
{
	return evutil_gettime_monotonic_(timer, tp);
}
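/* Illustrative sketch of how the public API above might be used
 * (hypothetical caller, error handling abbreviated):
 *
 *	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
 *	struct timeval now;
 *	if (timer &&
 *	    evutil_configure_monotonic_time(timer, EV_MONOT_PRECISE) == 0 &&
 *	    evutil_gettime_monotonic(timer, &now) == 0) {
 *		... use now as an elapsed-time reference, not wall-clock time ...
 *	}
 *	evutil_monotonic_timer_free(timer);
 */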
#if defined(HAVE_POSIX_MONOTONIC)
/* =====
   The POSIX clock_gettime() interface provides a few ways to get at a
   monotonic clock.  CLOCK_MONOTONIC is most widely supported.  Linux also
   provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.

   On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
   Platforms don't agree about whether it should jump on a sleep/resume.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.  You need to
	 * check for it at runtime, because some older kernel versions won't
	 * have it working. */
#ifdef CLOCK_MONOTONIC_COARSE
	const int precise = flags & EV_MONOT_PRECISE;
#endif
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct timespec ts;

#ifdef CLOCK_MONOTONIC_COARSE
	if (CLOCK_MONOTONIC_COARSE < 0) {
		/* Technically speaking, nothing keeps CLOCK_* from being
		 * negative (as far as I know). This check and the one below
		 * make sure that it's safe for us to use -1 as an "unset"
		 * value. */
		event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
	}
	if (! precise && ! fallback) {
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
			base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			return 0;
		}
	}
#endif
	if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		base->monotonic_clock = CLOCK_MONOTONIC;
		return 0;
	}

	if (CLOCK_MONOTONIC < 0) {
		event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
	}

	base->monotonic_clock = -1;
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	struct timespec ts;

	if (base->monotonic_clock < 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	if (clock_gettime(base->monotonic_clock, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;

	return 0;
}
#endif

#if defined(HAVE_MACH_MONOTONIC)
/* ======
   Apple is a little late to the POSIX party.  And why not?  Instead of
   clock_gettime(), they provide mach_absolute_time().  Its units are not
   fixed; we need to use mach_timebase_info() to get the right functions to
   convert its units into nanoseconds.

   To all appearances, mach_absolute_time() seems to be honest-to-goodness
   monotonic.  Whether it stops during sleep or not is unspecified in
   principle, and dependent on CPU architecture in practice.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
#endif
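/* Note on the Mach conversion above: mach_timebase_info() reports a
 * numer/denom pair such that nanoseconds = ticks * numer / denom; with
 * denom multiplied by 1000, the same division yields microseconds.  As a
 * hypothetical example, with a timebase of numer=125, denom=3 (reportedly
 * a common value on Apple silicon), 24000 ticks come out to
 * 24000 * 125 / (3 * 1000) = 1000 usec. */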
#if defined(HAVE_WIN32_MONOTONIC)
/* =====
   Turn we now to Windows.  Want monotonic time on Windows?

   Windows has QueryPerformanceCounter(), which gives the most high-
   resolution time.  It's a pity it's not so monotonic in practice; it's
   also got some fun bugs, especially: with older Windowses, under
   virtualizations, with funny hardware, on multiprocessor systems, and so
   on.  PEP418 [1] has a nice roundup of the issues here.

   There's GetTickCount64() on Vista and later, which gives a number of 1-msec
   ticks since startup.  The accuracy here might be as bad as 10-20 msec, I
   hear.  There's an undocumented function (NtSetTimerResolution) that
   allegedly increases the accuracy.  Good luck!

   There's also GetTickCount(), which is only 32 bits, but seems to be
   supported on pre-Vista versions of Windows.  Apparently, you can coax
   another 14 bits out of it, giving you 2231 years before rollover.

   The less said about timeGetTime() the better.

     "We don't care.  We don't have to.  We're the Phone Company."
            -- Lily Tomlin, SNL

   Our strategy, if precise timers are turned off, is to just use the best
   GetTickCount equivalent available.  If we've been asked for precise timing,
   then we mostly[2] assume that GetTickCount is monotonic, and correct
   GetPerformanceCounter to approximate it.

   [1] http://www.python.org/dev/peps/pep-0418
   [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
       anyway, just in case it isn't.

 */
/*
    Parts of our logic in the win32 timer code here are closely based on
    BitTorrent's libUTP library.  That code is subject to the following
    license:

      Copyright (c) 2010 BitTorrent, Inc.

      Permission is hereby granted, free of charge, to any person obtaining a
      copy of this software and associated documentation files (the
      "Software"), to deal in the Software without restriction, including
      without limitation the rights to use, copy, modify, merge, publish,
      distribute, sublicense, and/or sell copies of the Software, and to
      permit persons to whom the Software is furnished to do so, subject to
      the following conditions:

      The above copyright notice and this permission notice shall be included
      in all copies or substantial portions of the Software.

      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
      WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
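/* Rough numbers for the tick counters handled below: a 32-bit millisecond
 * counter wraps after 2^32 msec, i.e. about 49.7 days, which is why the
 * plain-GetTickCount() fallback bumps its adjustment by 1<<32 whenever the
 * value goes backwards; a 64-bit millisecond counter effectively never
 * wraps. */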
static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us. He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so. */
		DWORD ticks = GetTickCount();
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}

static inline ev_int64_t
abs64(ev_int64_t i)
{
	return i < 0 ? -i : i;
}


int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation.  We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from the
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
#endif

#if defined(HAVE_FALLBACK_MONOTONIC)
/* =====
   And if none of the other options work, let's just use gettimeofday(), and
   ratchet it forward so that it acts like a monotonic timer, whether it
   wants to or not.
604 */ 605 606 int 607 evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, 608 int precise) 609 { 610 memset(base, 0, sizeof(*base)); 611 return 0; 612 } 613 614 int 615 evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, 616 struct timeval *tp) 617 { 618 if (evutil_gettimeofday(tp, NULL) < 0) 619 return -1; 620 adjust_monotonic_time(base, tp); 621 return 0; 622 623 } 624 #endif 625