/*	$NetBSD: subr_time.c,v 1.38 2023/07/08 20:02:10 riastradh Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.38 2023/07/08 20:02:10 riastradh Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/timex.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/intr.h>

/*
 * Compute the number of ticks until the specified time.  Used to compute
 * the second argument to callout_reset() from an absolute time.
 */
int
tvhzto(const struct timeval *tvp)
{
	struct timeval now, tv;

	tv = *tvp;	/* Don't modify original tvp. */
	getmicrotime(&now);
	timersub(&tv, &now, &tv);
	return tvtohz(&tv);
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(const struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints are 32-bit, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;

	KASSERT(usec >= 0);
	KASSERT(usec < 1000000);

	/* catch overflows in conversion time_t->int */
	if (tv->tv_sec > INT_MAX)
		return INT_MAX;
	if (tv->tv_sec < 0)
		return 0;

	if (sec < 0 || (sec == 0 && usec == 0)) {
		/*
		 * Would expire now or in the past.  Return 0 ticks.
		 * This is different from the legacy tvhzto() interface,
		 * and callers need to check for it.
		 */
		ticks = 0;
	} else if (sec <= (LONG_MAX / 1000000))
		ticks = (((sec * 1000000) + (unsigned long)usec + (tick - 1))
		    / tick) + 1;
	else if (sec <= (LONG_MAX / hz))
		ticks = (sec * hz) +
		    (((unsigned long)usec + (tick - 1)) / tick) + 1;
	else
		ticks = LONG_MAX;

	if (ticks > INT_MAX)
		ticks = INT_MAX;

	return ((int)ticks);
}
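
/*
 * Illustrative sketch of a hypothetical caller (the softc "sc", its
 * callout "sc_callout" and the handler "foo_tick" are assumptions, not
 * code in this file): feeding tvtohz() to callout_reset(), per the
 * comments above.  With hz = 100 (tick = 10000us), a 25ms delay becomes
 * ((25000 + 9999) / 10000) + 1 == 4 ticks: the value is rounded up and
 * padded by one tick so the currently expiring tick cannot cut the
 * timeout short.
 *
 *	struct timeval tv = { .tv_sec = 0, .tv_usec = 25000 };
 *
 *	callout_reset(&sc->sc_callout, tvtohz(&tv), foo_tick, sc);
 */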

int
tshzto(const struct timespec *tsp)
{
	struct timespec now, ts;

	ts = *tsp;	/* Don't modify original tsp. */
	getnanotime(&now);
	timespecsub(&ts, &now, &ts);
	return tstohz(&ts);
}

int
tshztoup(const struct timespec *tsp)
{
	struct timespec now, ts;

	ts = *tsp;	/* Don't modify original tsp. */
	getnanouptime(&now);
	timespecsub(&ts, &now, &ts);
	return tstohz(&ts);
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tstohz(const struct timespec *ts)
{
	struct timeval tv;

	/*
	 * usec has sufficient resolution for hz, so convert to a
	 * timeval and use tvtohz() above.
	 */
	TIMESPEC_TO_TIMEVAL(&tv, ts);
	return tvtohz(&tv);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least a minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).  We don't
 * round up the 0,0 value because it means disabling the
 * timer or the interval.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return EINVAL;
	if (tv->tv_sec < 0)
		return ETIMEDOUT;
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return 0;
}

int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return EINVAL;
	if (ts->tv_sec < 0)
		return ETIMEDOUT;
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return 0;
}

int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{

	if (itimespecfix(ts)) {
		return -1;
	}
	KASSERT(ts->tv_sec >= 0);
	getnanouptime(sleepts);
	return 0;
}

int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
	struct timespec now, sleptts;

	KASSERT(ts->tv_sec >= 0);

	/*
	 * Reduce ts by elapsed time based on monotonic time scale.
	 */
	getnanouptime(&now);
	KASSERT(timespeccmp(sleepts, &now, <=));
	timespecsub(&now, sleepts, &sleptts);
	*sleepts = now;

	if (timespeccmp(ts, &sleptts, <=)) { /* timed out */
		timespecclear(ts);
		return 0;
	}
	timespecsub(ts, &sleptts, ts);

	return tstohz(ts);
}
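
/*
 * Illustrative sketch of the inittimeleft()/gettimeleft() pattern for
 * bounding a wait loop against a caller-supplied timeout.  "condition"
 * stands for whatever state the caller is waiting on, and the errors
 * returned on a bad or expired timeout are the caller's choice; both
 * are assumptions, not code in this file.
 *
 *	struct timespec ts = ..., sleepts;
 *	int timo, error;
 *
 *	if (inittimeleft(&ts, &sleepts) == -1)
 *		return EINVAL;
 *	while (!condition) {
 *		if ((timo = gettimeleft(&ts, &sleepts)) == 0)
 *			return EWOULDBLOCK;
 *		error = kpause("example", true, timo, NULL);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			return error;
 *	}
 */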

void
clock_timeleft(clockid_t clockid, struct timespec *ts, struct timespec *sleepts)
{
	struct timespec sleptts;

	clock_gettime1(clockid, &sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;
}

int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{
	int error;
	struct proc *p;

#define CPUCLOCK_ID_MASK (~(CLOCK_THREAD_CPUTIME_ID|CLOCK_PROCESS_CPUTIME_ID))
	if (clock_id & CLOCK_PROCESS_CPUTIME_ID) {
		pid_t pid = clock_id & CPUCLOCK_ID_MASK;
		struct timeval cputime;

		mutex_enter(&proc_lock);
		p = pid == 0 ? curproc : proc_find(pid);
		if (p == NULL) {
			mutex_exit(&proc_lock);
			return ESRCH;
		}
		mutex_enter(p->p_lock);
		calcru(p, /*usertime*/NULL, /*systime*/NULL, /*intrtime*/NULL,
		    &cputime);
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);

		// XXX: Perhaps create a special kauth type
		error = kauth_authorize_process(kauth_cred_get(),
		    KAUTH_PROCESS_PTRACE, p,
		    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
		if (error)
			return error;

		TIMEVAL_TO_TIMESPEC(&cputime, ts);
		return 0;
	} else if (clock_id & CLOCK_THREAD_CPUTIME_ID) {
		struct lwp *l;
		lwpid_t lid = clock_id & CPUCLOCK_ID_MASK;
		struct bintime tm = {0, 0};

		p = curproc;
		mutex_enter(p->p_lock);
		l = lid == 0 ? curlwp : lwp_find(p, lid);
		if (l == NULL) {
			mutex_exit(p->p_lock);
			return ESRCH;
		}
		addrulwp(l, &tm);
		mutex_exit(p->p_lock);

		bintime2timespec(&tm, ts);
		return 0;
	}

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(ts);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ts);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/*
 * Calculate the time delta and convert it from a struct timespec to a
 * tick count.
 */
int
ts2timo(clockid_t clock_id, int flags, struct timespec *ts,
    int *timo, struct timespec *start)
{
	int error;
	struct timespec tsd;

	if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000L)
		return EINVAL;

	if ((flags & TIMER_ABSTIME) != 0 || start != NULL) {
		error = clock_gettime1(clock_id, &tsd);
		if (error != 0)
			return error;
		if (start != NULL)
			*start = tsd;
	}

	if ((flags & TIMER_ABSTIME) != 0) {
		if (!timespecsubok(ts, &tsd))
			return EINVAL;
		timespecsub(ts, &tsd, ts);
	}

	error = itimespecfix(ts);
	if (error != 0)
		return error;

	if (ts->tv_sec == 0 && ts->tv_nsec == 0)
		return ETIMEDOUT;

	*timo = tstohz(ts);
	KASSERT(*timo > 0);

	return 0;
}
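
/*
 * Illustrative sketch of a hypothetical caller converting a
 * user-supplied (possibly TIMER_ABSTIME) timespec into a tick count
 * with ts2timo() and using it for a bounded wait.  The condition
 * variable "cv", its interlock "lock", the flags word "flags" and the
 * local "uts" (already copied in from userland) are assumptions, not
 * code in this file.  ts2timo() returns ETIMEDOUT when the deadline
 * has already passed, which the caller may prefer to treat as an
 * immediate timeout rather than an error.
 *
 *	struct timespec uts;
 *	int timo, error;
 *
 *	error = ts2timo(CLOCK_MONOTONIC, flags, &uts, &timo, NULL);
 *	if (error != 0)
 *		return error;
 *	error = cv_timedwait_sig(&cv, &lock, timo);
 */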

bool
timespecaddok(const struct timespec *tsp, const struct timespec *usp)
{
	enum { TIME_MIN = __type_min(time_t), TIME_MAX = __type_max(time_t) };
	time_t a = tsp->tv_sec;
	time_t b = usp->tv_sec;
	bool carry;

	/*
	 * Caller is responsible for guaranteeing valid timespec
	 * inputs.  Any user-controlled inputs must be validated or
	 * adjusted.
	 */
	KASSERT(tsp->tv_nsec >= 0);
	KASSERT(usp->tv_nsec >= 0);
	KASSERT(tsp->tv_nsec < 1000000000L);
	KASSERT(usp->tv_nsec < 1000000000L);
	CTASSERT(1000000000L <= __type_max(long) - 1000000000L);

	/*
	 * Fail if a + b + carry overflows TIME_MAX, or if a + b
	 * overflows TIME_MIN because timespecadd adds the carry after
	 * computing a + b.
	 *
	 * Break it into two mutually exclusive and exhaustive cases:
	 *	I. a >= 0
	 *	II. a < 0
	 */
	carry = (tsp->tv_nsec + usp->tv_nsec >= 1000000000L);
	if (a >= 0) {
		/*
		 * Case I: a >= 0.  If b < 0, then b + 1 <= 0, so
		 *
		 *	a + b + 1 <= a + 0 <= TIME_MAX,
		 *
		 * and
		 *
		 *	a + b >= 0 + b = b >= TIME_MIN,
		 *
		 * so this can't overflow.
		 *
		 * If b >= 0, then a + b + carry >= a + b >= 0, so
		 * negative results and thus results below TIME_MIN are
		 * impossible; we need only avoid
		 *
		 *	a + b + carry > TIME_MAX,
		 *
		 * which we will do by rejecting if
		 *
		 *	b > TIME_MAX - a - carry,
		 *
		 * which in turn is incidentally always false if b < 0
		 * so we don't need extra logic to discriminate on the
		 * b >= 0 and b < 0 cases.
		 *
		 * Since 0 <= a <= TIME_MAX, we know
		 *
		 *	0 <= TIME_MAX - a <= TIME_MAX,
		 *
		 * and hence
		 *
		 *	-1 <= TIME_MAX - a - 1 < TIME_MAX.
		 *
		 * So we can compute TIME_MAX - a - carry (i.e., either
		 * TIME_MAX - a or TIME_MAX - a - 1) safely without
		 * overflow.
		 */
		if (b > TIME_MAX - a - carry)
			return false;
	} else {
		/*
		 * Case II: a < 0.  If b >= 0, then since a + 1 <= 0,
		 * we have
		 *
		 *	a + b + 1 <= b <= TIME_MAX,
		 *
		 * and
		 *
		 *	a + b >= a >= TIME_MIN,
		 *
		 * so this can't overflow.
		 *
		 * If b < 0, then the intermediate a + b is negative
		 * and the outcome a + b + 1 is nonpositive, so we need
		 * only avoid
		 *
		 *	a + b < TIME_MIN,
		 *
		 * which we will do by rejecting if
		 *
		 *	a < TIME_MIN - b.
		 *
		 * (Reminder: The carry is added afterward in
		 * timespecadd, so to avoid overflow it is not enough
		 * to merely reject a + b + carry < TIME_MIN.)
		 *
		 * It is safe to compute the difference TIME_MIN - b
		 * because b is negative, so the result lies in
		 * (TIME_MIN, 0].
		 */
		if (b < 0 && a < TIME_MIN - b)
			return false;
	}

	return true;
}
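
/*
 * Illustrative sketch of the check-then-add pattern timespecaddok() is
 * meant for.  timespecadd() itself does not check for overflow, so a
 * caller combining a user-controlled "delta" with "now" (both names
 * are assumptions, not code in this file) rejects the sum first.
 *
 *	if (!timespecaddok(&now, &delta))
 *		return EINVAL;
 *	timespecadd(&now, &delta, &deadline);
 */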

bool
timespecsubok(const struct timespec *tsp, const struct timespec *usp)
{
	enum { TIME_MIN = __type_min(time_t), TIME_MAX = __type_max(time_t) };
	time_t a = tsp->tv_sec, b = usp->tv_sec;
	bool borrow;

	/*
	 * Caller is responsible for guaranteeing valid timespec
	 * inputs.  Any user-controlled inputs must be validated or
	 * adjusted.
	 */
	KASSERT(tsp->tv_nsec >= 0);
	KASSERT(usp->tv_nsec >= 0);
	KASSERT(tsp->tv_nsec < 1000000000L);
	KASSERT(usp->tv_nsec < 1000000000L);
	CTASSERT(1000000000L <= __type_max(long) - 1000000000L);

	/*
	 * Fail if a - b - borrow overflows TIME_MIN, or if a - b
	 * overflows TIME_MAX because timespecsub subtracts the borrow
	 * after computing a - b.
	 *
	 * Break it into two mutually exclusive and exhaustive cases:
	 *	I. a < 0
	 *	II. a >= 0
	 */
	borrow = (tsp->tv_nsec - usp->tv_nsec < 0);
	if (a < 0) {
		/*
		 * Case I: a < 0.  If b < 0, then -b - 1 >= 0, so
		 *
		 *	a - b - 1 >= a + 0 >= TIME_MIN,
		 *
		 * and, since a <= -1, provided that TIME_MIN <=
		 * -TIME_MAX - 1 so that TIME_MAX <= -TIME_MIN - 1 (in
		 * fact, equality holds, under the assumption of
		 * two's-complement arithmetic),
		 *
		 *	a - b <= -1 - b = -b - 1 <= TIME_MAX,
		 *
		 * so this can't overflow.
		 */
		CTASSERT(TIME_MIN <= -TIME_MAX - 1);

		/*
		 * If b >= 0, then a - b - borrow <= a - b < 0, so
		 * positive results and thus results above TIME_MAX are
		 * impossible; we need only avoid
		 *
		 *	a - b - borrow < TIME_MIN,
		 *
		 * which we will do by rejecting if
		 *
		 *	a < TIME_MIN + b + borrow.
		 *
		 * The right-hand side is safe to evaluate for any
		 * values of b and borrow as long as TIME_MIN +
		 * TIME_MAX + 1 <= TIME_MAX, i.e., TIME_MIN <= -1.
		 * (Note: If time_t were unsigned, this would fail!)
		 *
		 * Note: Unlike Case I in timespecaddok, this criterion
		 * does not work for b < 0, nor can the roles of a and
		 * b in the inequality be reversed (e.g., -b < TIME_MIN
		 * - a + borrow) without extra cases like checking for
		 * b = TIME_MIN.
		 */
		CTASSERT(TIME_MIN < -1);
		if (b >= 0 && a < TIME_MIN + b + borrow)
			return false;
	} else {
		/*
		 * Case II: a >= 0.  If b >= 0, then
		 *
		 *	a - b <= a <= TIME_MAX,
		 *
		 * and, provided TIME_MIN <= -TIME_MAX - 1 (in fact,
		 * equality holds, under the assumption of
		 * two's-complement arithmetic)
		 *
		 *	a - b - 1 >= -b - 1 >= -TIME_MAX - 1 >= TIME_MIN,
		 *
		 * so this can't overflow.
		 */
		CTASSERT(TIME_MIN <= -TIME_MAX - 1);

		/*
		 * If b < 0, then a - b >= a >= 0, so negative results
		 * and thus results below TIME_MIN are impossible; we
		 * need only avoid
		 *
		 *	a - b > TIME_MAX,
		 *
		 * which we will do by rejecting if
		 *
		 *	a > TIME_MAX + b.
		 *
		 * (Reminder: The borrow is subtracted afterward in
		 * timespecsub, so to avoid overflow it is not enough
		 * to merely reject a - b - borrow > TIME_MAX.)
		 *
		 * It is safe to compute the sum TIME_MAX + b because b
		 * is negative, so the result lies in [0, TIME_MAX).
		 */
		if (b < 0 && a > TIME_MAX + b)
			return false;
	}

	return true;
}
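
/*
 * Illustrative sketch of the matching check-then-subtract pattern, as
 * used by ts2timo() above when converting a TIMER_ABSTIME deadline to
 * a relative interval.  "abstime" and "now" are assumptions, not code
 * in this file.
 *
 *	if (!timespecsubok(&abstime, &now))
 *		return EINVAL;
 *	timespecsub(&abstime, &now, &abstime);
 */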