xref: /openbsd-src/sys/kern/kern_time.c (revision 33b792a3c1c87b47219fdf9a73548c4003214de3)
1 /*	$OpenBSD: kern_time.c,v 1.28 2002/02/17 06:11:05 art Exp $	*/
2 /*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
37  */
38 
39 #include <sys/param.h>
40 #include <sys/resourcevar.h>
41 #include <sys/kernel.h>
42 #include <sys/systm.h>
43 #include <sys/proc.h>
44 #include <sys/vnode.h>
45 #include <sys/signalvar.h>
46 
47 #include <sys/mount.h>
48 #include <sys/syscallargs.h>
49 
50 #if defined(NFSCLIENT) || defined(NFSSERVER)
51 #include <nfs/rpcv2.h>
52 #include <nfs/nfsproto.h>
53 #include <nfs/nfs_var.h>
54 #endif
55 
56 #include <machine/cpu.h>
57 
58 void	settime __P((struct timeval *));
59 void	itimerround __P((struct timeval *));
60 
61 /*
62  * Time of day and interval timer support.
63  *
64  * These routines provide the kernel entry points to get and set
65  * the time-of-day and per-process interval timers.  Subroutines
66  * here provide support for adding and subtracting timeval structures
67  * and decrementing interval timers, optionally reloading the interval
68  * timers when they expire.
69  */
70 
/*
 * Set the system time of day.  Shared by clock_settime and settimeofday.
 *
 * The delta between the new and the current time is computed at
 * splclock() so "time" cannot advance underneath us, then the same
 * delta is applied to boottime and runtime so intervals measured
 * against them remain consistent.
 */
void
settime(tv)
	struct timeval *tv;
{
	struct timeval delta;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	timersub(tv, &time, &delta);
	time = *tv;
	/* drop to softclock level so pending clock interrupts can run */
	(void) spllowersoftclock();
	timeradd(&boottime, &delta, &boottime);
	timeradd(&runtime, &delta, &runtime);
	splx(s);
	/* push the new time down to the time-of-day hardware */
	resettodr();
}
89 
90 /* ARGSUSED */
91 int
92 sys_clock_gettime(p, v, retval)
93 	struct proc *p;
94 	void *v;
95 	register_t *retval;
96 {
97 	register struct sys_clock_gettime_args /* {
98 		syscallarg(clockid_t) clock_id;
99 		syscallarg(struct timespec *) tp;
100 	} */ *uap = v;
101 	clockid_t clock_id;
102 	struct timeval atv;
103 	struct timespec ats;
104 
105 	clock_id = SCARG(uap, clock_id);
106 	if (clock_id != CLOCK_REALTIME)
107 		return (EINVAL);
108 
109 	microtime(&atv);
110 	TIMEVAL_TO_TIMESPEC(&atv,&ats);
111 
112 	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
113 }
114 
115 /* ARGSUSED */
116 int
117 sys_clock_settime(p, v, retval)
118 	struct proc *p;
119 	void *v;
120 	register_t *retval;
121 {
122 	register struct sys_clock_settime_args /* {
123 		syscallarg(clockid_t) clock_id;
124 		syscallarg(const struct timespec *) tp;
125 	} */ *uap = v;
126 	clockid_t clock_id;
127 	struct timeval atv;
128 	struct timespec ats;
129 	int error;
130 
131 	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
132 		return (error);
133 
134 	clock_id = SCARG(uap, clock_id);
135 	if (clock_id != CLOCK_REALTIME)
136 		return (EINVAL);
137 
138 	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
139 		return (error);
140 
141 	TIMESPEC_TO_TIMEVAL(&atv,&ats);
142 
143 	/*
144 	 * If the system is secure, we do not allow the time to be
145 	 * set to an earlier value (it may be slowed using adjtime,
146 	 * but not set back). This feature prevent interlopers from
147 	 * setting arbitrary time stamps on files.
148 	 */
149 	if (securelevel > 1 && timercmp(&atv, &time, <))
150 		return (EPERM);
151 	settime(&atv);
152 
153 	return (0);
154 }
155 
156 int
157 sys_clock_getres(p, v, retval)
158 	struct proc *p;
159 	void *v;
160 	register_t *retval;
161 {
162 	register struct sys_clock_getres_args /* {
163 		syscallarg(clockid_t) clock_id;
164 		syscallarg(struct timespec *) tp;
165 	} */ *uap = v;
166 	clockid_t clock_id;
167 	struct timespec ts;
168 	int error = 0;
169 
170 	clock_id = SCARG(uap, clock_id);
171 	if (clock_id != CLOCK_REALTIME)
172 		return (EINVAL);
173 
174 	if (SCARG(uap, tp)) {
175 		ts.tv_sec = 0;
176 		ts.tv_nsec = 1000000000 / hz;
177 
178 		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
179 	}
180 
181 	return error;
182 }
183 
184 /* ARGSUSED */
185 int
186 sys_nanosleep(p, v, retval)
187 	struct proc *p;
188 	void *v;
189 	register_t *retval;
190 {
191 	static int nanowait;
192 	struct sys_nanosleep_args/* {
193 		syscallarg(const struct timespec *) rqtp;
194 		syscallarg(struct timespec *) rmtp;
195 	} */ *uap = v;
196 	struct timespec rqt;
197 	struct timespec rmt;
198 	struct timeval stv, etv, atv;
199 	int error, s, timo;
200 
201 	error = copyin((const void *)SCARG(uap, rqtp), (void *)&rqt,
202 	    sizeof(struct timespec));
203 	if (error)
204 		return (error);
205 
206 	TIMESPEC_TO_TIMEVAL(&atv,&rqt)
207 	if (itimerfix(&atv))
208 		return (EINVAL);
209 
210 	if (SCARG(uap, rmtp)) {
211 		s = splclock();
212 		stv = mono_time;
213 		splx(s);
214 	}
215 
216 	timo = tvtohz(&atv);
217 
218 	/* Avoid sleeping forever. */
219 	if (timo <= 0)
220 		timo = 1;
221 
222 	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
223 	if (error == ERESTART)
224 		error = EINTR;
225 	if (error == EWOULDBLOCK)
226 		error = 0;
227 
228 	if (SCARG(uap, rmtp)) {
229 		int error;
230 
231 		s = splclock();
232 		etv = mono_time;
233 		splx(s);
234 
235 		timersub(&etv, &stv, &stv);
236 		timersub(&atv, &stv, &atv);
237 
238 		if (atv.tv_sec < 0)
239 			timerclear(&atv);
240 
241 		TIMEVAL_TO_TIMESPEC(&atv, &rmt);
242 		error = copyout((void *)&rmt, (void *)SCARG(uap,rmtp),
243 		    sizeof(rmt));
244 		if (error)
245 			return (error);
246 	}
247 
248 	return error;
249 }
250 
251 /* ARGSUSED */
252 int
253 sys_gettimeofday(p, v, retval)
254 	struct proc *p;
255 	void *v;
256 	register_t *retval;
257 {
258 	register struct sys_gettimeofday_args /* {
259 		syscallarg(struct timeval *) tp;
260 		syscallarg(struct timezone *) tzp;
261 	} */ *uap = v;
262 	struct timeval atv;
263 	int error = 0;
264 
265 	if (SCARG(uap, tp)) {
266 		microtime(&atv);
267 		if ((error = copyout((void *)&atv, (void *)SCARG(uap, tp),
268 		    sizeof (atv))))
269 			return (error);
270 	}
271 	if (SCARG(uap, tzp))
272 		error = copyout((void *)&tz, (void *)SCARG(uap, tzp),
273 		    sizeof (tz));
274 	return (error);
275 }
276 
277 /* ARGSUSED */
278 int
279 sys_settimeofday(p, v, retval)
280 	struct proc *p;
281 	void *v;
282 	register_t *retval;
283 {
284 	struct sys_settimeofday_args /* {
285 		syscallarg(struct timeval *) tv;
286 		syscallarg(struct timezone *) tzp;
287 	} */ *uap = v;
288 	struct timeval atv;
289 	struct timezone atz;
290 	int error;
291 
292 	if ((error = suser(p->p_ucred, &p->p_acflag)))
293 		return (error);
294 	/* Verify all parameters before changing time. */
295 	if (SCARG(uap, tv) && (error = copyin((void *)SCARG(uap, tv),
296 	    (void *)&atv, sizeof(atv))))
297 		return (error);
298 	if (SCARG(uap, tzp) && (error = copyin((void *)SCARG(uap, tzp),
299 	    (void *)&atz, sizeof(atz))))
300 		return (error);
301 	if (SCARG(uap, tv)) {
302 		/*
303 		 * Don't allow the time to be set forward so far it will wrap
304 		 * and become negative, thus allowing an attacker to bypass
305 		 * the next check below.  The cutoff is 1 year before rollover
306 		 * occurs, so even if the attacker uses adjtime(2) to move
307 		 * the time past the cutoff, it will take a very long time
308 		 * to get to the wrap point.
309 		 *
310 		 * XXX: we check against INT_MAX since on 64-bit
311 		 *	platforms, sizeof(int) != sizeof(long) and
312 		 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
313 		 */
314 		if (atv.tv_sec > INT_MAX - 365*24*60*60) {
315 			printf("denied attempt to set clock forward to %ld\n",
316 			    atv.tv_sec);
317 			return (EPERM);
318 		}
319 		/*
320 		 * If the system is secure, we do not allow the time to be
321 		 * set to an earlier value (it may be slowed using adjtime,
322 		 * but not set back). This feature prevent interlopers from
323 		 * setting arbitrary time stamps on files.
324 		 */
325 		if (securelevel > 1 && timercmp(&atv, &time, <)) {
326 			printf("denied attempt to set clock back %ld seconds\n",
327 			    time.tv_sec - atv.tv_sec);
328 			return (EPERM);
329 		}
330 		settime(&atv);
331 	}
332 	if (SCARG(uap, tzp))
333 		tz = atz;
334 	return (0);
335 }
336 
/*
 * State shared with hardclock() to apply adjtime(2) corrections
 * incrementally, tickdelta microseconds per clock tick.
 */
int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
long	bigadj = 1000000;		/* use 10x skew above bigadj us. */
340 
341 /* ARGSUSED */
342 int
343 sys_adjtime(p, v, retval)
344 	struct proc *p;
345 	void *v;
346 	register_t *retval;
347 {
348 	register struct sys_adjtime_args /* {
349 		syscallarg(struct timeval *) delta;
350 		syscallarg(struct timeval *) olddelta;
351 	} */ *uap = v;
352 	struct timeval atv;
353 	register long ndelta, ntickdelta, odelta;
354 	int s, error;
355 
356 	if ((error = suser(p->p_ucred, &p->p_acflag)))
357 		return (error);
358 	if ((error = copyin((void *)SCARG(uap, delta), (void *)&atv,
359 	    sizeof(struct timeval))))
360 		return (error);
361 
362 	/*
363 	 * Compute the total correction and the rate at which to apply it.
364 	 * Round the adjustment down to a whole multiple of the per-tick
365 	 * delta, so that after some number of incremental changes in
366 	 * hardclock(), tickdelta will become zero, lest the correction
367 	 * overshoot and start taking us away from the desired final time.
368 	 */
369 	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
370 	if (ndelta > bigadj)
371 		ntickdelta = 10 * tickadj;
372 	else
373 		ntickdelta = tickadj;
374 	if (ndelta % ntickdelta)
375 		ndelta = ndelta / ntickdelta * ntickdelta;
376 
377 	/*
378 	 * To make hardclock()'s job easier, make the per-tick delta negative
379 	 * if we want time to run slower; then hardclock can simply compute
380 	 * tick + tickdelta, and subtract tickdelta from timedelta.
381 	 */
382 	if (ndelta < 0)
383 		ntickdelta = -ntickdelta;
384 	s = splclock();
385 	odelta = timedelta;
386 	timedelta = ndelta;
387 	tickdelta = ntickdelta;
388 	splx(s);
389 
390 	if (SCARG(uap, olddelta)) {
391 		atv.tv_sec = odelta / 1000000;
392 		atv.tv_usec = odelta % 1000000;
393 		if ((error = copyout((void *)&atv, (void *)SCARG(uap, olddelta),
394 		    sizeof(struct timeval))))
395 			return (error);
396 	}
397 	return (0);
398 }
399 
400 /*
401  * Get value of an interval timer.  The process virtual and
402  * profiling virtual time timers are kept in the p_stats area, since
403  * they can be swapped out.  These are kept internally in the
404  * way they are specified externally: in time until they expire.
405  *
406  * The real time interval timer is kept in the process table slot
407  * for the process, and its value (it_value) is kept as an
408  * absolute time rather than as a delta, so that it is easy to keep
409  * periodic real-time signals from drifting.
410  *
411  * Virtual time timers are processed in the hardclock() routine of
412  * kern_clock.c.  The real time timer is processed by a timeout
413  * routine, called from the softclock() routine.  Since a callout
414  * may be delayed in real time due to interrupt processing in the system,
415  * it is possible for the real time timeout routine (realitexpire, given below),
416  * to be delayed in real time past when it is supposed to occur.  It
417  * does not suffice, therefore, to reload the real timer .it_value from the
418  * real time timers .it_interval.  Rather, we compute the next time in
419  * absolute time the timer should go off.
420  */
421 /* ARGSUSED */
422 int
423 sys_getitimer(p, v, retval)
424 	struct proc *p;
425 	void *v;
426 	register_t *retval;
427 {
428 	register struct sys_getitimer_args /* {
429 		syscallarg(u_int) which;
430 		syscallarg(struct itimerval *) itv;
431 	} */ *uap = v;
432 	struct itimerval aitv;
433 	int s;
434 
435 	if (SCARG(uap, which) > ITIMER_PROF)
436 		return (EINVAL);
437 	s = splclock();
438 	if (SCARG(uap, which) == ITIMER_REAL) {
439 		/*
440 		 * Convert from absolute to relative time in .it_value
441 		 * part of real time timer.  If time for real time timer
442 		 * has passed return 0, else return difference between
443 		 * current time and time for the timer to go off.
444 		 */
445 		aitv = p->p_realtimer;
446 		if (timerisset(&aitv.it_value)) {
447 			if (timercmp(&aitv.it_value, &time, <))
448 				timerclear(&aitv.it_value);
449 			else
450 				timersub(&aitv.it_value, &time,
451 				    &aitv.it_value);
452 		}
453 	} else
454 		aitv = p->p_stats->p_timer[SCARG(uap, which)];
455 	splx(s);
456 	return (copyout((void *)&aitv, (void *)SCARG(uap, itv),
457 	    sizeof (struct itimerval)));
458 }
459 
/*
 * Set an interval timer, optionally returning the previous value.
 */
/* ARGSUSED */
int
sys_setitimer(p, v, retval)
	struct proc *p;
	register void *v;
	register_t *retval;
{
	register struct sys_setitimer_args /* {
		syscallarg(u_int) which;
		syscallarg(struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct itimerval aitv;
	register const struct itimerval *itvp;
	int s, error;
	int timo;

	if (SCARG(uap, which) > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	/*
	 * Trick: overwrite uap->itv with uap->oitv so sys_getitimer() can
	 * be reused to copy out the old value.  The new value was already
	 * saved in itvp/aitv above, so nothing is lost.
	 */
	if ((SCARG(uap, itv) = SCARG(uap, oitv)) &&
	    (error = sys_getitimer(p, uap, retval)))
		return (error);
	/* A NULL new value means "just read the old one"; we are done. */
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	s = splclock();
	if (SCARG(uap, which) == ITIMER_REAL) {
		/* Cancel any pending expiry, then rearm if a value is set. */
		timeout_del(&p->p_realit_to);
		if (timerisset(&aitv.it_value)) {
			/* The real timer's it_value is kept as absolute time. */
			timeradd(&aitv.it_value, &time, &aitv.it_value);
			timo = hzto(&aitv.it_value);
			if (timo <= 0)
				timo = 1;
			timeout_add(&p->p_realit_to, timo);
		}
		p->p_realtimer = aitv;
	} else {
		/* Virtual timers fire from hardclock(); round the interval
		 * up to at least one clock tick. */
		itimerround(&aitv.it_interval);
		p->p_stats->p_timer[SCARG(uap, which)] = aitv;
	}
	splx(s);
	return (0);
}
508 
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(arg)
	void *arg;
{
	register struct proc *p;
	int s, timo;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	/* One-shot timer: clear the absolute expiry time and stop. */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_realtimer.it_value);
		return;
	}
	/*
	 * Periodic timer: step the absolute expiry forward by whole
	 * intervals until it lands in the future, then rearm the timeout.
	 * Stepping from the previous expiry (rather than "now + interval")
	 * keeps a periodic timer from drifting; looping compresses any
	 * missed expiries into the single SIGALRM sent above.
	 */
	for (;;) {
		s = splclock();
		timeradd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval, &p->p_realtimer.it_value);
		if (timercmp(&p->p_realtimer.it_value, &time, >)) {
			timo = hzto(&p->p_realtimer.it_value);
			if (timo <= 0)
				timo = 1;
			timeout_add(&p->p_realit_to, timo);
			splx(s);
			return;
		}
		splx(s);
	}
}
545 
/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(tv)
	struct timeval *tv;
{
	/* Microseconds must lie in [0, 1 second). */
	if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	/* Seconds must be non-negative and not absurdly large. */
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000)
		return (EINVAL);
	return (0);
}
561 
562 /*
563  * Timer interval smaller than the resolution of the system clock are
564  * rounded up.
565  */
566 void
567 itimerround(tv)
568 	struct timeval *tv;
569 {
570 	if (tv->tv_sec == 0 && tv->tv_usec < tick)
571 		tv->tv_usec = tick;
572 }
573 
574 /*
575  * Decrement an interval timer by a specified number
576  * of microseconds, which must be less than a second,
577  * i.e. < 1000000.  If the timer expires, then reload
578  * it.  In this case, carry over (usec - old value) to
579  * reduce the value reloaded into the timer so that
580  * the timer does not drift.  This routine assumes
581  * that it is called in a context where the timers
582  * on which it is operating cannot change in value.
583  */
584 int
585 itimerdecr(itp, usec)
586 	register struct itimerval *itp;
587 	int usec;
588 {
589 
590 	if (itp->it_value.tv_usec < usec) {
591 		if (itp->it_value.tv_sec == 0) {
592 			/* expired, and already in next interval */
593 			usec -= itp->it_value.tv_usec;
594 			goto expire;
595 		}
596 		itp->it_value.tv_usec += 1000000;
597 		itp->it_value.tv_sec--;
598 	}
599 	itp->it_value.tv_usec -= usec;
600 	usec = 0;
601 	if (timerisset(&itp->it_value))
602 		return (1);
603 	/* expired, exactly at end of interval */
604 expire:
605 	if (timerisset(&itp->it_interval)) {
606 		itp->it_value = itp->it_interval;
607 		itp->it_value.tv_usec -= usec;
608 		if (itp->it_value.tv_usec < 0) {
609 			itp->it_value.tv_usec += 1000000;
610 			itp->it_value.tv_sec--;
611 		}
612 	} else
613 		itp->it_value.tv_usec = 0;		/* sec is already 0 */
614 	return (0);
615 }
616 
617 /*
618  * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
619  * for usage and rationale.
620  */
621 int
622 ratecheck(lasttime, mininterval)
623 	struct timeval *lasttime;
624 	const struct timeval *mininterval;
625 {
626 	struct timeval tv, delta;
627 	int s, rv = 0;
628 
629 	s = splclock();
630 	tv = mono_time;
631 	splx(s);
632 
633 	timersub(&tv, lasttime, &delta);
634 
635 	/*
636 	 * check for 0,0 is so that the message will be seen at least once,
637 	 * even if interval is huge.
638 	 */
639 	if (timercmp(&delta, mininterval, >=) ||
640 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
641 		*lasttime = tv;
642 		rv = 1;
643 	}
644 
645 	return (rv);
646 }
647 
648 /*
649  * ppsratecheck(): packets (or events) per second limitation.
650  */
651 int
652 ppsratecheck(lasttime, curpps, maxpps)
653 	struct timeval *lasttime;
654 	int *curpps;
655 	int maxpps;	/* maximum pps allowed */
656 {
657 	struct timeval tv, delta;
658 	int s, rv;
659 
660 	s = splclock();
661 	tv = mono_time;
662 	splx(s);
663 
664 	timersub(&tv, lasttime, &delta);
665 
666 	/*
667 	 * check for 0,0 is so that the message will be seen at least once.
668 	 * if more than one second have passed since the last update of
669 	 * lasttime, reset the counter.
670 	 *
671 	 * we do increment *curpps even in *curpps < maxpps case, as some may
672 	 * try to use *curpps for stat purposes as well.
673 	 */
674 	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
675 	    delta.tv_sec >= 1) {
676 		*lasttime = tv;
677 		*curpps = 0;
678 		rv = 1;
679 	} else if (maxpps < 0)
680 		rv = 1;
681 	else if (*curpps < maxpps)
682 		rv = 1;
683 	else
684 		rv = 0;
685 
686 #if 1 /*DIAGNOSTIC?*/
687 	/* be careful about wrap-around */
688 	if (*curpps + 1 > *curpps)
689 		*curpps = *curpps + 1;
690 #else
691 	/*
692 	 * assume that there's not too many calls to this function.
693 	 * not sure if the assumption holds, as it depends on *caller's*
694 	 * behavior, not the behavior of this function.
695 	 * IMHO it is wrong to make assumption on the caller's behavior,
696 	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
697 	 */
698 	*curpps = *curpps + 1;
699 #endif
700 
701 	return (rv);
702 }
703