xref: /netbsd-src/external/bsd/ntp/dist/ntpd/ntp_refclock.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /*	$NetBSD: ntp_refclock.c,v 1.13 2020/05/25 20:47:25 christos Exp $	*/
2 
3 /*
4  * ntp_refclock - processing support for reference clocks
5  */
6 #ifdef HAVE_CONFIG_H
7 # include <config.h>
8 #endif
9 
10 #include "ntpd.h"
11 #include "ntp_io.h"
12 #include "ntp_unixtime.h"
13 #include "ntp_tty.h"
14 #include "ntp_refclock.h"
15 #include "ntp_stdlib.h"
16 #include "ntp_assert.h"
17 #include "timespecops.h"
18 
19 #include <stdio.h>
20 
21 #ifdef HAVE_SYS_IOCTL_H
22 # include <sys/ioctl.h>
23 #endif /* HAVE_SYS_IOCTL_H */
24 
25 #ifdef REFCLOCK
26 
27 #ifdef KERNEL_PLL
28 #include "ntp_syscall.h"
29 #endif /* KERNEL_PLL */
30 
31 #ifdef HAVE_PPSAPI
32 #include "ppsapi_timepps.h"
33 #include "refclock_atom.h"
34 #endif /* HAVE_PPSAPI */
35 
36 /*
37  * Reference clock support is provided here by maintaining the fiction
38  * that the clock is actually a peer.  As no packets are exchanged with
39  * a reference clock, however, we replace the transmit, receive and
40  * packet procedures with separate code to simulate them.  Routines
41  * refclock_transmit() and refclock_receive() maintain the peer
42  * variables in a state analogous to an actual peer and pass reference
43  * clock data on through the filters.  Routines refclock_peer() and
44  * refclock_unpeer() are called to initialize and terminate reference
45  * clock associations.  A set of utility routines is included to open
46  * serial devices, process sample data, and to perform various debugging
47  * functions.
48  *
49  * The main interface used by these routines is the refclockproc
 * structure, which contains for most drivers the decimal equivalents
51  * of the year, day, month, hour, second and millisecond/microsecond
52  * decoded from the ASCII timecode.  Additional information includes
53  * the receive timestamp, exception report, statistics tallies, etc.
54  * In addition, there may be a driver-specific unit structure used for
55  * local control of the device.
56  *
57  * The support routines are passed a pointer to the peer structure,
58  * which is used for all peer-specific processing and contains a
59  * pointer to the refclockproc structure, which in turn contains a
60  * pointer to the unit structure, if used.  The peer structure is
61  * identified by an interface address in the dotted quad form
62  * 127.127.t.u, where t is the clock type and u the unit.
63  */
64 #define FUDGEFAC	.1	/* fudge correction factor */
65 #define LF		0x0a	/* ASCII LF */
66 
67 int	cal_enable;		/* enable refclock calibrate */
68 
69 /*
70  * Forward declarations
71  */
72 static int  refclock_cmpl_fp (const void *, const void *);
73 static int  refclock_sample (struct refclockproc *);
74 static int  refclock_ioctl(int, u_int);
75 static void refclock_checkburst(struct peer *, struct refclockproc *);
76 
77 /* circular buffer functions
78  *
 * circular buffer management comes in two flavours:
80  * for powers of two, and all others.
81  */
82 
83 #if MAXSTAGE & (MAXSTAGE - 1)
84 
85 static void clk_add_sample(
86 	struct refclockproc * const	pp,
87 	double				sv
88 	)
89 {
90 	pp->coderecv = (pp->coderecv + 1) % MAXSTAGE;
91 	if (pp->coderecv == pp->codeproc)
92 		pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
93 	pp->filter[pp->coderecv] = sv;
94 }
95 
96 static double clk_pop_sample(
97 	struct refclockproc * const	pp
98 	)
99 {
100 	if (pp->coderecv == pp->codeproc)
101 		return 0; /* Maybe a NaN would be better? */
102 	pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
103 	return pp->filter[pp->codeproc];
104 }
105 
/* Number of samples pending in the (non-power-of-two) ring buffer.
 * When 'coderecv' has wrapped around below 'codeproc' the unsigned
 * subtraction wraps modulo 2^N, producing a huge value; adding
 * MAXSTAGE (again modulo 2^N) folds the result back into the range
 * [0, MAXSTAGE).  Indices always stay below MAXSTAGE, so a true
 * difference can never exceed MAXSTAGE and the test is safe.
 */
static inline u_int clk_cnt_sample(
	struct refclockproc * const	pp
	)
{
	u_int retv = pp->coderecv - pp->codeproc;
	if (retv > MAXSTAGE)
		retv += MAXSTAGE;	/* undo the unsigned wrap */
	return retv;
}
115 
116 #else
117 
118 static inline void clk_add_sample(
119 	struct refclockproc * const	pp,
120 	double				sv
121 	)
122 {
123 	pp->coderecv  = (pp->coderecv + 1) & (MAXSTAGE - 1);
124 	if (pp->coderecv == pp->codeproc)
125 		pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
126 	pp->filter[pp->coderecv] = sv;
127 }
128 
129 static inline double clk_pop_sample(
130 	struct refclockproc * const	pp
131 	)
132 {
133 	if (pp->coderecv == pp->codeproc)
134 		return 0; /* Maybe a NaN would be better? */
135 	pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
136 	return pp->filter[pp->codeproc];
137 }
138 
139 static inline u_int clk_cnt_sample(
140 	struct refclockproc * const	pp
141 	)
142 {
143 	return (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
144 }
145 
146 #endif
147 
148 /*
 * refclock_report - note the occurrence of an event
150  *
151  * This routine presently just remembers the report and logs it, but
152  * does nothing heroic for the trap handler. It tries to be a good
153  * citizen and bothers the system log only if things change.
154  */
155 void
156 refclock_report(
157 	struct peer *peer,
158 	int code
159 	)
160 {
161 	struct refclockproc *pp;
162 
163 	pp = peer->procptr;
164 	if (pp == NULL)
165 		return;
166 
167 	switch (code) {
168 
169 	case CEVNT_TIMEOUT:
170 		pp->noreply++;
171 		break;
172 
173 	case CEVNT_BADREPLY:
174 		pp->badformat++;
175 		break;
176 
177 	case CEVNT_FAULT:
178 		break;
179 
180 	case CEVNT_BADDATE:
181 	case CEVNT_BADTIME:
182 		pp->baddata++;
183 		break;
184 
185 	default:
186 		/* ignore others */
187 		break;
188 	}
189 	if ((code != CEVNT_NOMINAL) && (pp->lastevent < 15))
190 		pp->lastevent++;
191 	if (pp->currentstatus != code) {
192 		pp->currentstatus = (u_char)code;
193 		report_event(PEVNT_CLOCK, peer, ceventstr(code));
194 	}
195 }
196 
197 
198 /*
199  * init_refclock - initialize the reference clock drivers
200  *
201  * This routine calls each of the drivers in turn to initialize internal
202  * variables, if necessary. Most drivers have nothing to say at this
203  * point.
204  */
205 void
206 init_refclock(void)
207 {
208 	int i;
209 
210 	for (i = 0; i < (int)num_refclock_conf; i++)
211 		if (refclock_conf[i]->clock_init != noentry)
212 			(refclock_conf[i]->clock_init)();
213 }
214 
215 
216 /*
217  * refclock_newpeer - initialize and start a reference clock
218  *
219  * This routine allocates and initializes the interface structure which
220  * supports a reference clock in the form of an ordinary NTP peer. A
221  * driver-specific support routine completes the initialization, if
222  * used. Default peer variables which identify the clock and establish
223  * its reference ID and stratum are set here. It returns one if success
224  * and zero if the clock address is invalid or already running,
225  * insufficient resources are available or the driver declares a bum
226  * rap.
227  */
228 int
229 refclock_newpeer(
230 	struct peer *peer	/* peer structure pointer */
231 	)
232 {
233 	struct refclockproc *pp;
234 	u_char clktype;
235 	int unit;
236 
237 	/*
238 	 * Check for valid clock address. If already running, shut it
239 	 * down first.
240 	 */
241 	if (!ISREFCLOCKADR(&peer->srcadr)) {
242 		msyslog(LOG_ERR,
243 			"refclock_newpeer: clock address %s invalid",
244 			stoa(&peer->srcadr));
245 		return (0);
246 	}
247 	clktype = (u_char)REFCLOCKTYPE(&peer->srcadr);
248 	unit = REFCLOCKUNIT(&peer->srcadr);
249 	if (clktype >= num_refclock_conf ||
250 		refclock_conf[clktype]->clock_start == noentry) {
251 		msyslog(LOG_ERR,
252 			"refclock_newpeer: clock type %d invalid\n",
253 			clktype);
254 		return (0);
255 	}
256 
257 	/*
258 	 * Allocate and initialize interface structure
259 	 */
260 	pp = emalloc_zero(sizeof(*pp));
261 	peer->procptr = pp;
262 
263 	/*
264 	 * Initialize structures
265 	 */
266 	peer->refclktype = clktype;
267 	peer->refclkunit = (u_char)unit;
268 	peer->flags |= FLAG_REFCLOCK;
269 	peer->leap = LEAP_NOTINSYNC;
270 	peer->stratum = STRATUM_REFCLOCK;
271 	peer->ppoll = peer->maxpoll;
272 	pp->type = clktype;
273 	pp->conf = refclock_conf[clktype];
274 	pp->timestarted = current_time;
275 	pp->io.fd = -1;
276 
277 	/*
278 	 * Set peer.pmode based on the hmode. For appearances only.
279 	 */
280 	switch (peer->hmode) {
281 	case MODE_ACTIVE:
282 		peer->pmode = MODE_PASSIVE;
283 		break;
284 
285 	default:
286 		peer->pmode = MODE_SERVER;
287 		break;
288 	}
289 
290 	/*
291 	 * Do driver dependent initialization. The above defaults
292 	 * can be wiggled, then finish up for consistency.
293 	 */
294 	if (!((refclock_conf[clktype]->clock_start)(unit, peer))) {
295 		refclock_unpeer(peer);
296 		return (0);
297 	}
298 	peer->refid = pp->refid;
299 	return (1);
300 }
301 
302 
303 /*
304  * refclock_unpeer - shut down a clock
305  */
306 void
307 refclock_unpeer(
308 	struct peer *peer	/* peer structure pointer */
309 	)
310 {
311 	u_char clktype;
312 	int unit;
313 
314 	/*
315 	 * Wiggle the driver to release its resources, then give back
316 	 * the interface structure.
317 	 */
318 	if (NULL == peer->procptr)
319 		return;
320 
321 	clktype = peer->refclktype;
322 	unit = peer->refclkunit;
323 	if (refclock_conf[clktype]->clock_shutdown != noentry)
324 		(refclock_conf[clktype]->clock_shutdown)(unit, peer);
325 	free(peer->procptr);
326 	peer->procptr = NULL;
327 }
328 
329 
330 /*
331  * refclock_timer - called once per second for housekeeping.
332  */
333 void
334 refclock_timer(
335 	struct peer *p
336 	)
337 {
338 	struct refclockproc *	pp;
339 	int			unit;
340 
341 	unit = p->refclkunit;
342 	pp = p->procptr;
343 	if (pp->conf->clock_timer != noentry)
344 		(*pp->conf->clock_timer)(unit, p);
345 	if (pp->action != NULL && pp->nextaction <= current_time)
346 		(*pp->action)(p);
347 }
348 
349 
350 /*
351  * refclock_transmit - simulate the transmit procedure
352  *
353  * This routine implements the NTP transmit procedure for a reference
354  * clock. This provides a mechanism to call the driver at the NTP poll
355  * interval, as well as provides a reachability mechanism to detect a
356  * broken radio or other madness.
357  */
void
refclock_transmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	u_char clktype;
	int unit;

	clktype = peer->refclktype;
	unit = peer->refclkunit;
	peer->sent++;
	get_systime(&peer->xmt);

	/*
	 * This is a ripoff of the peer transmit routine, but
	 * specialized for reference clocks. We do a little less
	 * protocol here and call the driver-specific transmit routine.
	 */
	if (peer->burst == 0) {
		u_char oreach;
#ifdef DEBUG
		if (debug)
			printf("refclock_transmit: at %ld %s\n",
			    current_time, stoa(&(peer->srcadr)));
#endif

		/*
		 * Update reachability and poll variables like the
		 * network code.
		 */
		oreach = peer->reach & 0xfe;	/* reach before this poll, sans LSB */
		peer->reach <<= 1;
		if (!(peer->reach & 0x0f))	/* no response in last 4 polls */
			clock_filter(peer, 0., 0., MAXDISPERSE);
		peer->outdate = current_time;
		if (!peer->reach) {
			if (oreach) {	/* transition to unreachable */
				report_event(PEVNT_UNREACH, peer, NULL);
				peer->timereachable = current_time;
			}
		} else {
			/* reachable: start a burst if so configured */
			if (peer->flags & FLAG_BURST)
				peer->burst = NSTAGE;
		}
	} else {
		peer->burst--;
	}
	/* mark the poll in progress and wiggle the driver */
	peer->procptr->inpoll = TRUE;
	if (refclock_conf[clktype]->clock_poll != noentry)
		(refclock_conf[clktype]->clock_poll)(unit, peer);
	poll_update(peer, peer->hpoll, 0);
}
410 
411 
412 /*
413  * Compare two doubles - used with qsort()
414  */
static int
refclock_cmpl_fp(
	const void *p1,
	const void *p2
	)
{
	/* classic three-way double comparison for qsort() */
	const double a = *(const double *)p1;
	const double b = *(const double *)p2;

	if (a < b)
		return -1;
	return (a > b) ? 1 : 0;
}
430 
431 /*
432  * Get number of available samples
433  */
int
refclock_samples_avail(
	struct refclockproc const * pp
	)
{
	u_int	na;

#   if MAXSTAGE & (MAXSTAGE - 1)

	/* generic ring size: an unsigned wrap-around of the index
	 * difference is folded back into range by adding MAXSTAGE
	 * (itself modulo the unsigned width)
	 */
	na = pp->coderecv - pp->codeproc;
	if (na > MAXSTAGE)
		na += MAXSTAGE;

#   else

	/* power-of-two ring size: a simple mask suffices */
	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);

#   endif
	return na;
}
454 
455 /*
456  * Expire (remove) samples from the tail (oldest samples removed)
457  *
458  * Returns number of samples deleted
459  */
460 int
461 refclock_samples_expire(
462 	struct refclockproc * pp,
463 	int                   nd
464 	)
465 {
466 	u_int	na;
467 
468 	if (nd <= 0)
469 		return 0;
470 
471 #   if MAXSTAGE & (MAXSTAGE - 1)
472 
473 	na = pp->coderecv - pp->codeproc;
474 	if (na > MAXSTAGE)
475 		na += MAXSTAGE;
476 	if ((u_int)nd < na)
477 		nd = na;
478 	pp->codeproc = (pp->codeproc + nd) % MAXSTAGE;
479 
480 #   else
481 
482 	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
483 	if ((u_int)nd > na)
484 		nd = (int)na;
485 	pp->codeproc = (pp->codeproc + nd) & (MAXSTAGE - 1);
486 
487 #   endif
488 	return nd;
489 }
490 
491 /*
492  * refclock_process_offset - update median filter
493  *
494  * This routine uses the given offset and timestamps to construct a new
495  * entry in the median filter circular buffer. Samples that overflow the
496  * filter are quietly discarded.
497  */
void
refclock_process_offset(
	struct refclockproc *pp,	/* refclock structure pointer */
	l_fp lasttim,			/* last timecode timestamp */
	l_fp lastrec,			/* last receive timestamp */
	double fudge
	)
{
	l_fp lftemp;
	double doffset;

	pp->lastrec = lastrec;
	/* offset = timecode time - receive time, as a double */
	lftemp = lasttim;
	L_SUB(&lftemp, &lastrec);
	LFPTOD(&lftemp, doffset);
	/* push the fudged offset into the median filter ring */
	clk_add_sample(pp, doffset + fudge);
	refclock_checkburst(pp->io.srcclock, pp);
}
516 
517 
518 /*
519  * refclock_process - process a sample from the clock
520  * refclock_process_f - refclock_process with other than time1 fudge
521  *
522  * This routine converts the timecode in the form days, hours, minutes,
523  * seconds and milliseconds/microseconds to internal timestamp format,
524  * then constructs a new entry in the median filter circular buffer.
525  * Return success (1) if the data are correct and consistent with the
526  * conventional calendar.
527  *
528  * Important for PPS users: Normally, the pp->lastrec is set to the
529  * system time when the on-time character is received and the pp->year,
530  * ..., pp->second decoded and the seconds fraction pp->nsec in
531  * nanoseconds). When a PPS offset is available, pp->nsec is forced to
532  * zero and the fraction for pp->lastrec is set to the PPS offset.
533  */
int
refclock_process_f(
	struct refclockproc *pp,	/* refclock structure pointer */
	double fudge
	)
{
	l_fp offset, ltemp;

	/*
	 * Compute the timecode timestamp from the days, hours, minutes,
	 * seconds and milliseconds/microseconds of the timecode. Use
	 * clocktime() for the aggregate seconds and the msec/usec for
	 * the fraction, when present. Note that this code relies on the
	 * file system time for the years and does not use the years of
	 * the timecode.
	 */
	if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
		pp->lastrec.l_ui, &pp->yearstart, &offset.l_ui))
		return (0);	/* inconsistent with calendar: reject */

	/* add the sub-second part (pp->nsec, nanoseconds) as a fraction */
	offset.l_uf = 0;
	DTOLFP(pp->nsec / 1e9, &ltemp);
	L_ADD(&offset, &ltemp);
	refclock_process_offset(pp, offset, pp->lastrec, fudge);
	return (1);
}
560 
561 
562 int
563 refclock_process(
564 	struct refclockproc *pp		/* refclock structure pointer */
565 )
566 {
567 	return refclock_process_f(pp, pp->fudgetime1);
568 }
569 
570 
571 /*
572  * refclock_sample - process a pile of samples from the clock
573  *
574  * This routine implements a recursive median filter to suppress spikes
575  * in the data, as well as determine a performance statistic. It
576  * calculates the mean offset and RMS jitter. A time adjustment
577  * fudgetime1 can be added to the final offset to compensate for various
578  * systematic errors. The routine returns the number of samples
579  * processed, which could be zero.
580  */
static int
refclock_sample(
	struct refclockproc *pp		/* refclock structure pointer */
	)
{
	size_t	i, j, k, m, n;
	double	off[MAXSTAGE];
	double	offset;

	/*
	 * Copy the raw offsets and sort into ascending order. Don't do
	 * anything if the buffer is empty.
	 */
	n = 0;
	while (pp->codeproc != pp->coderecv)
		off[n++] = clk_pop_sample(pp);
	if (n == 0)
		return (0);

	if (n > 1)
		qsort(off, n, sizeof(off[0]), refclock_cmpl_fp);

	/*
	 * Reject the furthest from the median of the samples until
	 * approximately 60 percent of the samples remain.
	 *
	 * The loop exits with exactly j - i == m surviving samples,
	 * trimming one outlier (whichever end is farther from the
	 * current median) per iteration.
	 */
	i = 0; j = n;
	m = n - (n * 4) / 10;
	while ((j - i) > m) {
		offset = off[(j + i) / 2];	/* current median */
		if (off[j - 1] - offset < offset - off[i])
			i++;	/* reject low end */
		else
			j--;	/* reject high end */
	}

	/*
	 * Determine the offset and jitter.
	 */
	pp->offset = 0;
	pp->jitter = 0;
	for (k = i; k < j; k++) {
		pp->offset += off[k];
		if (k > i)
			pp->jitter += SQUARE(off[k] - off[k - 1]);
	}
	pp->offset /= m;	/* m == j - i, the surviving count */
	pp->jitter = max(SQRT(pp->jitter / m), LOGTOD(sys_precision));

	/*
	 * If the source has a jitter that cannot be estimated, because
	 * it is not statistic jitter, the source will be detected as
	 * falseticker sooner or later.  Enforcing a minimal jitter value
	 * avoids a too low estimation while still detecting higher jitter.
	 *
	 * Note that this changes the refclock samples and ends up in the
	 * clock dispersion, not the clock jitter, despite being called
	 * jitter.  To see the modified values, check the NTP clock variable
	 * "filtdisp", not "jitter".
	 */
	pp->jitter = max(pp->jitter, pp->fudgeminjitter);

#ifdef DEBUG
	if (debug)
		printf(
		    "refclock_sample: n %d offset %.6f disp %.6f jitter %.6f\n",
		    (int)n, pp->offset, pp->disp, pp->jitter);
#endif
	return (int)n;
}
651 
652 
653 /*
654  * refclock_receive - simulate the receive and packet procedures
655  *
656  * This routine simulates the NTP receive and packet procedures for a
657  * reference clock. This provides a mechanism in which the ordinary NTP
658  * filter, selection and combining algorithms can be used to suppress
659  * misbehaving radios and to mitigate between them when more than one is
660  * available for backup.
661  */
void
refclock_receive(
	struct peer *peer	/* peer structure pointer */
	)
{
	struct refclockproc *pp;

#ifdef DEBUG
	if (debug)
		printf("refclock_receive: at %lu %s\n",
		    current_time, stoa(&peer->srcadr));
#endif

	/*
	 * Do a little sanity dance and update the peer structure. Groom
	 * the median filter samples and give the data to the clock
	 * filter.
	 */
	pp = peer->procptr;
	pp->inpoll = FALSE;	/* the pending poll is now answered */
	peer->leap = pp->leap;
	if (peer->leap == LEAP_NOTINSYNC)
		return;		/* driver says the clock is not usable */

	peer->received++;
	peer->timereceived = current_time;
	if (!peer->reach) {
		/* first response after being unreachable */
		report_event(PEVNT_REACH, peer, NULL);
		peer->timereachable = current_time;
	}
	peer->reach = (peer->reach << (peer->reach & 1)) | 1;
	peer->reftime = pp->lastref;
	peer->aorg = pp->lastrec;
	peer->rootdisp = pp->disp;
	get_systime(&peer->dst);
	if (!refclock_sample(pp))
		return;		/* no samples survived the filter */

	clock_filter(peer, pp->offset, 0., pp->jitter);
	/* optionally nudge fudgetime1 toward the PPS system peer */
	if (cal_enable && fabs(last_offset) < sys_mindisp && sys_peer !=
	    NULL) {
		if (sys_peer->refclktype == REFCLK_ATOM_PPS &&
		    peer->refclktype != REFCLK_ATOM_PPS)
			pp->fudgetime1 -= pp->offset * FUDGEFAC;
	}
}
708 
709 
710 /*
711  * refclock_gtlin - groom next input line and extract timestamp
712  *
713  * This routine processes the timecode received from the clock and
714  * strips the parity bit and control characters. It returns the number
715  * of characters in the line followed by a NULL character ('\0'), which
716  * is not included in the count. In case of an empty line, the previous
717  * line is preserved.
718  */
719 int
720 refclock_gtlin(
721 	struct recvbuf *rbufp,	/* receive buffer pointer */
722 	char	*lineptr,	/* current line pointer */
723 	int	bmax,		/* remaining characters in line */
724 	l_fp	*tsptr		/* pointer to timestamp returned */
725 	)
726 {
727 	const char *sp, *spend;
728 	char	   *dp, *dpend;
729 	int         dlen;
730 
731 	if (bmax <= 0)
732 		return (0);
733 
734 	dp    = lineptr;
735 	dpend = dp + bmax - 1; /* leave room for NUL pad */
736 	sp    = (const char *)rbufp->recv_buffer;
737 	spend = sp + rbufp->recv_length;
738 
739 	while (sp != spend && dp != dpend) {
740 		char c;
741 
742 		c = *sp++ & 0x7f;
743 		if (c >= 0x20 && c < 0x7f)
744 			*dp++ = c;
745 	}
746 	/* Get length of data written to the destination buffer. If
747 	 * zero, do *not* place a NUL byte to preserve the previous
748 	 * buffer content.
749 	 */
750 	dlen = dp - lineptr;
751 	if (dlen)
752 	    *dp  = '\0';
753 	*tsptr = rbufp->recv_time;
754 	DPRINTF(2, ("refclock_gtlin: fd %d time %s timecode %d %s\n",
755 		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), dlen,
756 		    (dlen != 0)
757 			? lineptr
758 			: ""));
759 	return (dlen);
760 }
761 
762 
763 /*
764  * refclock_gtraw - get next line/chunk of data
765  *
766  * This routine returns the raw data received from the clock in both
767  * canonical or raw modes. The terminal interface routines map CR to LF.
768  * In canonical mode this results in two lines, one containing data
769  * followed by LF and another containing only LF. In raw mode the
 * interface routines can deliver arbitrary chunks of data from one
771  * character to a maximum specified by the calling routine. In either
772  * mode the routine returns the number of characters in the line
773  * followed by a NULL character ('\0'), which is not included in the
774  * count.
775  *
776  * *tsptr receives a copy of the buffer timestamp.
777  */
778 int
779 refclock_gtraw(
780 	struct recvbuf *rbufp,	/* receive buffer pointer */
781 	char	*lineptr,	/* current line pointer */
782 	int	bmax,		/* remaining characters in line */
783 	l_fp	*tsptr		/* pointer to timestamp returned */
784 	)
785 {
786 	if (bmax <= 0)
787 		return (0);
788 	bmax -= 1; /* leave room for trailing NUL */
789 	if (bmax > rbufp->recv_length)
790 		bmax = rbufp->recv_length;
791 	memcpy(lineptr, rbufp->recv_buffer, bmax);
792 	lineptr[bmax] = '\0';
793 
794 	*tsptr = rbufp->recv_time;
795 	DPRINTF(2, ("refclock_gtraw: fd %d time %s timecode %d %s\n",
796 		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), bmax,
797 		    lineptr));
798 	return (bmax);
799 }
800 
801 
802 /*
803  * indicate_refclock_packet()
804  *
805  * Passes a fragment of refclock input read from the device to the
806  * driver direct input routine, which may consume it (batch it for
807  * queuing once a logical unit is assembled).  If it is not so
808  * consumed, queue it for the driver's receive entrypoint.
809  *
810  * The return value is TRUE if the data has been consumed as a fragment
811  * and should not be counted as a received packet.
812  */
813 int
814 indicate_refclock_packet(
815 	struct refclockio *	rio,
816 	struct recvbuf *	rb
817 	)
818 {
819 	/* Does this refclock use direct input routine? */
820 	if (rio->io_input != NULL && (*rio->io_input)(rb) == 0) {
821 		/*
822 		 * data was consumed - nothing to pass up
823 		 * into block input machine
824 		 */
825 		freerecvbuf(rb);
826 
827 		return TRUE;
828 	}
829 	add_full_recv_buffer(rb);
830 
831 	return FALSE;
832 }
833 
834 
835 /*
836  * process_refclock_packet()
837  *
838  * Used for deferred processing of 'io_input' on systems where threading
839  * is used (notably Windows). This is acting as a trampoline to make the
840  * real calls to the refclock functions.
841  */
#ifdef HAVE_IO_COMPLETION_PORT
void
process_refclock_packet(
	struct recvbuf * rb
	)
{
	/* get the refclockio structure from the receive buffer */
	struct refclockio * rio = &rb->recv_peer->procptr->io;

	/* A raw input function that returns 0 has consumed the buffer;
	 * we are done.  Otherwise account for the packet and hand it to
	 * the driver's 'clock_recv' entrypoint.
	 */
	if (rio->io_input != NULL && (*rio->io_input)(rb) == 0)
		return;

	rio->recvcount++;
	packets_received++;
	handler_pkts++;
	(*rio->clock_recv)(rb);
}
#endif	/* HAVE_IO_COMPLETION_PORT */
865 
866 
867 /*
868  * The following code does not apply to WINNT & VMS ...
869  */
870 #if !defined(SYS_VXWORKS) && !defined(SYS_WINNT)
871 #if defined(HAVE_TERMIOS) || defined(HAVE_SYSV_TTYS) || defined(HAVE_BSD_TTYS)
872 
873 /*
874  * refclock_open - open serial port for reference clock
875  *
876  * This routine opens a serial port for I/O and sets default options. It
877  * returns the file descriptor if successful, or logs an error and
878  * returns -1.
879  */
int
refclock_open(
	const char	*dev,	/* device name pointer */
	u_int		speed,	/* serial port speed (code) */
	u_int		lflags	/* line discipline flags */
	)
{
	int	fd;
	int	omode;
#ifdef O_NONBLOCK
	char	trash[128];	/* litter bin for old input data */
#endif

	/*
	 * Open serial port and set default options
	 */
	omode = O_RDWR;
#ifdef O_NONBLOCK
	omode |= O_NONBLOCK;
#endif
#ifdef O_NOCTTY
	omode |= O_NOCTTY;	/* do not become the controlling tty */
#endif

	fd = open(dev, omode, 0777);
	/* refclock_open() long returned 0 on failure, avoid it.
	 * If open() legitimately yields descriptor 0, move it to a
	 * fresh descriptor via dup() and close 0, preserving errno.
	 */
	if (0 == fd) {
		fd = dup(0);
		SAVE_ERRNO(
			close(0);
		)
	}
	if (fd < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR, "refclock_open %s: %m", dev);
		)
		return -1;
	}
	if (!refclock_setup(fd, speed, lflags)) {
		close(fd);
		return -1;
	}
	if (!refclock_ioctl(fd, lflags)) {
		close(fd);
		return -1;
	}
#ifdef O_NONBLOCK
	/*
	 * We want to make sure there is no pending trash in the input
	 * buffer. Since we have non-blocking IO available, this is a
	 * good moment to read and dump all available outdated stuff
	 * that might have become toxic for the driver.
	 */
	while (read(fd, trash, sizeof(trash)) > 0 || errno == EINTR)
		/*NOP*/;
#endif
	return fd;
}
938 
939 
940 /*
941  * refclock_setup - initialize terminal interface structure
942  */
int
refclock_setup(
	int	fd,		/* file descriptor */
	u_int	speed,		/* serial port speed (code) */
	u_int	lflags		/* line discipline flags */
	)
{
	int	i;
	TTY	ttyb, *ttyp;

	/*
	 * By default, the serial line port is initialized in canonical
	 * (line-oriented) mode at specified line speed, 8 bits and no
	 * parity. LF ends the line and CR is mapped to LF. The break,
	 * erase and kill functions are disabled. There is a different
	 * section for each terminal interface, as selected at compile
	 * time. The flag bits can be used to set raw mode and echo.
	 *
	 * Returns TRUE/1 on success, FALSE on any tty ioctl failure.
	 */
	ttyp = &ttyb;
#ifdef HAVE_TERMIOS

	/*
	 * POSIX serial line parameters (termios interface)
	 */
	if (tcgetattr(fd, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d tcgetattr: %m",
				fd);
		)
		return FALSE;
	}

	/*
	 * Set canonical mode and local connection; set specified speed,
	 * 8 bits and no parity; map CR to NL; ignore break.
	 */
	if (speed) {
		u_int	ltemp = 0;

		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
		ttyp->c_oflag = 0;
		ttyp->c_cflag = CS8 | CLOCAL | CREAD;
		if (lflags & LDISC_7O1) {
			/* HP Z3801A needs 7-bit, odd parity */
			ttyp->c_cflag = CS7 | PARENB | PARODD | CLOCAL | CREAD;
		}
		cfsetispeed(&ttyb, speed);
		cfsetospeed(&ttyb, speed);
		/* start from a clean control-character slate */
		for (i = 0; i < NCCS; ++i)
			ttyp->c_cc[i] = '\0';

#if defined(TIOCMGET) && !defined(SCO5_CLOCK)

		/*
		 * If we have modem control, check to see if modem leads
		 * are active; if so, set remote connection. This is
		 * necessary for the kernel pps mods to work.
		 */
		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
			msyslog(LOG_ERR,
			    "refclock_setup fd %d TIOCMGET: %m", fd);
#ifdef DEBUG
		if (debug)
			printf("refclock_setup fd %d modem status: 0x%x\n",
			    fd, ltemp);
#endif
		if (ltemp & TIOCM_DSR && lflags & LDISC_REMOTE)
			ttyp->c_cflag &= ~CLOCAL;
#endif /* TIOCMGET */
	}

	/*
	 * Set raw and echo modes. These can be changed on-fly.
	 */
	ttyp->c_lflag = ICANON;
	if (lflags & LDISC_RAW) {
		ttyp->c_lflag = 0;
		ttyp->c_iflag = 0;
		ttyp->c_cc[VMIN] = 1;	/* raw: return after 1 byte */
	}
	if (lflags & LDISC_ECHO)
		ttyp->c_lflag |= ECHO;
	if (tcsetattr(fd, TCSANOW, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TCSANOW: %m",
				fd);
		)
		return FALSE;
	}

	/*
	 * flush input and output buffers to discard any outdated stuff
	 * that might have become toxic for the driver. Failing to do so
	 * is logged, but we keep our fingers crossed otherwise.
	 */
	if (tcflush(fd, TCIOFLUSH) < 0)
		msyslog(LOG_ERR, "refclock_setup fd %d tcflush(): %m",
			fd);
#endif /* HAVE_TERMIOS */

#ifdef HAVE_SYSV_TTYS

	/*
	 * System V serial line parameters (termio interface)
	 *
	 */
	if (ioctl(fd, TCGETA, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TCGETA: %m",
				fd);
		)
		return FALSE;
	}

	/*
	 * Set canonical mode and local connection; set specified speed,
	 * 8 bits and no parity; map CR to NL; ignore break.
	 */
	if (speed) {
		u_int	ltemp = 0;

		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
		ttyp->c_oflag = 0;
		ttyp->c_cflag = speed | CS8 | CLOCAL | CREAD;
		for (i = 0; i < NCCS; ++i)
			ttyp->c_cc[i] = '\0';

#if defined(TIOCMGET) && !defined(SCO5_CLOCK)

		/*
		 * If we have modem control, check to see if modem leads
		 * are active; if so, set remote connection. This is
		 * necessary for the kernel pps mods to work.
		 */
		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
			msyslog(LOG_ERR,
			    "refclock_setup fd %d TIOCMGET: %m", fd);
#ifdef DEBUG
		if (debug)
			printf("refclock_setup fd %d modem status: %x\n",
			    fd, ltemp);
#endif
		if (ltemp & TIOCM_DSR)
			ttyp->c_cflag &= ~CLOCAL;
#endif /* TIOCMGET */
	}

	/*
	 * Set raw and echo modes. These can be changed on-fly.
	 */
	ttyp->c_lflag = ICANON;
	if (lflags & LDISC_RAW) {
		ttyp->c_lflag = 0;
		ttyp->c_iflag = 0;
		ttyp->c_cc[VMIN] = 1;
	}
	if (ioctl(fd, TCSETA, ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TCSETA: %m", fd);
		)
		return FALSE;
	}
#endif /* HAVE_SYSV_TTYS */

#ifdef HAVE_BSD_TTYS

	/*
	 * 4.3bsd serial line parameters (sgttyb interface)
	 */
	if (ioctl(fd, TIOCGETP, (char *)ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR,
				"refclock_setup fd %d TIOCGETP: %m",
				fd);
		)
		return FALSE;
	}
	if (speed)
		ttyp->sg_ispeed = ttyp->sg_ospeed = speed;
	ttyp->sg_flags = EVENP | ODDP | CRMOD;
	if (ioctl(fd, TIOCSETP, (char *)ttyp) < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR, "refclock_setup TIOCSETP: %m");
		)
		return FALSE;
	}
#endif /* HAVE_BSD_TTYS */
	return(1);
}
1136 #endif /* HAVE_TERMIOS || HAVE_SYSV_TTYS || HAVE_BSD_TTYS */
1137 
1138 
1139 /*
1140  * refclock_ioctl - set serial port control functions
1141  *
1142  * This routine attempts to hide the internal, system-specific details
1143  * of serial ports. It can handle POSIX (termios), SYSV (termio) and BSD
1144  * (sgtty) interfaces with varying degrees of success. The routine sets
1145  * up optional features such as tty_clk. The routine returns TRUE if
1146  * successful.
1147  */
1148 int
1149 refclock_ioctl(
1150 	int	fd, 		/* file descriptor */
1151 	u_int	lflags		/* line discipline flags */
1152 	)
1153 {
1154 	/*
1155 	 * simply return TRUE if no UNIX line discipline is supported
1156 	 */
1157 	DPRINTF(1, ("refclock_ioctl: fd %d flags 0x%x\n", fd, lflags));
1158 
1159 	return TRUE;
1160 }
1161 #endif /* !defined(SYS_VXWORKS) && !defined(SYS_WINNT) */
1162 
1163 
1164 /*
1165  * refclock_control - set and/or return clock values
1166  *
1167  * This routine is used mainly for debugging. It returns designated
1168  * values from the interface structure that can be displayed using
1169  * ntpdc and the clockstat command. It can also be used to initialize
1170  * configuration variables, such as fudgetimes, fudgevalues, reference
1171  * ID and stratum.
1172  */
1173 void
1174 refclock_control(
1175 	sockaddr_u *srcadr,
1176 	const struct refclockstat *in,
1177 	struct refclockstat *out
1178 	)
1179 {
1180 	struct peer *peer;
1181 	struct refclockproc *pp;
1182 	u_char clktype;
1183 	int unit;
1184 
1185 	/*
1186 	 * Check for valid address and running peer
1187 	 */
1188 	if (!ISREFCLOCKADR(srcadr))
1189 		return;
1190 
1191 	clktype = (u_char)REFCLOCKTYPE(srcadr);
1192 	unit = REFCLOCKUNIT(srcadr);
1193 
1194 	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);
1195 
1196 	if (NULL == peer)
1197 		return;
1198 
1199 	INSIST(peer->procptr != NULL);
1200 	pp = peer->procptr;
1201 
1202 	/*
1203 	 * Initialize requested data
1204 	 */
1205 	if (in != NULL) {
1206 		if (in->haveflags & CLK_HAVETIME1)
1207 			pp->fudgetime1 = in->fudgetime1;
1208 		if (in->haveflags & CLK_HAVETIME2)
1209 			pp->fudgetime2 = in->fudgetime2;
1210 		if (in->haveflags & CLK_HAVEVAL1)
1211 			peer->stratum = pp->stratum = (u_char)in->fudgeval1;
1212 		if (in->haveflags & CLK_HAVEVAL2)
1213 			peer->refid = pp->refid = in->fudgeval2;
1214 		if (in->haveflags & CLK_HAVEFLAG1) {
1215 			pp->sloppyclockflag &= ~CLK_FLAG1;
1216 			pp->sloppyclockflag |= in->flags & CLK_FLAG1;
1217 		}
1218 		if (in->haveflags & CLK_HAVEFLAG2) {
1219 			pp->sloppyclockflag &= ~CLK_FLAG2;
1220 			pp->sloppyclockflag |= in->flags & CLK_FLAG2;
1221 		}
1222 		if (in->haveflags & CLK_HAVEFLAG3) {
1223 			pp->sloppyclockflag &= ~CLK_FLAG3;
1224 			pp->sloppyclockflag |= in->flags & CLK_FLAG3;
1225 		}
1226 		if (in->haveflags & CLK_HAVEFLAG4) {
1227 			pp->sloppyclockflag &= ~CLK_FLAG4;
1228 			pp->sloppyclockflag |= in->flags & CLK_FLAG4;
1229 		}
1230 		if (in->haveflags & CLK_HAVEMINJIT)
1231 			pp->fudgeminjitter = in->fudgeminjitter;
1232 	}
1233 
1234 	/*
1235 	 * Readback requested data
1236 	 */
1237 	if (out != NULL) {
1238 		out->fudgeval1 = pp->stratum;
1239 		out->fudgeval2 = pp->refid;
1240 		out->haveflags = CLK_HAVEVAL1 | CLK_HAVEVAL2;
1241 		out->fudgetime1 = pp->fudgetime1;
1242 		if (0.0 != out->fudgetime1)
1243 			out->haveflags |= CLK_HAVETIME1;
1244 		out->fudgetime2 = pp->fudgetime2;
1245 		if (0.0 != out->fudgetime2)
1246 			out->haveflags |= CLK_HAVETIME2;
1247 		out->flags = (u_char) pp->sloppyclockflag;
1248 		if (CLK_FLAG1 & out->flags)
1249 			out->haveflags |= CLK_HAVEFLAG1;
1250 		if (CLK_FLAG2 & out->flags)
1251 			out->haveflags |= CLK_HAVEFLAG2;
1252 		if (CLK_FLAG3 & out->flags)
1253 			out->haveflags |= CLK_HAVEFLAG3;
1254 		if (CLK_FLAG4 & out->flags)
1255 			out->haveflags |= CLK_HAVEFLAG4;
1256 		out->fudgeminjitter = pp->fudgeminjitter;
1257 		if (0.0 != out->fudgeminjitter)
1258 			out->haveflags |= CLK_HAVEMINJIT;
1259 
1260 		out->timereset = current_time - pp->timestarted;
1261 		out->polls = pp->polls;
1262 		out->noresponse = pp->noreply;
1263 		out->badformat = pp->badformat;
1264 		out->baddata = pp->baddata;
1265 
1266 		out->lastevent = pp->lastevent;
1267 		out->currentstatus = pp->currentstatus;
1268 		out->type = pp->type;
1269 		out->clockdesc = pp->clockdesc;
1270 		out->lencode = (u_short)pp->lencode;
1271 		out->p_lastcode = pp->a_lastcode;
1272 	}
1273 
1274 	/*
1275 	 * Give the stuff to the clock
1276 	 */
1277 	if (refclock_conf[clktype]->clock_control != noentry)
1278 		(refclock_conf[clktype]->clock_control)(unit, in, out, peer);
1279 }
1280 
1281 
1282 /*
1283  * refclock_buginfo - return debugging info
1284  *
1285  * This routine is used mainly for debugging. It returns designated
1286  * values from the interface structure that can be displayed using
1287  * ntpdc and the clkbug command.
1288  */
void
refclock_buginfo(
	sockaddr_u *srcadr,	/* clock address */
	struct refclockbug *bug /* output structure */
	)
{
	struct peer *peer;
	struct refclockproc *pp;
	int clktype;		/* refclock driver index */
	int unit;		/* refclock unit number */
	unsigned u;

	/*
	 * Check for valid address and peer structure
	 */
	if (!ISREFCLOCKADR(srcadr))
		return;

	clktype = (u_char) REFCLOCKTYPE(srcadr);
	unit = REFCLOCKUNIT(srcadr);

	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);

	if (NULL == peer || NULL == peer->procptr)
		return;

	pp = peer->procptr;

	/*
	 * Copy structure values: eight decoded clock values (year, day,
	 * hour, minute, second, nsec, yearstart, coderecv) followed by
	 * the reference/receive timestamps and the sample filter.
	 */
	bug->nvalues = 8;
	bug->svalues = 0x0000003f;
	bug->values[0] = pp->year;
	bug->values[1] = pp->day;
	bug->values[2] = pp->hour;
	bug->values[3] = pp->minute;
	bug->values[4] = pp->second;
	bug->values[5] = pp->nsec;
	bug->values[6] = pp->yearstart;
	bug->values[7] = pp->coderecv;
	bug->stimes = 0xfffffffc;
	bug->times[0] = pp->lastref;
	bug->times[1] = pp->lastrec;
	/* NOTE(review): bug->ntimes is read here but never set in this
	 * function, so filter entries are copied only if the caller
	 * pre-initialized it to a nonzero value -- confirm against the
	 * caller (which appears to zero the structure first).
	 */
	for (u = 2; u < bug->ntimes; u++)
		DTOLFP(pp->filter[u - 2], &bug->times[u]);

	/*
	 * Give the stuff to the clock
	 */
	if (refclock_conf[clktype]->clock_buginfo != noentry)
		(refclock_conf[clktype]->clock_buginfo)(unit, bug, peer);
}
1342 
1343 
1344 #ifdef HAVE_PPSAPI
1345 /*
1346  * refclock_ppsapi - initialize/update ppsapi
1347  *
1348  * This routine is called after the fudge command to open the PPSAPI
1349  * interface for later parameter setting after the fudge command.
1350  */
1351 int
1352 refclock_ppsapi(
1353 	int	fddev,			/* fd device */
1354 	struct refclock_atom *ap	/* atom structure pointer */
1355 	)
1356 {
1357 	if (ap->handle == 0) {
1358 		if (time_pps_create(fddev, &ap->handle) < 0) {
1359 			msyslog(LOG_ERR,
1360 			    "refclock_ppsapi: time_pps_create: %m");
1361 			return (0);
1362 		}
1363 		ZERO(ap->ts); /* [Bug 2689] defined INIT state */
1364 	}
1365 	return (1);
1366 }
1367 
1368 
1369 /*
1370  * refclock_params - set ppsapi parameters
1371  *
1372  * This routine is called to set the PPSAPI parameters after the fudge
1373  * command.
1374  */
1375 int
1376 refclock_params(
1377 	int	mode,			/* mode bits */
1378 	struct refclock_atom *ap	/* atom structure pointer */
1379 	)
1380 {
1381 	ZERO(ap->pps_params);
1382 	ap->pps_params.api_version = PPS_API_VERS_1;
1383 
1384 	/*
1385 	 * Solaris serial ports provide PPS pulse capture only on the
1386 	 * assert edge. FreeBSD serial ports provide capture on the
1387 	 * clear edge, while FreeBSD parallel ports provide capture
1388 	 * on the assert edge. Your mileage may vary.
1389 	 */
1390 	if (mode & CLK_FLAG2)
1391 		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTURECLEAR;
1392 	else
1393 		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTUREASSERT;
1394 	if (time_pps_setparams(ap->handle, &ap->pps_params) < 0) {
1395 		msyslog(LOG_ERR,
1396 		    "refclock_params: time_pps_setparams: %m");
1397 		return (0);
1398 	}
1399 
1400 	/*
1401 	 * If flag3 is lit, select the kernel PPS if we can.
1402 	 *
1403 	 * Note: EOPNOTSUPP is the only 'legal' error code we deal with;
1404 	 * it is part of the 'if we can' strategy.  Any other error
1405 	 * indicates something more sinister and makes this function fail.
1406 	 */
1407 	if (mode & CLK_FLAG3) {
1408 		if (time_pps_kcbind(ap->handle, PPS_KC_HARDPPS,
1409 		    ap->pps_params.mode & ~PPS_TSFMT_TSPEC,
1410 		    PPS_TSFMT_TSPEC) < 0)
1411 		{
1412 			if (errno != EOPNOTSUPP) {
1413 				msyslog(LOG_ERR,
1414 					"refclock_params: time_pps_kcbind: %m");
1415 				return (0);
1416 			}
1417 		} else {
1418 			hardpps_enable = 1;
1419 		}
1420 	}
1421 	return (1);
1422 }
1423 
1424 
1425 /*
1426  * refclock_pps - called once per second
1427  *
1428  * This routine is called once per second. It snatches the PPS
1429  * timestamp from the kernel and saves the sign-extended fraction in
1430  * a circular buffer for processing at the next poll event.
1431  */
int
refclock_pps(
	struct peer *peer,		/* peer structure pointer */
	struct refclock_atom *ap,	/* atom structure pointer */
	int	mode			/* mode bits */
	)
{
	struct refclockproc *pp;
	pps_info_t pps_info;
	struct timespec timeout;
	double	dtemp, dcorr, trash;

	/* NOTE(review): 'mode' is not referenced anywhere in this body;
	 * the PPS parameters come from pp->sloppyclockflag instead.
	 */

	/*
	 * We require the clock to be synchronized before setting the
	 * parameters. When the parameters have been set, fetch the
	 * most recent PPS timestamp.
	 */
	pp = peer->procptr;
	if (ap->handle == 0)
		return (0);

	if (ap->pps_params.mode == 0 && sys_leap != LEAP_NOTINSYNC) {
		if (refclock_params(pp->sloppyclockflag, ap) < 1)
			return (0);
	}
	ZERO(timeout);
	ZERO(pps_info);
	/* an all-zero timeout asks for an immediate, non-blocking
	 * fetch of the current PPS state (per the PPSAPI spec)
	 */
	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC, &pps_info,
	    &timeout) < 0) {
		refclock_report(peer, CEVNT_FAULT);
		return (0);
	}
	timeout = ap->ts;	/* save old timestamp for check */
	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
		ap->ts = pps_info.assert_timestamp;
	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
		ap->ts = pps_info.clear_timestamp;
	else
		return (0);

	/* [Bug 2689] Discard the first sample we read -- if the PPS
	 * source is currently down / disconnected, we have read a
	 * potentially *very* stale value here. So if our old TS value
	 * is all-zero, we consider this sample unreliable and drop it.
	 *
	 * Note 1: a better check would compare the PPS time stamp to
	 * the current system time and drop it if it's more than say 3s
	 * away.
	 *
	 * Note 2: If we ever again get an all-zero PPS sample, the next
	 * one will be discarded. This can happen every 136yrs and is
	 * unlikely to be ever observed.
	 */
	if (0 == (timeout.tv_sec | timeout.tv_nsec))
		return (0);

	/* If the PPS source fails to deliver a new sample between
	 * polls, it regurgitates the last sample. We do not want to
	 * process the same sample multiple times.
	 */
	if (0 == memcmp(&timeout, &ap->ts, sizeof(timeout)))
		return (0);

	/*
	 * Convert to signed fraction offset, apply fudge and properly
	 * fold the correction into the [-0.5s,0.5s] range. Handle
	 * excessive fudge times, too. ('trash' only receives the
	 * integral part of modf() and is discarded.)
	 */
	dtemp = ap->ts.tv_nsec / 1e9;
	dcorr = modf((pp->fudgetime1 - dtemp), &trash);
	if (dcorr > 0.5)
		dcorr -= 1.0;
	else if (dcorr < -0.5)
		dcorr += 1.0;

	/* phase gate check: avoid wobbling by +/-1s when too close to
	 * the switch-over point. We allow +/-400ms max phase deviation.
	 * The trade-off is clear: The smaller the limit, the less
	 * sensitive to sampling noise the clock becomes. OTOH the
	 * system must get into phase gate range by other means for the
	 * PPS clock to lock in.
	 */
	if (fabs(dcorr) > 0.4)
		return (0);

	/*
	 * record this time stamp and stuff in median filter
	 * (UNIX epoch seconds shifted to the NTP epoch via JAN_1970,
	 * nanoseconds scaled into the l_fp fraction)
	 */
	pp->lastrec.l_ui = (u_int32)ap->ts.tv_sec + JAN_1970;
	pp->lastrec.l_uf = (u_int32)(dtemp * FRAC);
	clk_add_sample(pp, dcorr);
	refclock_checkburst(peer, pp);

#ifdef DEBUG
	if (debug > 1)
		printf("refclock_pps: %lu %f %f\n", current_time,
		    dcorr, pp->fudgetime1);
#endif
	return (1);
}
1532 #endif /* HAVE_PPSAPI */
1533 
1534 
1535 /*
1536  * -------------------------------------------------------------------
1537  * refclock_ppsaugment(...) -- correlate with PPS edge
1538  *
1539  * This function is used to correlate a receive time stamp with a PPS
1540  * edge time stamp. It applies the necessary fudges and then tries to
1541  * move the receive time stamp to the corresponding edge. This can warp
1542  * into future, if a transmission delay of more than 500ms is not
1543  * compensated with a corresponding fudge time2 value, because then the
 * next PPS edge is nearer than the last. (Similar to what the PPS ATOM
1545  * driver does, but we deal with full time stamps here, not just phase
1546  * shift information.) Likewise, a negative fudge time2 value must be
1547  * used if the reference time stamp correlates with the *following* PPS
1548  * pulse.
1549  *
1550  * Note that the receive time fudge value only needs to move the receive
1551  * stamp near a PPS edge but that close proximity is not required;
1552  * +/-100ms precision should be enough. But since the fudge value will
1553  * probably also be used to compensate the transmission delay when no
1554  * PPS edge can be related to the time stamp, it's best to get it as
1555  * close as possible.
1556  *
1557  * It should also be noted that the typical use case is matching to the
 * preceding edge, as most units relate their sentences to the current
1559  * second.
1560  *
1561  * The function returns FALSE if there is no correlation possible, TRUE
1562  * otherwise.  Reason for failures are:
1563  *
1564  *  - no PPS/ATOM unit given
1565  *  - PPS stamp is stale (that is, the difference between the PPS stamp
1566  *    and the corrected time stamp would exceed two seconds)
 *  - The phase difference is too close to 0.5, and the decision whether
1568  *    to move up or down is too sensitive to noise.
1569  *
1570  * On output, the receive time stamp is updated with the 'fixed' receive
1571  * time.
1572  * -------------------------------------------------------------------
1573  */
1574 
int/*BOOL*/
refclock_ppsaugment(
	const struct refclock_atom * ap	    ,	/* for PPS io	  */
	l_fp 			   * rcvtime ,
	double			     rcvfudge,	/* i/o read fudge */
	double			     ppsfudge	/* pps fudge	  */
	)
{
	l_fp		delta[1];	/* one-element array, used like a pointer */

#ifdef HAVE_PPSAPI

	pps_info_t	pps_info;
	struct timespec timeout;
	l_fp		stamp[1];
	uint32_t	phase;

	/* noise lock gap bounds: 1932735284/2^32 ~= 0.45 s and
	 * 2362232013/2^32 ~= 0.55 s as l_fp fraction values
	 */
	static const uint32_t s_plim_hi = UINT32_C(1932735284);
	static const uint32_t s_plim_lo = UINT32_C(2362232013);

	/* fixup receive time in case we have to bail out early */
	DTOLFP(rcvfudge, delta);
	L_SUB(rcvtime, delta);

	if (NULL == ap)
		return FALSE;

	ZERO(timeout);
	ZERO(pps_info);

	/* fetch PPS stamp from ATOM block */
	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC,
			   &pps_info, &timeout) < 0)
		return FALSE; /* can't get time stamps */

	/* get last active PPS edge before receive */
	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
		timeout = pps_info.assert_timestamp;
	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
		timeout = pps_info.clear_timestamp;
	else
		return FALSE; /* WHICH edge, please?!? */

	/* convert PPS stamp to l_fp and apply fudge */
	*stamp = tspec_stamp_to_lfp(timeout);
	DTOLFP(ppsfudge, delta);
	L_SUB(stamp, delta);

	/* Get difference between PPS stamp (--> yield) and receive time
	 * (--> base)
	 */
	*delta = *stamp;
	L_SUB(delta, rcvtime);

	/* check if either the PPS or the STAMP is stale in relation
	 * to each other. Bail if it is so...
	 * (accepts only an integral-second difference in [-2,+1]:
	 * l_ui of 0, 1, 0xFFFFFFFE or 0xFFFFFFFF)
	 */
	phase = delta->l_ui;
	if (phase >= 2 && phase < (uint32_t)-2)
		return FALSE; /* PPS is stale, don't use it */

	/* If the phase is too close to 0.5, the decision whether to
	 * move up or down is becoming noise sensitive. That is, we
	 * might amplify usec noise between samples into seconds with a
	 * simple threshold. This can be solved by a Schmitt Trigger
	 * characteristic, but that would also require additional state
	 * where we could remember previous decisions.  Easier to play
	 * dead duck and wait for the conditions to become clear.
	 */
	phase = delta->l_uf;
	if (phase > s_plim_hi && phase < s_plim_lo)
		return FALSE; /* we're in the noise lock gap */

	/* sign-extend fraction into seconds */
	delta->l_ui = UINT32_C(0) - ((phase >> 31) & 1);
	/* add it up now */
	L_ADD(rcvtime, delta);
	return TRUE;

#   else /* have no PPS support at all */

	/* just fixup receive time and fail */
	UNUSED_ARG(ap);
	UNUSED_ARG(ppsfudge);

	DTOLFP(rcvfudge, delta);
	L_SUB(rcvtime, delta);
	return FALSE;

#   endif
}
1666 
1667 /*
1668  * -------------------------------------------------------------------
1669  * check if it makes sense to schedule an 'early' poll to get the clock
1670  * up fast after start or longer signal dropout.
1671  */
1672 static void
1673 refclock_checkburst(
1674 	struct peer *         peer,
1675 	struct refclockproc * pp
1676 	)
1677 {
1678 	uint32_t	limit;	/* when we should poll */
1679 	u_int		needs;	/* needed number of samples */
1680 
1681 	/* Paranoia: stop here if peer and clockproc don't match up.
1682 	 * And when a poll is actually pending, we don't have to do
1683 	 * anything, either. Likewise if the reach mask is full, of
1684 	 * course, and if the filter has stabilized.
1685 	 */
1686 	if (pp->inpoll || (peer->procptr != pp) ||
1687 	    ((peer->reach == 0xFF) && (peer->disp <= MAXDISTANCE)))
1688 		return;
1689 
1690 	/* If the next poll is soon enough, bail out, too: */
1691 	limit = current_time + 1;
1692 	if (peer->nextdate <= limit)
1693 		return;
1694 
1695 	/* Derive the number of samples needed from the popcount of the
1696 	 * reach mask.  With less samples available, we break away.
1697 	 */
1698 	needs  = peer->reach;
1699 	needs -= (needs >> 1) & 0x55;
1700 	needs  = (needs & 0x33) + ((needs >> 2) & 0x33);
1701 	needs  = (needs + (needs >> 4)) & 0x0F;
1702 	if (needs > 6)
1703 		needs = 6;
1704 	else if (needs < 3)
1705 		needs = 3;
1706 	if (clk_cnt_sample(pp) < needs)
1707 		return;
1708 
1709 	/* Get serious. Reduce the poll to minimum and schedule early.
1710 	 * (Changing the peer poll is probably in vain, as it will be
1711 	 * re-adjusted, but maybe some time the hint will work...)
1712 	 */
1713 	peer->hpoll = peer->minpoll;
1714 	peer->nextdate = limit;
1715 }
1716 
1717 /*
1718  * -------------------------------------------------------------------
1719  * Save the last timecode string, making sure it's properly truncated
1720  * if necessary and NUL terminated in any case.
1721  */
1722 void
1723 refclock_save_lcode(
1724 	struct refclockproc *	pp,
1725 	char const *		tc,
1726 	size_t			len
1727 	)
1728 {
1729 	if (len == (size_t)-1)
1730 		len = strnlen(tc,  sizeof(pp->a_lastcode) - 1);
1731 	else if (len >= sizeof(pp->a_lastcode))
1732 		len = sizeof(pp->a_lastcode) - 1;
1733 
1734 	pp->lencode = (u_short)len;
1735 	memcpy(pp->a_lastcode, tc, len);
1736 	pp->a_lastcode[len] = '\0';
1737 }
1738 
1739 /* format data into a_lastcode */
1740 void
1741 refclock_vformat_lcode(
1742 	struct refclockproc *	pp,
1743 	char const *		fmt,
1744 	va_list			va
1745 	)
1746 {
1747 	long len;
1748 
1749 	len = vsnprintf(pp->a_lastcode, sizeof(pp->a_lastcode), fmt, va);
1750 	if (len <= 0)
1751 		len = 0;
1752 	else if ((size_t)len >= sizeof(pp->a_lastcode))
1753 		len = sizeof(pp->a_lastcode) - 1;
1754 
1755 	pp->lencode = (u_short)len;
1756 	pp->a_lastcode[len] = '\0';
1757 	/* !note! the NUL byte is needed in case vsnprintf() really fails */
1758 }
1759 
void
refclock_format_lcode(
	struct refclockproc *	pp,
	char const *		fmt,
	...
	)
{
	/* printf-style front end for refclock_vformat_lcode() */
	va_list	ap;

	va_start(ap, fmt);
	refclock_vformat_lcode(pp, fmt, ap);
	va_end(ap);
}
1773 
1774 #endif /* REFCLOCK */
1775