1 /* $NetBSD: ntp_request.c,v 1.19 2024/08/18 20:47:18 christos Exp $ */ 2 3 /* 4 * ntp_request.c - respond to information requests 5 */ 6 7 #ifdef HAVE_CONFIG_H 8 # include <config.h> 9 #endif 10 11 #include "ntpd.h" 12 #include "ntp_io.h" 13 #include "ntp_request.h" 14 #include "ntp_control.h" 15 #include "ntp_refclock.h" 16 #include "ntp_if.h" 17 #include "ntp_stdlib.h" 18 #include "ntp_assert.h" 19 20 #include <stdio.h> 21 #include <stddef.h> 22 #include <signal.h> 23 #ifdef HAVE_NETINET_IN_H 24 #include <netinet/in.h> 25 #endif 26 #include <arpa/inet.h> 27 28 #include "recvbuff.h" 29 30 #ifdef KERNEL_PLL 31 #include "ntp_syscall.h" 32 #endif /* KERNEL_PLL */ 33 34 /* 35 * Structure to hold request procedure information 36 */ 37 #define NOAUTH 0 38 #define AUTH 1 39 40 #define NO_REQUEST (-1) 41 /* 42 * Because we now have v6 addresses in the messages, we need to compensate 43 * for the larger size. Therefore, we introduce the alternate size to 44 * keep us friendly with older implementations. A little ugly. 45 */ 46 static int client_v6_capable = 0; /* the client can handle longer messages */ 47 48 #define v6sizeof(type) (client_v6_capable ? sizeof(type) : v4sizeof(type)) 49 50 struct req_proc { 51 short request_code; /* defined request code */ 52 short needs_auth; /* true when authentication needed */ 53 short sizeofitem; /* size of request data item (older size)*/ 54 short v6_sizeofitem; /* size of request data item (new size)*/ 55 void (*handler) (sockaddr_u *, endpt *, 56 struct req_pkt *); /* routine to handle request */ 57 }; 58 59 /* 60 * Universal request codes 61 */ 62 static const struct req_proc univ_codes[] = { 63 { NO_REQUEST, NOAUTH, 0, 0, NULL } 64 }; 65 66 static void req_ack (sockaddr_u *, endpt *, struct req_pkt *, int); 67 static void * prepare_pkt (sockaddr_u *, endpt *, 68 struct req_pkt *, size_t); 69 static void * more_pkt (void); 70 static void flush_pkt (void); 71 static void list_peers (sockaddr_u *, endpt *, struct req_pkt *); 72 static void list_peers_sum (sockaddr_u *, endpt *, struct req_pkt *); 73 static void peer_info (sockaddr_u *, endpt *, struct req_pkt *); 74 static void peer_stats (sockaddr_u *, endpt *, struct req_pkt *); 75 static void sys_info (sockaddr_u *, endpt *, struct req_pkt *); 76 static void sys_stats (sockaddr_u *, endpt *, struct req_pkt *); 77 static void mem_stats (sockaddr_u *, endpt *, struct req_pkt *); 78 static void io_stats (sockaddr_u *, endpt *, struct req_pkt *); 79 static void timer_stats (sockaddr_u *, endpt *, struct req_pkt *); 80 static void loop_info (sockaddr_u *, endpt *, struct req_pkt *); 81 static void do_conf (sockaddr_u *, endpt *, struct req_pkt *); 82 static void do_unconf (sockaddr_u *, endpt *, struct req_pkt *); 83 static void set_sys_flag (sockaddr_u *, endpt *, struct req_pkt *); 84 static void clr_sys_flag (sockaddr_u *, endpt *, struct req_pkt *); 85 static void setclr_flags (sockaddr_u *, endpt *, struct req_pkt *, u_long); 86 static void list_restrict4 (const restrict_u *, struct info_restrict **); 87 static void list_restrict6 (const restrict_u *, struct info_restrict **); 88 static void list_restrict (sockaddr_u *, endpt *, struct req_pkt *); 89 static void do_resaddflags (sockaddr_u *, endpt *, struct req_pkt *); 90 static void do_ressubflags (sockaddr_u *, endpt *, struct req_pkt *); 91 static void do_unrestrict (sockaddr_u *, endpt *, struct req_pkt *); 92 static void do_restrict (sockaddr_u *, endpt *, struct req_pkt *, restrict_op); 93 static void mon_getlist (sockaddr_u *, endpt *, struct 
req_pkt *); 94 static void reset_stats (sockaddr_u *, endpt *, struct req_pkt *); 95 static void reset_peer (sockaddr_u *, endpt *, struct req_pkt *); 96 static void do_key_reread (sockaddr_u *, endpt *, struct req_pkt *); 97 static void trust_key (sockaddr_u *, endpt *, struct req_pkt *); 98 static void untrust_key (sockaddr_u *, endpt *, struct req_pkt *); 99 static void do_trustkey (sockaddr_u *, endpt *, struct req_pkt *, u_long); 100 static void get_auth_info (sockaddr_u *, endpt *, struct req_pkt *); 101 static void req_get_traps (sockaddr_u *, endpt *, struct req_pkt *); 102 static void req_set_trap (sockaddr_u *, endpt *, struct req_pkt *); 103 static void req_clr_trap (sockaddr_u *, endpt *, struct req_pkt *); 104 static void do_setclr_trap (sockaddr_u *, endpt *, struct req_pkt *, int); 105 static void set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *); 106 static void set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *); 107 static void get_ctl_stats (sockaddr_u *, endpt *, struct req_pkt *); 108 static void get_if_stats (sockaddr_u *, endpt *, struct req_pkt *); 109 static void do_if_reload (sockaddr_u *, endpt *, struct req_pkt *); 110 #ifdef KERNEL_PLL 111 static void get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *); 112 #endif /* KERNEL_PLL */ 113 #ifdef REFCLOCK 114 static void get_clock_info (sockaddr_u *, endpt *, struct req_pkt *); 115 static void set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *); 116 #endif /* REFCLOCK */ 117 #ifdef REFCLOCK 118 static void get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *); 119 #endif /* REFCLOCK */ 120 121 /* 122 * ntpd request codes 123 */ 124 static const struct req_proc ntp_codes[] = { 125 { REQ_PEER_LIST, NOAUTH, 0, 0, list_peers }, 126 { REQ_PEER_LIST_SUM, NOAUTH, 0, 0, list_peers_sum }, 127 { REQ_PEER_INFO, NOAUTH, v4sizeof(struct info_peer_list), 128 sizeof(struct info_peer_list), peer_info}, 129 { REQ_PEER_STATS, NOAUTH, v4sizeof(struct info_peer_list), 130 sizeof(struct info_peer_list), peer_stats}, 131 { REQ_SYS_INFO, NOAUTH, 0, 0, sys_info }, 132 { REQ_SYS_STATS, NOAUTH, 0, 0, sys_stats }, 133 { REQ_IO_STATS, NOAUTH, 0, 0, io_stats }, 134 { REQ_MEM_STATS, NOAUTH, 0, 0, mem_stats }, 135 { REQ_LOOP_INFO, NOAUTH, 0, 0, loop_info }, 136 { REQ_TIMER_STATS, NOAUTH, 0, 0, timer_stats }, 137 { REQ_CONFIG, AUTH, v4sizeof(struct conf_peer), 138 sizeof(struct conf_peer), do_conf }, 139 { REQ_UNCONFIG, AUTH, v4sizeof(struct conf_unpeer), 140 sizeof(struct conf_unpeer), do_unconf }, 141 { REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags), 142 sizeof(struct conf_sys_flags), set_sys_flag }, 143 { REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags), 144 sizeof(struct conf_sys_flags), clr_sys_flag }, 145 { REQ_GET_RESTRICT, NOAUTH, 0, 0, list_restrict }, 146 { REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict), 147 sizeof(struct conf_restrict), do_resaddflags }, 148 { REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict), 149 sizeof(struct conf_restrict), do_ressubflags }, 150 { REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict), 151 sizeof(struct conf_restrict), do_unrestrict }, 152 { REQ_MON_GETLIST, NOAUTH, 0, 0, mon_getlist }, 153 { REQ_MON_GETLIST_1, NOAUTH, 0, 0, mon_getlist }, 154 { REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats }, 155 { REQ_RESET_PEER, AUTH, v4sizeof(struct conf_unpeer), 156 sizeof(struct conf_unpeer), reset_peer }, 157 { REQ_REREAD_KEYS, AUTH, 0, 0, do_key_reread }, 158 { REQ_TRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), trust_key }, 159 { 
REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key }, 160 { REQ_AUTHINFO, NOAUTH, 0, 0, get_auth_info }, 161 { REQ_TRAPS, NOAUTH, 0, 0, req_get_traps }, 162 { REQ_ADD_TRAP, AUTH, v4sizeof(struct conf_trap), 163 sizeof(struct conf_trap), req_set_trap }, 164 { REQ_CLR_TRAP, AUTH, v4sizeof(struct conf_trap), 165 sizeof(struct conf_trap), req_clr_trap }, 166 { REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long), 167 set_request_keyid }, 168 { REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long), 169 set_control_keyid }, 170 { REQ_GET_CTLSTATS, NOAUTH, 0, 0, get_ctl_stats }, 171 #ifdef KERNEL_PLL 172 { REQ_GET_KERNEL, NOAUTH, 0, 0, get_kernel_info }, 173 #endif 174 #ifdef REFCLOCK 175 { REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32), 176 get_clock_info }, 177 { REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge), 178 sizeof(struct conf_fudge), set_clock_fudge }, 179 { REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32), 180 get_clkbug_info }, 181 #endif 182 { REQ_IF_STATS, AUTH, 0, 0, get_if_stats }, 183 { REQ_IF_RELOAD, AUTH, 0, 0, do_if_reload }, 184 185 { NO_REQUEST, NOAUTH, 0, 0, 0 } 186 }; 187 188 189 /* 190 * Authentication keyid used to authenticate requests. Zero means we 191 * don't allow writing anything. 192 */ 193 keyid_t info_auth_keyid; 194 195 /* 196 * Statistic counters to keep track of requests and responses. 197 */ 198 u_long numrequests; /* number of requests we've received */ 199 u_long numresppkts; /* number of resp packets sent with data */ 200 201 /* 202 * lazy way to count errors, indexed by the error code 203 */ 204 u_long errorcounter[MAX_INFO_ERR + 1]; 205 206 /* 207 * A hack. To keep the authentication module clear of ntp-ism's, we 208 * include a time reset variable for its stats here. 209 */ 210 u_long auth_timereset; 211 212 /* 213 * Response packet used by these routines. Also some state information 214 * so that we can handle packet formatting within a common set of 215 * subroutines. Note we try to enter data in place whenever possible, 216 * but the need to set the more bit correctly means we occasionally 217 * use the extra buffer and copy. 
218 */ 219 static struct resp_pkt rpkt; 220 static int reqver; 221 static int seqno; 222 static int nitems; 223 static int itemsize; 224 static int databytes; 225 static char exbuf[RESP_DATA_SIZE]; 226 static int usingexbuf; 227 static sockaddr_u *toaddr; 228 static endpt *frominter; 229 230 /* 231 * init_request - initialize request data 232 */ 233 void 234 init_request (void) 235 { 236 size_t i; 237 238 numrequests = 0; 239 numresppkts = 0; 240 auth_timereset = 0; 241 info_auth_keyid = 0; /* by default, can't do this */ 242 243 for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++) 244 errorcounter[i] = 0; 245 } 246 247 248 /* 249 * req_ack - acknowledge request with no data 250 */ 251 static void 252 req_ack( 253 sockaddr_u *srcadr, 254 endpt *inter, 255 struct req_pkt *inpkt, 256 int errcode 257 ) 258 { 259 /* 260 * fill in the fields 261 */ 262 rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver); 263 rpkt.auth_seq = AUTH_SEQ(0, 0); 264 rpkt.implementation = inpkt->implementation; 265 rpkt.request = inpkt->request; 266 rpkt.err_nitems = ERR_NITEMS(errcode, 0); 267 rpkt.mbz_itemsize = MBZ_ITEMSIZE(0); 268 269 /* 270 * send packet and bump counters 271 */ 272 sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE); 273 errorcounter[errcode]++; 274 } 275 276 277 /* 278 * prepare_pkt - prepare response packet for transmission, return pointer 279 * to storage for data item. 280 */ 281 static void * 282 prepare_pkt( 283 sockaddr_u *srcadr, 284 endpt *inter, 285 struct req_pkt *pkt, 286 size_t structsize 287 ) 288 { 289 DPRINTF(4, ("request: preparing pkt\n")); 290 291 /* 292 * Fill in the implementation, request and itemsize fields 293 * since these won't change. 294 */ 295 rpkt.implementation = pkt->implementation; 296 rpkt.request = pkt->request; 297 rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize); 298 299 /* 300 * Compute the static data needed to carry on. 301 */ 302 toaddr = srcadr; 303 frominter = inter; 304 seqno = 0; 305 nitems = 0; 306 itemsize = structsize; 307 databytes = 0; 308 usingexbuf = 0; 309 310 /* 311 * return the beginning of the packet buffer. 312 */ 313 return &rpkt.u; 314 } 315 316 317 /* 318 * more_pkt - return a data pointer for a new item. 319 */ 320 static void * 321 more_pkt(void) 322 { 323 /* 324 * If we were using the extra buffer, send the packet. 325 */ 326 if (usingexbuf) { 327 DPRINTF(3, ("request: sending pkt\n")); 328 rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver); 329 rpkt.auth_seq = AUTH_SEQ(0, seqno); 330 rpkt.err_nitems = htons((u_short)nitems); 331 sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt, 332 RESP_HEADER_SIZE + databytes); 333 numresppkts++; 334 335 /* 336 * Copy data out of exbuf into the packet. 337 */ 338 memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize); 339 seqno++; 340 databytes = 0; 341 nitems = 0; 342 usingexbuf = 0; 343 } 344 345 databytes += itemsize; 346 nitems++; 347 if (databytes + itemsize <= RESP_DATA_SIZE) { 348 DPRINTF(4, ("request: giving him more data\n")); 349 /* 350 * More room in packet. Give him the 351 * next address. 352 */ 353 return &rpkt.u.data[databytes]; 354 } else { 355 /* 356 * No room in packet. Give him the extra 357 * buffer unless this was the last in the sequence. 358 */ 359 DPRINTF(4, ("request: into extra buffer\n")); 360 if (seqno == MAXSEQ) 361 return NULL; 362 else { 363 usingexbuf = 1; 364 return exbuf; 365 } 366 } 367 } 368 369 370 /* 371 * flush_pkt - we're done, return remaining information. 
372 */ 373 static void 374 flush_pkt(void) 375 { 376 DPRINTF(3, ("request: flushing packet, %d items\n", nitems)); 377 /* 378 * Must send the last packet. If nothing in here and nothing 379 * has been sent, send an error saying no data to be found. 380 */ 381 if (seqno == 0 && nitems == 0) 382 req_ack(toaddr, frominter, (struct req_pkt *)&rpkt, 383 INFO_ERR_NODATA); 384 else { 385 rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver); 386 rpkt.auth_seq = AUTH_SEQ(0, seqno); 387 rpkt.err_nitems = htons((u_short)nitems); 388 sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt, 389 RESP_HEADER_SIZE+databytes); 390 numresppkts++; 391 } 392 } 393 394 395 396 /* 397 * Given a buffer, return the packet mode 398 */ 399 int 400 get_packet_mode(struct recvbuf *rbufp) 401 { 402 struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt; 403 return (INFO_MODE(inpkt->rm_vn_mode)); 404 } 405 406 407 /* 408 * process_private - process private mode (7) packets 409 */ 410 void 411 process_private( 412 struct recvbuf *rbufp, 413 int mod_okay 414 ) 415 { 416 static u_long quiet_until; 417 struct req_pkt *inpkt; 418 struct req_pkt_tail *tailinpkt; 419 sockaddr_u *srcadr; 420 endpt *inter; 421 const struct req_proc *proc; 422 int ec; 423 short temp_size; 424 l_fp ftmp; 425 double dtemp; 426 size_t recv_len; 427 size_t noslop_len; 428 size_t mac_len; 429 430 /* 431 * Initialize pointers, for convenience 432 */ 433 recv_len = rbufp->recv_length; 434 inpkt = (struct req_pkt *)&rbufp->recv_pkt; 435 srcadr = &rbufp->recv_srcadr; 436 inter = rbufp->dstadr; 437 438 DPRINTF(3, ("process_private: impl %d req %d\n", 439 inpkt->implementation, inpkt->request)); 440 441 /* 442 * Do some sanity checks on the packet. Return a format 443 * error if it fails. 444 */ 445 ec = 0; 446 if ( (++ec, ISRESPONSE(inpkt->rm_vn_mode)) 447 || (++ec, ISMORE(inpkt->rm_vn_mode)) 448 || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION) 449 || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION) 450 || (++ec, INFO_SEQ(inpkt->auth_seq) != 0) 451 || (++ec, INFO_ERR(inpkt->err_nitems) != 0) 452 || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0) 453 || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR) 454 ) { 455 NLOG(NLOG_SYSEVENT) 456 if (current_time >= quiet_until) { 457 msyslog(LOG_ERR, 458 "process_private: drop test %d" 459 " failed, pkt from %s", 460 ec, stoa(srcadr)); 461 quiet_until = current_time + 60; 462 } 463 return; 464 } 465 466 reqver = INFO_VERSION(inpkt->rm_vn_mode); 467 468 /* 469 * Get the appropriate procedure list to search. 470 */ 471 if (inpkt->implementation == IMPL_UNIV) 472 proc = univ_codes; 473 else if ((inpkt->implementation == IMPL_XNTPD) || 474 (inpkt->implementation == IMPL_XNTPD_OLD)) 475 proc = ntp_codes; 476 else { 477 req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL); 478 return; 479 } 480 481 /* 482 * Search the list for the request codes. If it isn't one 483 * we know, return an error. 484 */ 485 while (proc->request_code != NO_REQUEST) { 486 if (proc->request_code == (short) inpkt->request) 487 break; 488 proc++; 489 } 490 if (proc->request_code == NO_REQUEST) { 491 req_ack(srcadr, inter, inpkt, INFO_ERR_REQ); 492 return; 493 } 494 495 DPRINTF(4, ("found request in tables\n")); 496 497 /* 498 * If we need data, check to see if we have some. If we 499 * don't, check to see that there is none (picky, picky). 500 */ 501 502 /* This part is a bit tricky, we want to be sure that the size 503 * returned is either the old or the new size. We also can find 504 * out if the client can accept both types of messages this way. 
505 * 506 * Handle the exception of REQ_CONFIG. It can have two data sizes. 507 */ 508 temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize); 509 if ((temp_size != proc->sizeofitem && 510 temp_size != proc->v6_sizeofitem) && 511 !(inpkt->implementation == IMPL_XNTPD && 512 inpkt->request == REQ_CONFIG && 513 temp_size == sizeof(struct old_conf_peer))) { 514 DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n", 515 temp_size, proc->sizeofitem, proc->v6_sizeofitem)); 516 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 517 return; 518 } 519 if ((proc->sizeofitem != 0) && 520 ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) > 521 (recv_len - REQ_LEN_HDR))) { 522 DPRINTF(3, ("process_private: not enough data\n")); 523 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 524 return; 525 } 526 527 switch (inpkt->implementation) { 528 case IMPL_XNTPD: 529 client_v6_capable = 1; 530 break; 531 case IMPL_XNTPD_OLD: 532 client_v6_capable = 0; 533 break; 534 default: 535 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 536 return; 537 } 538 539 /* 540 * If we need to authenticate, do so. Note that an 541 * authenticatable packet must include a mac field, must 542 * have used key info_auth_keyid and must have included 543 * a time stamp in the appropriate field. The time stamp 544 * must be within INFO_TS_MAXSKEW of the receive 545 * time stamp. 546 */ 547 if (proc->needs_auth && sys_authenticate) { 548 549 if (recv_len < (REQ_LEN_HDR + 550 (INFO_ITEMSIZE(inpkt->mbz_itemsize) * 551 INFO_NITEMS(inpkt->err_nitems)) + 552 REQ_TAIL_MIN)) { 553 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 554 return; 555 } 556 557 /* 558 * For 16-octet digests, regardless of itemsize and 559 * nitems, authenticated requests are a fixed size 560 * with the timestamp, key ID, and digest located 561 * at the end of the packet. Because the key ID 562 * determining the digest size precedes the digest, 563 * for larger digests the fixed size request scheme 564 * is abandoned and the timestamp, key ID, and digest 565 * are located relative to the start of the packet, 566 * with the digest size determined by the packet size. 567 */ 568 noslop_len = REQ_LEN_HDR 569 + INFO_ITEMSIZE(inpkt->mbz_itemsize) * 570 INFO_NITEMS(inpkt->err_nitems) 571 + sizeof(inpkt->tstamp); 572 /* 32-bit alignment */ 573 noslop_len = (noslop_len + 3) & ~3; 574 if (recv_len > (noslop_len + MAX_MAC_LEN)) 575 mac_len = 20; 576 else 577 mac_len = recv_len - noslop_len; 578 579 tailinpkt = (void *)((char *)inpkt + recv_len - 580 (mac_len + sizeof(inpkt->tstamp))); 581 582 /* 583 * If this guy is restricted from doing this, don't let 584 * him. If the wrong key was used, or packet doesn't 585 * have mac, return. 586 */ 587 /* XXX: Use authistrustedip(), or equivalent. 
*/ 588 if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid 589 || ntohl(tailinpkt->keyid) != info_auth_keyid) { 590 DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n", 591 INFO_IS_AUTH(inpkt->auth_seq), 592 info_auth_keyid, 593 ntohl(tailinpkt->keyid), (u_long)mac_len)); 594 #ifdef DEBUG 595 msyslog(LOG_DEBUG, 596 "process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n", 597 INFO_IS_AUTH(inpkt->auth_seq), 598 info_auth_keyid, 599 ntohl(tailinpkt->keyid), (u_long)mac_len); 600 #endif 601 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH); 602 return; 603 } 604 if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) { 605 DPRINTF(5, ("bad pkt length %zu\n", recv_len)); 606 msyslog(LOG_ERR, 607 "process_private: bad pkt length %zu", 608 recv_len); 609 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 610 return; 611 } 612 if (!mod_okay || !authhavekey(info_auth_keyid)) { 613 DPRINTF(5, ("failed auth mod_okay %d\n", 614 mod_okay)); 615 #ifdef DEBUG 616 msyslog(LOG_DEBUG, 617 "process_private: failed auth mod_okay %d\n", 618 mod_okay); 619 #endif 620 if (!mod_okay) { 621 sys_restricted++; 622 } 623 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH); 624 return; 625 } 626 627 /* 628 * calculate absolute time difference between xmit time stamp 629 * and receive time stamp. If too large, too bad. 630 */ 631 NTOHL_FP(&tailinpkt->tstamp, &ftmp); 632 L_SUB(&ftmp, &rbufp->recv_time); 633 LFPTOD(&ftmp, dtemp); 634 if (fabs(dtemp) > INFO_TS_MAXSKEW) { 635 /* 636 * He's a loser. Tell him. 637 */ 638 DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n", 639 dtemp, INFO_TS_MAXSKEW)); 640 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH); 641 return; 642 } 643 644 /* 645 * So far so good. See if decryption works out okay. 646 */ 647 if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt, 648 recv_len - mac_len, mac_len)) { 649 DPRINTF(5, ("authdecrypt failed\n")); 650 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH); 651 return; 652 } 653 } 654 655 DPRINTF(3, ("process_private: all okay, into handler\n")); 656 /* 657 * Packet is okay. Call the handler to send him data. 
658 */ 659 (proc->handler)(srcadr, inter, inpkt); 660 } 661 662 663 /* 664 * list_peers - send a list of the peers 665 */ 666 static void 667 list_peers( 668 sockaddr_u *srcadr, 669 endpt *inter, 670 struct req_pkt *inpkt 671 ) 672 { 673 struct info_peer_list * ip; 674 const struct peer * pp; 675 676 ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt, 677 v6sizeof(struct info_peer_list)); 678 for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) { 679 if (IS_IPV6(&pp->srcadr)) { 680 if (!client_v6_capable) 681 continue; 682 ip->addr6 = SOCK_ADDR6(&pp->srcadr); 683 ip->v6_flag = 1; 684 } else { 685 ip->addr = NSRCADR(&pp->srcadr); 686 if (client_v6_capable) 687 ip->v6_flag = 0; 688 } 689 690 ip->port = NSRCPORT(&pp->srcadr); 691 ip->hmode = pp->hmode; 692 ip->flags = 0; 693 if (pp->flags & FLAG_CONFIG) 694 ip->flags |= INFO_FLAG_CONFIG; 695 if (pp == sys_peer) 696 ip->flags |= INFO_FLAG_SYSPEER; 697 if (pp->status == CTL_PST_SEL_SYNCCAND) 698 ip->flags |= INFO_FLAG_SEL_CANDIDATE; 699 if (pp->status >= CTL_PST_SEL_SYSPEER) 700 ip->flags |= INFO_FLAG_SHORTLIST; 701 ip = (struct info_peer_list *)more_pkt(); 702 } /* for pp */ 703 704 flush_pkt(); 705 } 706 707 708 /* 709 * list_peers_sum - return extended peer list 710 */ 711 static void 712 list_peers_sum( 713 sockaddr_u *srcadr, 714 endpt *inter, 715 struct req_pkt *inpkt 716 ) 717 { 718 struct info_peer_summary * ips; 719 const struct peer * pp; 720 l_fp ltmp; 721 722 DPRINTF(3, ("wants peer list summary\n")); 723 724 ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt, 725 v6sizeof(struct info_peer_summary)); 726 for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) { 727 DPRINTF(4, ("sum: got one\n")); 728 /* 729 * Be careful here not to return v6 peers when we 730 * want only v4. 
731 */ 732 if (IS_IPV6(&pp->srcadr)) { 733 if (!client_v6_capable) 734 continue; 735 ips->srcadr6 = SOCK_ADDR6(&pp->srcadr); 736 ips->v6_flag = 1; 737 if (pp->dstadr) 738 ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin); 739 else 740 ZERO(ips->dstadr6); 741 } else { 742 ips->srcadr = NSRCADR(&pp->srcadr); 743 if (client_v6_capable) 744 ips->v6_flag = 0; 745 746 if (pp->dstadr) { 747 if (!pp->processed) 748 ips->dstadr = NSRCADR(&pp->dstadr->sin); 749 else { 750 if (MDF_BCAST == pp->cast_flags) 751 ips->dstadr = NSRCADR(&pp->dstadr->bcast); 752 else if (pp->cast_flags) { 753 ips->dstadr = NSRCADR(&pp->dstadr->sin); 754 if (!ips->dstadr) 755 ips->dstadr = NSRCADR(&pp->dstadr->bcast); 756 } 757 } 758 } else { 759 ips->dstadr = 0; 760 } 761 } 762 763 ips->srcport = NSRCPORT(&pp->srcadr); 764 ips->stratum = pp->stratum; 765 ips->hpoll = pp->hpoll; 766 ips->ppoll = pp->ppoll; 767 ips->reach = pp->reach; 768 ips->flags = 0; 769 if (pp == sys_peer) 770 ips->flags |= INFO_FLAG_SYSPEER; 771 if (pp->flags & FLAG_CONFIG) 772 ips->flags |= INFO_FLAG_CONFIG; 773 if (pp->flags & FLAG_REFCLOCK) 774 ips->flags |= INFO_FLAG_REFCLOCK; 775 if (pp->flags & FLAG_PREFER) 776 ips->flags |= INFO_FLAG_PREFER; 777 if (pp->flags & FLAG_BURST) 778 ips->flags |= INFO_FLAG_BURST; 779 if (pp->status == CTL_PST_SEL_SYNCCAND) 780 ips->flags |= INFO_FLAG_SEL_CANDIDATE; 781 if (pp->status >= CTL_PST_SEL_SYSPEER) 782 ips->flags |= INFO_FLAG_SHORTLIST; 783 ips->hmode = pp->hmode; 784 ips->delay = HTONS_FP(DTOFP(pp->delay)); 785 DTOLFP(pp->offset, <mp); 786 HTONL_FP(<mp, &ips->offset); 787 ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp))); 788 789 ips = (struct info_peer_summary *)more_pkt(); 790 } /* for pp */ 791 792 flush_pkt(); 793 } 794 795 796 /* 797 * peer_info - send information for one or more peers 798 */ 799 static void 800 peer_info ( 801 sockaddr_u *srcadr, 802 endpt *inter, 803 struct req_pkt *inpkt 804 ) 805 { 806 u_short items; 807 size_t item_sz; 808 char * datap; 809 struct info_peer_list ipl; 810 struct peer * pp; 811 struct info_peer * ip; 812 int i; 813 int j; 814 sockaddr_u addr; 815 l_fp ltmp; 816 817 items = INFO_NITEMS(inpkt->err_nitems); 818 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize); 819 datap = inpkt->u.data; 820 if (item_sz != sizeof(ipl)) { 821 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 822 return; 823 } 824 ip = prepare_pkt(srcadr, inter, inpkt, 825 v6sizeof(struct info_peer)); 826 while (items-- > 0 && ip != NULL) { 827 ZERO(ipl); 828 memcpy(&ipl, datap, item_sz); 829 ZERO_SOCK(&addr); 830 NSRCPORT(&addr) = ipl.port; 831 if (client_v6_capable && ipl.v6_flag) { 832 AF(&addr) = AF_INET6; 833 SOCK_ADDR6(&addr) = ipl.addr6; 834 } else { 835 AF(&addr) = AF_INET; 836 NSRCADR(&addr) = ipl.addr; 837 } 838 #ifdef ISC_PLATFORM_HAVESALEN 839 addr.sa.sa_len = SOCKLEN(&addr); 840 #endif 841 datap += item_sz; 842 843 pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL); 844 if (NULL == pp) 845 continue; 846 if (IS_IPV6(&pp->srcadr)) { 847 if (pp->dstadr) 848 ip->dstadr6 = 849 (MDF_BCAST == pp->cast_flags) 850 ? 
SOCK_ADDR6(&pp->dstadr->bcast) 851 : SOCK_ADDR6(&pp->dstadr->sin); 852 else 853 ZERO(ip->dstadr6); 854 855 ip->srcadr6 = SOCK_ADDR6(&pp->srcadr); 856 ip->v6_flag = 1; 857 } else { 858 if (pp->dstadr) { 859 if (!pp->processed) 860 ip->dstadr = NSRCADR(&pp->dstadr->sin); 861 else { 862 if (MDF_BCAST == pp->cast_flags) 863 ip->dstadr = NSRCADR(&pp->dstadr->bcast); 864 else if (pp->cast_flags) { 865 ip->dstadr = NSRCADR(&pp->dstadr->sin); 866 if (!ip->dstadr) 867 ip->dstadr = NSRCADR(&pp->dstadr->bcast); 868 } 869 } 870 } else 871 ip->dstadr = 0; 872 873 ip->srcadr = NSRCADR(&pp->srcadr); 874 if (client_v6_capable) 875 ip->v6_flag = 0; 876 } 877 ip->srcport = NSRCPORT(&pp->srcadr); 878 ip->flags = 0; 879 if (pp == sys_peer) 880 ip->flags |= INFO_FLAG_SYSPEER; 881 if (pp->flags & FLAG_CONFIG) 882 ip->flags |= INFO_FLAG_CONFIG; 883 if (pp->flags & FLAG_REFCLOCK) 884 ip->flags |= INFO_FLAG_REFCLOCK; 885 if (pp->flags & FLAG_PREFER) 886 ip->flags |= INFO_FLAG_PREFER; 887 if (pp->flags & FLAG_BURST) 888 ip->flags |= INFO_FLAG_BURST; 889 if (pp->status == CTL_PST_SEL_SYNCCAND) 890 ip->flags |= INFO_FLAG_SEL_CANDIDATE; 891 if (pp->status >= CTL_PST_SEL_SYSPEER) 892 ip->flags |= INFO_FLAG_SHORTLIST; 893 ip->leap = pp->leap; 894 ip->hmode = pp->hmode; 895 ip->pmode = pp->pmode; 896 ip->keyid = pp->keyid; 897 ip->stratum = pp->stratum; 898 ip->ppoll = pp->ppoll; 899 ip->hpoll = pp->hpoll; 900 ip->precision = pp->precision; 901 ip->version = pp->version; 902 ip->reach = pp->reach; 903 ip->unreach = (u_char)pp->unreach; 904 ip->flash = (u_char)pp->flash; 905 ip->flash2 = (u_short)pp->flash; 906 ip->estbdelay = HTONS_FP(DTOFP(pp->delay)); 907 ip->ttl = (u_char)pp->ttl; 908 ip->associd = htons(pp->associd); 909 ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay)); 910 ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp)); 911 ip->refid = pp->refid; 912 HTONL_FP(&pp->reftime, &ip->reftime); 913 HTONL_FP(&pp->aorg, &ip->org); 914 HTONL_FP(&pp->rec, &ip->rec); 915 HTONL_FP(&pp->xmt, &ip->xmt); 916 j = pp->filter_nextpt - 1; 917 for (i = 0; i < NTP_SHIFT; i++, j--) { 918 if (j < 0) 919 j = NTP_SHIFT-1; 920 ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j])); 921 DTOLFP(pp->filter_offset[j], <mp); 922 HTONL_FP(<mp, &ip->filtoffset[i]); 923 ip->order[i] = (u_char)((pp->filter_nextpt + 924 NTP_SHIFT - 1) - 925 pp->filter_order[i]); 926 if (ip->order[i] >= NTP_SHIFT) 927 ip->order[i] -= NTP_SHIFT; 928 } 929 DTOLFP(pp->offset, <mp); 930 HTONL_FP(<mp, &ip->offset); 931 ip->delay = HTONS_FP(DTOFP(pp->delay)); 932 ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp))); 933 ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter))); 934 ip = more_pkt(); 935 } 936 flush_pkt(); 937 } 938 939 940 /* 941 * peer_stats - send statistics for one or more peers 942 */ 943 static void 944 peer_stats ( 945 sockaddr_u *srcadr, 946 endpt *inter, 947 struct req_pkt *inpkt 948 ) 949 { 950 u_short items; 951 size_t item_sz; 952 char * datap; 953 struct info_peer_list ipl; 954 struct peer * pp; 955 struct info_peer_stats *ip; 956 sockaddr_u addr; 957 958 DPRINTF(1, ("peer_stats: called\n")); 959 items = INFO_NITEMS(inpkt->err_nitems); 960 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize); 961 datap = inpkt->u.data; 962 if (item_sz > sizeof(ipl)) { 963 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 964 return; 965 } 966 ip = prepare_pkt(srcadr, inter, inpkt, 967 v6sizeof(struct info_peer_stats)); 968 while (items-- > 0 && ip != NULL) { 969 ZERO(ipl); 970 memcpy(&ipl, datap, item_sz); 971 ZERO(addr); 972 NSRCPORT(&addr) = ipl.port; 973 if (client_v6_capable && 
ipl.v6_flag) { 974 AF(&addr) = AF_INET6; 975 SOCK_ADDR6(&addr) = ipl.addr6; 976 } else { 977 AF(&addr) = AF_INET; 978 NSRCADR(&addr) = ipl.addr; 979 } 980 #ifdef ISC_PLATFORM_HAVESALEN 981 addr.sa.sa_len = SOCKLEN(&addr); 982 #endif 983 DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n", 984 stoa(&addr), ipl.port, NSRCPORT(&addr))); 985 986 datap += item_sz; 987 988 pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL); 989 if (NULL == pp) 990 continue; 991 992 DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr))); 993 994 if (IS_IPV4(&pp->srcadr)) { 995 if (pp->dstadr) { 996 if (!pp->processed) 997 ip->dstadr = NSRCADR(&pp->dstadr->sin); 998 else { 999 if (MDF_BCAST == pp->cast_flags) 1000 ip->dstadr = NSRCADR(&pp->dstadr->bcast); 1001 else if (pp->cast_flags) { 1002 ip->dstadr = NSRCADR(&pp->dstadr->sin); 1003 if (!ip->dstadr) 1004 ip->dstadr = NSRCADR(&pp->dstadr->bcast); 1005 } 1006 } 1007 } else 1008 ip->dstadr = 0; 1009 1010 ip->srcadr = NSRCADR(&pp->srcadr); 1011 if (client_v6_capable) 1012 ip->v6_flag = 0; 1013 } else { 1014 if (pp->dstadr) 1015 ip->dstadr6 = 1016 (MDF_BCAST == pp->cast_flags) 1017 ? SOCK_ADDR6(&pp->dstadr->bcast) 1018 : SOCK_ADDR6(&pp->dstadr->sin); 1019 else 1020 ZERO(ip->dstadr6); 1021 1022 ip->srcadr6 = SOCK_ADDR6(&pp->srcadr); 1023 ip->v6_flag = 1; 1024 } 1025 ip->srcport = NSRCPORT(&pp->srcadr); 1026 ip->flags = 0; 1027 if (pp == sys_peer) 1028 ip->flags |= INFO_FLAG_SYSPEER; 1029 if (pp->flags & FLAG_CONFIG) 1030 ip->flags |= INFO_FLAG_CONFIG; 1031 if (pp->flags & FLAG_REFCLOCK) 1032 ip->flags |= INFO_FLAG_REFCLOCK; 1033 if (pp->flags & FLAG_PREFER) 1034 ip->flags |= INFO_FLAG_PREFER; 1035 if (pp->flags & FLAG_BURST) 1036 ip->flags |= INFO_FLAG_BURST; 1037 if (pp->flags & FLAG_IBURST) 1038 ip->flags |= INFO_FLAG_IBURST; 1039 if (pp->status == CTL_PST_SEL_SYNCCAND) 1040 ip->flags |= INFO_FLAG_SEL_CANDIDATE; 1041 if (pp->status >= CTL_PST_SEL_SYSPEER) 1042 ip->flags |= INFO_FLAG_SHORTLIST; 1043 ip->flags = htons(ip->flags); 1044 ip->timereceived = htonl((u_int32)(current_time - pp->timereceived)); 1045 ip->timetosend = htonl(pp->nextdate - current_time); 1046 ip->timereachable = htonl((u_int32)(current_time - pp->timereachable)); 1047 ip->sent = htonl((u_int32)(pp->sent)); 1048 ip->processed = htonl((u_int32)(pp->processed)); 1049 ip->badauth = htonl((u_int32)(pp->badauth)); 1050 ip->bogusorg = htonl((u_int32)(pp->bogusorg)); 1051 ip->oldpkt = htonl((u_int32)(pp->oldpkt)); 1052 ip->seldisp = htonl((u_int32)(pp->seldisptoolarge)); 1053 ip->selbroken = htonl((u_int32)(pp->selbroken)); 1054 ip->candidate = pp->status; 1055 ip = (struct info_peer_stats *)more_pkt(); 1056 } 1057 flush_pkt(); 1058 } 1059 1060 1061 /* 1062 * sys_info - return system info 1063 */ 1064 static void 1065 sys_info( 1066 sockaddr_u *srcadr, 1067 endpt *inter, 1068 struct req_pkt *inpkt 1069 ) 1070 { 1071 register struct info_sys *is; 1072 1073 is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt, 1074 v6sizeof(struct info_sys)); 1075 1076 if (sys_peer) { 1077 if (IS_IPV4(&sys_peer->srcadr)) { 1078 is->peer = NSRCADR(&sys_peer->srcadr); 1079 if (client_v6_capable) 1080 is->v6_flag = 0; 1081 } else if (client_v6_capable) { 1082 is->peer6 = SOCK_ADDR6(&sys_peer->srcadr); 1083 is->v6_flag = 1; 1084 } 1085 is->peer_mode = sys_peer->hmode; 1086 } else { 1087 is->peer = 0; 1088 if (client_v6_capable) { 1089 is->v6_flag = 0; 1090 } 1091 is->peer_mode = 0; 1092 } 1093 1094 is->leap = sys_leap; 1095 is->stratum = sys_stratum; 1096 is->precision = sys_precision; 1097 is->rootdelay = 
htonl(DTOFP(sys_rootdelay)); 1098 is->rootdispersion = htonl(DTOUFP(sys_rootdisp)); 1099 is->frequency = htonl(DTOFP(sys_jitter)); 1100 is->stability = htonl(DTOUFP(clock_stability * 1e6)); 1101 is->refid = sys_refid; 1102 HTONL_FP(&sys_reftime, &is->reftime); 1103 1104 is->poll = sys_poll; 1105 1106 is->flags = 0; 1107 if (sys_authenticate) 1108 is->flags |= INFO_FLAG_AUTHENTICATE; 1109 if (sys_bclient || sys_mclient) 1110 is->flags |= INFO_FLAG_BCLIENT; 1111 #ifdef REFCLOCK 1112 if (cal_enable) 1113 is->flags |= INFO_FLAG_CAL; 1114 #endif /* REFCLOCK */ 1115 if (kern_enable) 1116 is->flags |= INFO_FLAG_KERNEL; 1117 if (mon_enabled != MON_OFF) 1118 is->flags |= INFO_FLAG_MONITOR; 1119 if (ntp_enable) 1120 is->flags |= INFO_FLAG_NTP; 1121 if (hardpps_enable) 1122 is->flags |= INFO_FLAG_PPS_SYNC; 1123 if (stats_control) 1124 is->flags |= INFO_FLAG_FILEGEN; 1125 is->bdelay = HTONS_FP(DTOFP(sys_bdelay)); 1126 HTONL_UF(sys_authdelay.l_uf, &is->authdelay); 1127 (void) more_pkt(); 1128 flush_pkt(); 1129 } 1130 1131 1132 /* 1133 * sys_stats - return system statistics 1134 */ 1135 static void 1136 sys_stats( 1137 sockaddr_u *srcadr, 1138 endpt *inter, 1139 struct req_pkt *inpkt 1140 ) 1141 { 1142 register struct info_sys_stats *ss; 1143 1144 ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt, 1145 sizeof(struct info_sys_stats)); 1146 ss->timeup = htonl((u_int32)current_time); 1147 ss->timereset = htonl((u_int32)(current_time - sys_stattime)); 1148 ss->denied = htonl((u_int32)sys_restricted); 1149 ss->oldversionpkt = htonl((u_int32)sys_oldversion); 1150 ss->newversionpkt = htonl((u_int32)sys_newversion); 1151 ss->unknownversion = htonl((u_int32)sys_declined); 1152 ss->badlength = htonl((u_int32)sys_badlength); 1153 ss->processed = htonl((u_int32)sys_processed); 1154 ss->badauth = htonl((u_int32)sys_badauth); 1155 ss->limitrejected = htonl((u_int32)sys_limitrejected); 1156 ss->received = htonl((u_int32)sys_received); 1157 ss->lamport = htonl((u_int32)sys_lamport); 1158 ss->tsrounding = htonl((u_int32)sys_tsrounding); 1159 (void) more_pkt(); 1160 flush_pkt(); 1161 } 1162 1163 1164 /* 1165 * mem_stats - return memory statistics 1166 */ 1167 static void 1168 mem_stats( 1169 sockaddr_u *srcadr, 1170 endpt *inter, 1171 struct req_pkt *inpkt 1172 ) 1173 { 1174 register struct info_mem_stats *ms; 1175 register int i; 1176 1177 ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt, 1178 sizeof(struct info_mem_stats)); 1179 1180 ms->timereset = htonl((u_int32)(current_time - peer_timereset)); 1181 ms->totalpeermem = htons((u_short)total_peer_structs); 1182 ms->freepeermem = htons((u_short)peer_free_count); 1183 ms->findpeer_calls = htonl((u_int32)findpeer_calls); 1184 ms->allocations = htonl((u_int32)peer_allocations); 1185 ms->demobilizations = htonl((u_int32)peer_demobilizations); 1186 1187 for (i = 0; i < NTP_HASH_SIZE; i++) 1188 ms->hashcount[i] = (u_char) 1189 min((u_int)peer_hash_count[i], UCHAR_MAX); 1190 1191 (void) more_pkt(); 1192 flush_pkt(); 1193 } 1194 1195 1196 /* 1197 * io_stats - return io statistics 1198 */ 1199 static void 1200 io_stats( 1201 sockaddr_u *srcadr, 1202 endpt *inter, 1203 struct req_pkt *inpkt 1204 ) 1205 { 1206 struct info_io_stats *io; 1207 1208 io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt, 1209 sizeof(struct info_io_stats)); 1210 1211 io->timereset = htonl((u_int32)(current_time - io_timereset)); 1212 io->totalrecvbufs = htons((u_short) total_recvbuffs()); 1213 io->freerecvbufs = htons((u_short) free_recvbuffs()); 1214 io->fullrecvbufs 
= htons((u_short) full_recvbuffs()); 1215 io->lowwater = htons((u_short) lowater_additions()); 1216 io->dropped = htonl((u_int32)packets_dropped); 1217 io->ignored = htonl((u_int32)packets_ignored); 1218 io->received = htonl((u_int32)packets_received); 1219 io->sent = htonl((u_int32)packets_sent); 1220 io->notsent = htonl((u_int32)packets_notsent); 1221 io->interrupts = htonl((u_int32)handler_calls); 1222 io->int_received = htonl((u_int32)handler_pkts); 1223 1224 (void) more_pkt(); 1225 flush_pkt(); 1226 } 1227 1228 1229 /* 1230 * timer_stats - return timer statistics 1231 */ 1232 static void 1233 timer_stats( 1234 sockaddr_u * srcadr, 1235 endpt * inter, 1236 struct req_pkt * inpkt 1237 ) 1238 { 1239 struct info_timer_stats * ts; 1240 u_long sincereset; 1241 1242 ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter, 1243 inpkt, sizeof(*ts)); 1244 1245 sincereset = current_time - timer_timereset; 1246 ts->timereset = htonl((u_int32)sincereset); 1247 ts->alarms = ts->timereset; 1248 ts->overflows = htonl((u_int32)alarm_overflow); 1249 ts->xmtcalls = htonl((u_int32)timer_xmtcalls); 1250 1251 (void) more_pkt(); 1252 flush_pkt(); 1253 } 1254 1255 1256 /* 1257 * loop_info - return the current state of the loop filter 1258 */ 1259 static void 1260 loop_info( 1261 sockaddr_u *srcadr, 1262 endpt *inter, 1263 struct req_pkt *inpkt 1264 ) 1265 { 1266 struct info_loop *li; 1267 l_fp ltmp; 1268 1269 li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt, 1270 sizeof(struct info_loop)); 1271 1272 DTOLFP(last_offset, <mp); 1273 HTONL_FP(<mp, &li->last_offset); 1274 DTOLFP(drift_comp * 1e6, <mp); 1275 HTONL_FP(<mp, &li->drift_comp); 1276 li->compliance = htonl((u_int32)(tc_counter)); 1277 li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch)); 1278 1279 (void) more_pkt(); 1280 flush_pkt(); 1281 } 1282 1283 1284 /* 1285 * do_conf - add a peer to the configuration list 1286 */ 1287 static void 1288 do_conf( 1289 sockaddr_u *srcadr, 1290 endpt *inter, 1291 struct req_pkt *inpkt 1292 ) 1293 { 1294 u_short items; 1295 size_t item_sz; 1296 u_int fl; 1297 char * datap; 1298 struct conf_peer temp_cp; 1299 sockaddr_u peeraddr; 1300 1301 /* 1302 * Do a check of everything to see that it looks 1303 * okay. If not, complain about it. Note we are 1304 * very picky here. 
1305 */ 1306 items = INFO_NITEMS(inpkt->err_nitems); 1307 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize); 1308 datap = inpkt->u.data; 1309 if (item_sz > sizeof(temp_cp)) { 1310 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1311 return; 1312 } 1313 1314 while (items-- > 0) { 1315 ZERO(temp_cp); 1316 memcpy(&temp_cp, datap, item_sz); 1317 ZERO_SOCK(&peeraddr); 1318 1319 fl = 0; 1320 if (temp_cp.flags & CONF_FLAG_PREFER) 1321 fl |= FLAG_PREFER; 1322 if (temp_cp.flags & CONF_FLAG_BURST) 1323 fl |= FLAG_BURST; 1324 if (temp_cp.flags & CONF_FLAG_IBURST) 1325 fl |= FLAG_IBURST; 1326 #ifdef AUTOKEY 1327 if (temp_cp.flags & CONF_FLAG_SKEY) 1328 fl |= FLAG_SKEY; 1329 #endif /* AUTOKEY */ 1330 if (client_v6_capable && temp_cp.v6_flag) { 1331 AF(&peeraddr) = AF_INET6; 1332 SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6; 1333 } else { 1334 AF(&peeraddr) = AF_INET; 1335 NSRCADR(&peeraddr) = temp_cp.peeraddr; 1336 /* 1337 * Make sure the address is valid 1338 */ 1339 if (!ISREFCLOCKADR(&peeraddr) && 1340 ISBADADR(&peeraddr)) { 1341 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1342 return; 1343 } 1344 1345 } 1346 NSRCPORT(&peeraddr) = htons(NTP_PORT); 1347 #ifdef ISC_PLATFORM_HAVESALEN 1348 peeraddr.sa.sa_len = SOCKLEN(&peeraddr); 1349 #endif 1350 1351 /* check mode value: 0 <= hmode <= 6 1352 * 1353 * There's no good global define for that limit, and 1354 * using a magic define is as good (or bad, actually) as 1355 * a magic number. So we use the highest possible peer 1356 * mode, and that is MODE_BCLIENT. 1357 * 1358 * [Bug 3009] claims that a problem occurs for hmode > 7, 1359 * but the code in ntp_peer.c indicates trouble for any 1360 * hmode > 6 ( --> MODE_BCLIENT). 1361 */ 1362 if (temp_cp.hmode > MODE_BCLIENT) { 1363 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1364 return; 1365 } 1366 1367 /* Any more checks on the values? Unchecked at this 1368 * point: 1369 * - version 1370 * - ttl 1371 * - keyid 1372 * 1373 * - minpoll/maxpoll, but they are treated properly 1374 * for all cases internally. Checking not necessary. 1375 * 1376 * Note that we ignore any previously-specified ippeerlimit. 1377 * If we're told to create the peer, we create the peer. 1378 */ 1379 1380 /* finally create the peer */ 1381 if (peer_config(&peeraddr, NULL, NULL, -1, 1382 temp_cp.hmode, temp_cp.version, temp_cp.minpoll, 1383 temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid, 1384 NULL) == 0) 1385 { 1386 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA); 1387 return; 1388 } 1389 1390 datap += item_sz; 1391 } 1392 req_ack(srcadr, inter, inpkt, INFO_OKAY); 1393 } 1394 1395 1396 /* 1397 * do_unconf - remove a peer from the configuration list 1398 */ 1399 static void 1400 do_unconf( 1401 sockaddr_u * srcadr, 1402 endpt * inter, 1403 struct req_pkt *inpkt 1404 ) 1405 { 1406 u_short items; 1407 size_t item_sz; 1408 char * datap; 1409 struct conf_unpeer temp_cp; 1410 struct peer * p; 1411 sockaddr_u peeraddr; 1412 int loops; 1413 1414 /* 1415 * This is a bit unstructured, but I like to be careful. 1416 * We check to see that every peer exists and is actually 1417 * configured. If so, we remove them. If not, we return 1418 * an error. 1419 * 1420 * [Bug 3011] Even if we checked all peers given in the request 1421 * in a dry run, there's still a chance that the caller played 1422 * unfair and gave the same peer multiple times. 
So we still 1423 * have to be prepared for nasty surprises in the second run ;) 1424 */ 1425 1426 /* basic consistency checks */ 1427 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize); 1428 if (item_sz > sizeof(temp_cp)) { 1429 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1430 return; 1431 } 1432 1433 /* now do two runs: first a dry run, then a busy one */ 1434 for (loops = 0; loops != 2; ++loops) { 1435 items = INFO_NITEMS(inpkt->err_nitems); 1436 datap = inpkt->u.data; 1437 while (items-- > 0) { 1438 /* copy from request to local */ 1439 ZERO(temp_cp); 1440 memcpy(&temp_cp, datap, item_sz); 1441 /* get address structure */ 1442 ZERO_SOCK(&peeraddr); 1443 if (client_v6_capable && temp_cp.v6_flag) { 1444 AF(&peeraddr) = AF_INET6; 1445 SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6; 1446 } else { 1447 AF(&peeraddr) = AF_INET; 1448 NSRCADR(&peeraddr) = temp_cp.peeraddr; 1449 } 1450 SET_PORT(&peeraddr, NTP_PORT); 1451 #ifdef ISC_PLATFORM_HAVESALEN 1452 peeraddr.sa.sa_len = SOCKLEN(&peeraddr); 1453 #endif 1454 DPRINTF(1, ("searching for %s\n", 1455 stoa(&peeraddr))); 1456 1457 /* search for matching configred(!) peer */ 1458 p = NULL; 1459 do { 1460 p = findexistingpeer( 1461 &peeraddr, NULL, p, -1, 0, NULL); 1462 } while (p && !(FLAG_CONFIG & p->flags)); 1463 1464 if (!loops && !p) { 1465 /* Item not found in dry run -- bail! */ 1466 req_ack(srcadr, inter, inpkt, 1467 INFO_ERR_NODATA); 1468 return; 1469 } else if (loops && p) { 1470 /* Item found in busy run -- remove! */ 1471 peer_clear(p, "GONE"); 1472 unpeer(p); 1473 } 1474 datap += item_sz; 1475 } 1476 } 1477 1478 /* report success */ 1479 req_ack(srcadr, inter, inpkt, INFO_OKAY); 1480 } 1481 1482 1483 /* 1484 * set_sys_flag - set system flags 1485 */ 1486 static void 1487 set_sys_flag( 1488 sockaddr_u *srcadr, 1489 endpt *inter, 1490 struct req_pkt *inpkt 1491 ) 1492 { 1493 setclr_flags(srcadr, inter, inpkt, 1); 1494 } 1495 1496 1497 /* 1498 * clr_sys_flag - clear system flags 1499 */ 1500 static void 1501 clr_sys_flag( 1502 sockaddr_u *srcadr, 1503 endpt *inter, 1504 struct req_pkt *inpkt 1505 ) 1506 { 1507 setclr_flags(srcadr, inter, inpkt, 0); 1508 } 1509 1510 1511 /* 1512 * setclr_flags - do the grunge work of flag setting/clearing 1513 */ 1514 static void 1515 setclr_flags( 1516 sockaddr_u *srcadr, 1517 endpt *inter, 1518 struct req_pkt *inpkt, 1519 u_long set 1520 ) 1521 { 1522 struct conf_sys_flags *sf; 1523 u_int32 flags; 1524 1525 if (INFO_NITEMS(inpkt->err_nitems) > 1) { 1526 msyslog(LOG_ERR, "setclr_flags: err_nitems > 1"); 1527 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1528 return; 1529 } 1530 1531 sf = (struct conf_sys_flags *)&inpkt->u; 1532 flags = ntohl(sf->flags); 1533 1534 if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS | 1535 SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR | 1536 SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) { 1537 msyslog(LOG_ERR, "setclr_flags: extra flags: %#x", 1538 flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS | 1539 SYS_FLAG_NTP | SYS_FLAG_KERNEL | 1540 SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN | 1541 SYS_FLAG_AUTH | SYS_FLAG_CAL)); 1542 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1543 return; 1544 } 1545 1546 if (flags & SYS_FLAG_BCLIENT) 1547 proto_config(PROTO_BROADCLIENT, set, 0., NULL); 1548 if (flags & SYS_FLAG_PPS) 1549 proto_config(PROTO_PPS, set, 0., NULL); 1550 if (flags & SYS_FLAG_NTP) 1551 proto_config(PROTO_NTP, set, 0., NULL); 1552 if (flags & SYS_FLAG_KERNEL) 1553 proto_config(PROTO_KERNEL, set, 0., NULL); 1554 if (flags & SYS_FLAG_MONITOR) 1555 proto_config(PROTO_MONITOR, set, 0., NULL); 1556 
if (flags & SYS_FLAG_FILEGEN) 1557 proto_config(PROTO_FILEGEN, set, 0., NULL); 1558 if (flags & SYS_FLAG_AUTH) 1559 proto_config(PROTO_AUTHENTICATE, set, 0., NULL); 1560 if (flags & SYS_FLAG_CAL) 1561 proto_config(PROTO_CAL, set, 0., NULL); 1562 req_ack(srcadr, inter, inpkt, INFO_OKAY); 1563 } 1564 1565 /* There have been some issues with the restrict list processing, 1566 * ranging from problems with deep recursion (resulting in stack 1567 * overflows) and overfull reply buffers. 1568 * 1569 * To avoid this trouble the list reversal is done iteratively using a 1570 * scratch pad. 1571 */ 1572 typedef struct RestrictStack RestrictStackT; 1573 struct RestrictStack { 1574 RestrictStackT *link; 1575 size_t fcnt; 1576 const restrict_u *pres[63]; 1577 }; 1578 1579 static size_t 1580 getStackSheetSize( 1581 RestrictStackT *sp 1582 ) 1583 { 1584 if (sp) 1585 return sizeof(sp->pres)/sizeof(sp->pres[0]); 1586 return 0u; 1587 } 1588 1589 static int/*BOOL*/ 1590 pushRestriction( 1591 RestrictStackT **spp, 1592 const restrict_u *ptr 1593 ) 1594 { 1595 RestrictStackT *sp; 1596 1597 if (NULL == (sp = *spp) || 0 == sp->fcnt) { 1598 /* need another sheet in the scratch pad */ 1599 sp = emalloc(sizeof(*sp)); 1600 sp->link = *spp; 1601 sp->fcnt = getStackSheetSize(sp); 1602 *spp = sp; 1603 } 1604 sp->pres[--sp->fcnt] = ptr; 1605 return TRUE; 1606 } 1607 1608 static int/*BOOL*/ 1609 popRestriction( 1610 RestrictStackT **spp, 1611 const restrict_u **opp 1612 ) 1613 { 1614 RestrictStackT *sp; 1615 1616 if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp)) 1617 return FALSE; 1618 1619 *opp = sp->pres[sp->fcnt++]; 1620 if (sp->fcnt >= getStackSheetSize(sp)) { 1621 /* discard sheet from scratch pad */ 1622 *spp = sp->link; 1623 free(sp); 1624 } 1625 return TRUE; 1626 } 1627 1628 static void 1629 flushRestrictionStack( 1630 RestrictStackT **spp 1631 ) 1632 { 1633 RestrictStackT *sp; 1634 1635 while (NULL != (sp = *spp)) { 1636 *spp = sp->link; 1637 free(sp); 1638 } 1639 } 1640 1641 /* 1642 * list_restrict4 - iterative helper for list_restrict dumps IPv4 1643 * restriction list in reverse order. 1644 */ 1645 static void 1646 list_restrict4( 1647 const restrict_u * res, 1648 struct info_restrict ** ppir 1649 ) 1650 { 1651 RestrictStackT * rpad; 1652 struct info_restrict * pir; 1653 1654 pir = *ppir; 1655 for (rpad = NULL; res; res = res->link) 1656 if (!pushRestriction(&rpad, res)) 1657 break; 1658 1659 while (pir && popRestriction(&rpad, &res)) { 1660 pir->addr = htonl(res->u.v4.addr); 1661 if (client_v6_capable) 1662 pir->v6_flag = 0; 1663 pir->mask = htonl(res->u.v4.mask); 1664 pir->count = htonl(res->count); 1665 pir->rflags = htons(res->rflags); 1666 pir->mflags = htons(res->mflags); 1667 pir = (struct info_restrict *)more_pkt(); 1668 } 1669 flushRestrictionStack(&rpad); 1670 *ppir = pir; 1671 } 1672 1673 /* 1674 * list_restrict6 - iterative helper for list_restrict dumps IPv6 1675 * restriction list in reverse order. 
1676 */ 1677 static void 1678 list_restrict6( 1679 const restrict_u * res, 1680 struct info_restrict ** ppir 1681 ) 1682 { 1683 RestrictStackT * rpad; 1684 struct info_restrict * pir; 1685 1686 pir = *ppir; 1687 for (rpad = NULL; res; res = res->link) 1688 if (!pushRestriction(&rpad, res)) 1689 break; 1690 1691 while (pir && popRestriction(&rpad, &res)) { 1692 pir->addr6 = res->u.v6.addr; 1693 pir->mask6 = res->u.v6.mask; 1694 pir->v6_flag = 1; 1695 pir->count = htonl(res->count); 1696 pir->rflags = htons(res->rflags); 1697 pir->mflags = htons(res->mflags); 1698 pir = (struct info_restrict *)more_pkt(); 1699 } 1700 flushRestrictionStack(&rpad); 1701 *ppir = pir; 1702 } 1703 1704 1705 /* 1706 * list_restrict - return the restrict list 1707 */ 1708 static void 1709 list_restrict( 1710 sockaddr_u *srcadr, 1711 endpt *inter, 1712 struct req_pkt *inpkt 1713 ) 1714 { 1715 struct info_restrict *ir; 1716 1717 DPRINTF(3, ("wants restrict list summary\n")); 1718 1719 ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt, 1720 v6sizeof(struct info_restrict)); 1721 1722 /* 1723 * The restriction lists are kept sorted in the reverse order 1724 * than they were originally. To preserve the output semantics, 1725 * dump each list in reverse order. The workers take care of that. 1726 */ 1727 list_restrict4(restrictlist4, &ir); 1728 if (client_v6_capable) 1729 list_restrict6(restrictlist6, &ir); 1730 flush_pkt(); 1731 } 1732 1733 1734 /* 1735 * do_resaddflags - add flags to a restrict entry (or create one) 1736 */ 1737 static void 1738 do_resaddflags( 1739 sockaddr_u *srcadr, 1740 endpt *inter, 1741 struct req_pkt *inpkt 1742 ) 1743 { 1744 do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS); 1745 } 1746 1747 1748 1749 /* 1750 * do_ressubflags - remove flags from a restrict entry 1751 */ 1752 static void 1753 do_ressubflags( 1754 sockaddr_u *srcadr, 1755 endpt *inter, 1756 struct req_pkt *inpkt 1757 ) 1758 { 1759 do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG); 1760 } 1761 1762 1763 /* 1764 * do_unrestrict - remove a restrict entry from the list 1765 */ 1766 static void 1767 do_unrestrict( 1768 sockaddr_u *srcadr, 1769 endpt *inter, 1770 struct req_pkt *inpkt 1771 ) 1772 { 1773 do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE); 1774 } 1775 1776 1777 /* 1778 * do_restrict - do the dirty stuff of dealing with restrictions 1779 */ 1780 static void 1781 do_restrict( 1782 sockaddr_u *srcadr, 1783 endpt *inter, 1784 struct req_pkt *inpkt, 1785 restrict_op op 1786 ) 1787 { 1788 char * datap; 1789 struct conf_restrict cr; 1790 u_short items; 1791 size_t item_sz; 1792 sockaddr_u matchaddr; 1793 sockaddr_u matchmask; 1794 int bad; 1795 int/*BOOL*/ success; 1796 1797 switch(op) { 1798 case RESTRICT_FLAGS: 1799 case RESTRICT_UNFLAG: 1800 case RESTRICT_REMOVE: 1801 case RESTRICT_REMOVEIF: 1802 break; 1803 1804 default: 1805 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1806 return; 1807 } 1808 1809 /* 1810 * Do a check of the flags to make sure that only 1811 * the NTPPORT flag is set, if any. If not, complain 1812 * about it. Note we are very picky here. 
1813 */ 1814 items = INFO_NITEMS(inpkt->err_nitems); 1815 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize); 1816 datap = inpkt->u.data; 1817 if (item_sz > sizeof(cr)) { 1818 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1819 return; 1820 } 1821 1822 bad = 0; 1823 while (items-- > 0 && !bad) { 1824 memcpy(&cr, datap, item_sz); 1825 cr.flags = ntohs(cr.flags); /* XXX */ 1826 cr.mflags = ntohs(cr.mflags); 1827 if (~RESM_NTPONLY & cr.mflags) 1828 bad |= 1; 1829 if (~RES_ALLFLAGS & cr.flags) 1830 bad |= 2; 1831 if (INADDR_ANY != cr.mask) { 1832 if (client_v6_capable && cr.v6_flag) { 1833 if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6)) 1834 bad |= 4; 1835 } else { 1836 if (INADDR_ANY == cr.addr) 1837 bad |= 8; 1838 } 1839 } 1840 datap += item_sz; 1841 } 1842 1843 if (bad) { 1844 msyslog(LOG_ERR, "%s: bad = 0x%x", __func__, bad); 1845 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1846 return; 1847 } 1848 1849 /* 1850 * Looks okay, try it out. Needs to reload data pointer and 1851 * item counter. (Talos-CAN-0052) 1852 */ 1853 ZERO_SOCK(&matchaddr); 1854 ZERO_SOCK(&matchmask); 1855 items = INFO_NITEMS(inpkt->err_nitems); 1856 datap = inpkt->u.data; 1857 1858 while (items-- > 0) { 1859 memcpy(&cr, datap, item_sz); 1860 cr.flags = ntohs(cr.flags); /* XXX: size */ 1861 cr.mflags = ntohs(cr.mflags); 1862 cr.ippeerlimit = ntohs(cr.ippeerlimit); 1863 if (client_v6_capable && cr.v6_flag) { 1864 AF(&matchaddr) = AF_INET6; 1865 AF(&matchmask) = AF_INET6; 1866 SOCK_ADDR6(&matchaddr) = cr.addr6; 1867 SOCK_ADDR6(&matchmask) = cr.mask6; 1868 } else { 1869 AF(&matchaddr) = AF_INET; 1870 AF(&matchmask) = AF_INET; 1871 NSRCADR(&matchaddr) = cr.addr; 1872 NSRCADR(&matchmask) = cr.mask; 1873 } 1874 success = hack_restrict(op, &matchaddr, &matchmask, 1875 cr.ippeerlimit, cr.mflags, 1876 cr.flags, 0); 1877 if (!success) { 1878 DPRINTF(1, ("%s: %s %s mask %s ippeerlimit %hd %s %s failed", 1879 __func__, resop_str(op), 1880 stoa(&matchaddr), stoa(&matchmask), 1881 cr.ippeerlimit, mflags_str(cr.mflags), 1882 rflags_str(cr.flags))); 1883 } 1884 datap += item_sz; 1885 } 1886 1887 req_ack(srcadr, inter, inpkt, INFO_OKAY); 1888 } 1889 1890 1891 /* 1892 * mon_getlist - return monitor data 1893 */ 1894 static void 1895 mon_getlist( 1896 sockaddr_u *srcadr, 1897 endpt *inter, 1898 struct req_pkt *inpkt 1899 ) 1900 { 1901 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA); 1902 } 1903 1904 1905 /* 1906 * Module entry points and the flags they correspond with 1907 */ 1908 struct reset_entry { 1909 int flag; /* flag this corresponds to */ 1910 void (*handler)(void); /* routine to handle request */ 1911 }; 1912 1913 struct reset_entry reset_entries[] = { 1914 { RESET_FLAG_ALLPEERS, peer_all_reset }, 1915 { RESET_FLAG_IO, io_clr_stats }, 1916 { RESET_FLAG_SYS, proto_clr_stats }, 1917 { RESET_FLAG_MEM, peer_clr_stats }, 1918 { RESET_FLAG_TIMER, timer_clr_stats }, 1919 { RESET_FLAG_AUTH, reset_auth_stats }, 1920 { RESET_FLAG_CTL, ctl_clr_stats }, 1921 { 0, 0 } 1922 }; 1923 1924 /* 1925 * reset_stats - reset statistic counters here and there 1926 */ 1927 static void 1928 reset_stats( 1929 sockaddr_u *srcadr, 1930 endpt *inter, 1931 struct req_pkt *inpkt 1932 ) 1933 { 1934 struct reset_flags *rflags; 1935 u_long flags; 1936 struct reset_entry *rent; 1937 1938 if (INFO_NITEMS(inpkt->err_nitems) > 1) { 1939 msyslog(LOG_ERR, "reset_stats: err_nitems > 1"); 1940 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1941 return; 1942 } 1943 1944 rflags = (struct reset_flags *)&inpkt->u; 1945 flags = ntohl(rflags->flags); 1946 1947 if (flags & ~RESET_ALLFLAGS) { 1948 
msyslog(LOG_ERR, "reset_stats: reset leaves %#lx", 1949 flags & ~RESET_ALLFLAGS); 1950 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1951 return; 1952 } 1953 1954 for (rent = reset_entries; rent->flag != 0; rent++) { 1955 if (flags & rent->flag) 1956 (*rent->handler)(); 1957 } 1958 req_ack(srcadr, inter, inpkt, INFO_OKAY); 1959 } 1960 1961 1962 /* 1963 * reset_peer - clear a peer's statistics 1964 */ 1965 static void 1966 reset_peer( 1967 sockaddr_u *srcadr, 1968 endpt *inter, 1969 struct req_pkt *inpkt 1970 ) 1971 { 1972 u_short items; 1973 size_t item_sz; 1974 char * datap; 1975 struct conf_unpeer cp; 1976 struct peer * p; 1977 sockaddr_u peeraddr; 1978 int bad; 1979 1980 /* 1981 * We check first to see that every peer exists. If not, 1982 * we return an error. 1983 */ 1984 1985 items = INFO_NITEMS(inpkt->err_nitems); 1986 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize); 1987 datap = inpkt->u.data; 1988 if (item_sz > sizeof(cp)) { 1989 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT); 1990 return; 1991 } 1992 1993 bad = FALSE; 1994 while (items-- > 0 && !bad) { 1995 ZERO(cp); 1996 memcpy(&cp, datap, item_sz); 1997 ZERO_SOCK(&peeraddr); 1998 if (client_v6_capable && cp.v6_flag) { 1999 AF(&peeraddr) = AF_INET6; 2000 SOCK_ADDR6(&peeraddr) = cp.peeraddr6; 2001 } else { 2002 AF(&peeraddr) = AF_INET; 2003 NSRCADR(&peeraddr) = cp.peeraddr; 2004 } 2005 2006 #ifdef ISC_PLATFORM_HAVESALEN 2007 peeraddr.sa.sa_len = SOCKLEN(&peeraddr); 2008 #endif 2009 p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL); 2010 if (NULL == p) 2011 bad++; 2012 datap += item_sz; 2013 } 2014 2015 if (bad) { 2016 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA); 2017 return; 2018 } 2019 2020 /* 2021 * Now do it in earnest. Needs to reload data pointer and item 2022 * counter. (Talos-CAN-0052) 2023 */ 2024 2025 items = INFO_NITEMS(inpkt->err_nitems); 2026 datap = inpkt->u.data; 2027 while (items-- > 0) { 2028 ZERO(cp); 2029 memcpy(&cp, datap, item_sz); 2030 ZERO_SOCK(&peeraddr); 2031 if (client_v6_capable && cp.v6_flag) { 2032 AF(&peeraddr) = AF_INET6; 2033 SOCK_ADDR6(&peeraddr) = cp.peeraddr6; 2034 } else { 2035 AF(&peeraddr) = AF_INET; 2036 NSRCADR(&peeraddr) = cp.peeraddr; 2037 } 2038 SET_PORT(&peeraddr, 123); 2039 #ifdef ISC_PLATFORM_HAVESALEN 2040 peeraddr.sa.sa_len = SOCKLEN(&peeraddr); 2041 #endif 2042 p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL); 2043 while (p != NULL) { 2044 peer_reset(p); 2045 p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL); 2046 } 2047 datap += item_sz; 2048 } 2049 2050 req_ack(srcadr, inter, inpkt, INFO_OKAY); 2051 } 2052 2053 2054 /* 2055 * do_key_reread - reread the encryption key file 2056 */ 2057 static void 2058 do_key_reread( 2059 sockaddr_u *srcadr, 2060 endpt *inter, 2061 struct req_pkt *inpkt 2062 ) 2063 { 2064 rereadkeys(); 2065 req_ack(srcadr, inter, inpkt, INFO_OKAY); 2066 } 2067 2068 2069 /* 2070 * trust_key - make one or more keys trusted 2071 */ 2072 static void 2073 trust_key( 2074 sockaddr_u *srcadr, 2075 endpt *inter, 2076 struct req_pkt *inpkt 2077 ) 2078 { 2079 do_trustkey(srcadr, inter, inpkt, 1); 2080 } 2081 2082 2083 /* 2084 * untrust_key - make one or more keys untrusted 2085 */ 2086 static void 2087 untrust_key( 2088 sockaddr_u *srcadr, 2089 endpt *inter, 2090 struct req_pkt *inpkt 2091 ) 2092 { 2093 do_trustkey(srcadr, inter, inpkt, 0); 2094 } 2095 2096 2097 /* 2098 * do_trustkey - make keys either trustable or untrustable 2099 */ 2100 static void 2101 do_trustkey( 2102 sockaddr_u *srcadr, 2103 endpt *inter, 2104 struct req_pkt *inpkt, 2105 u_long trust 

/*
 * do_trustkey - make keys either trustable or untrustable
 */
static void
do_trustkey(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	u_long trust
	)
{
	register uint32_t *kp;
	register int items;

	items = INFO_NITEMS(inpkt->err_nitems);
	kp = (uint32_t *)&inpkt->u;
	while (items-- > 0) {
		authtrust(*kp, trust);
		kp++;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * get_auth_info - return some stats concerning the authentication module
 */
static void
get_auth_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_auth *ia;

	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
					     sizeof(struct info_auth));

	ia->numkeys = htonl((u_int32)authnumkeys);
	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
	ia->keylookups = htonl((u_int32)authkeylookups);
	ia->keynotfound = htonl((u_int32)authkeynotfound);
	ia->encryptions = htonl((u_int32)authencryptions);
	ia->decryptions = htonl((u_int32)authdecryptions);
	ia->keyuncached = htonl((u_int32)authkeyuncached);
	ia->expired = htonl((u_int32)authkeyexpired);
	ia->timereset = htonl((u_int32)(current_time - auth_timereset));

	(void) more_pkt();
	flush_pkt();
}



/*
 * reset_auth_stats - reset the authentication stat counters.  Done here
 *		      to keep ntp-isms out of the authentication module
 */
void
reset_auth_stats(void)
{
	authkeylookups = 0;
	authkeynotfound = 0;
	authencryptions = 0;
	authdecryptions = 0;
	authkeyuncached = 0;
	auth_timereset = current_time;
}


/*
 * req_get_traps - return information about current trap holders
 */
static void
req_get_traps(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_trap *it;
	struct ctl_trap *tr;
	size_t i;

	if (num_ctl_traps == 0) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
					     v6sizeof(struct info_trap));

	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
		if (tr->tr_flags & TRAP_INUSE) {
			if (IS_IPV4(&tr->tr_addr)) {
				if (tr->tr_localaddr == any_interface)
					it->local_address = 0;
				else
					it->local_address
					    = NSRCADR(&tr->tr_localaddr->sin);
				it->trap_address = NSRCADR(&tr->tr_addr);
				if (client_v6_capable)
					it->v6_flag = 0;
			} else {
				if (!client_v6_capable)
					continue;
				it->local_address6
				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
				it->v6_flag = 1;
			}
			it->trap_port = NSRCPORT(&tr->tr_addr);
			it->sequence = htons(tr->tr_sequence);
			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
			it->resets = htonl((u_int32)tr->tr_resets);
			it->flags = htonl((u_int32)tr->tr_flags);
			it = (struct info_trap *)more_pkt();
		}
	}
	flush_pkt();
}


/*
 * req_set_trap - configure a trap
 */
static void
req_set_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_setclr_trap(srcadr, inter, inpkt, 1);
}



/*
 * req_clr_trap - unconfigure a trap
 */
static void
req_clr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_setclr_trap(srcadr,
		       inter, inpkt, 0);
}



/*
 * do_setclr_trap - do the grunge work of (un)configuring a trap
 */
static void
do_setclr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	int set
	)
{
	register struct conf_trap *ct;
	register endpt *linter;
	int res;
	sockaddr_u laddr;

	/*
	 * Prepare sockaddr
	 */
	ZERO_SOCK(&laddr);
	AF(&laddr) = AF(srcadr);
	SET_PORT(&laddr, NTP_PORT);

	/*
	 * Restrict ourselves to one item only.  This eliminates
	 * the error reporting problem.
	 */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	ct = (struct conf_trap *)&inpkt->u;

	/*
	 * Look for the local interface.  If none, use the default.
	 */
	if (ct->local_address == 0) {
		linter = any_interface;
	} else {
		if (IS_IPV4(&laddr))
			NSRCADR(&laddr) = ct->local_address;
		else
			SOCK_ADDR6(&laddr) = ct->local_address6;
		linter = findinterface(&laddr);
		if (NULL == linter) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}
	}

	if (IS_IPV4(&laddr))
		NSRCADR(&laddr) = ct->trap_address;
	else
		SOCK_ADDR6(&laddr) = ct->trap_address6;
	if (ct->trap_port)
		NSRCPORT(&laddr) = ct->trap_port;
	else
		SET_PORT(&laddr, TRAPPORT);

	if (set) {
		res = ctlsettrap(&laddr, linter, 0,
				 INFO_VERSION(inpkt->rm_vn_mode));
	} else {
		res = ctlclrtrap(&laddr, linter, 0);
	}

	if (!res) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
	} else {
		req_ack(srcadr, inter, inpkt, INFO_OKAY);
	}
	return;
}

/*
 * Validate a request packet for a new request or control key:
 *  - only one item allowed
 *  - key must be valid (that is, known, and not in the autokey range)
 */
static void
set_keyid_checked(
	keyid_t *into,
	const char *what,
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	keyid_t *pkeyid;
	keyid_t tmpkey;

	/* restrict ourselves to one item only */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
			what);
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/* plug the new key from the packet */
	pkeyid = (keyid_t *)&inpkt->u;
	tmpkey = ntohl(*pkeyid);

	/* validate the new key id, claim data error on failure */
	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
			what, (long)tmpkey);
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	/* if we arrive here, the key is good -- use it */
	*into = tmpkey;
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}

/*
 * set_request_keyid - set the keyid used to authenticate requests
 */
static void
set_request_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	set_keyid_checked(&info_auth_keyid, "request",
			  srcadr, inter, inpkt);
}



/*
 * set_control_keyid - set the keyid used to authenticate control messages
 */
static void
set_control_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	set_keyid_checked(&ctl_auth_keyid, "control",
			  srcadr, inter, inpkt);
}



/*
 * get_ctl_stats - return some stats concerning the control message module
 */
static void
get_ctl_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_control *ic;

	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
						sizeof(struct info_control));

	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
	ic->numctlreq = htonl((u_int32)numctlreq);
	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
	ic->numctlresponses = htonl((u_int32)numctlresponses);
	ic->numctlfrags = htonl((u_int32)numctlfrags);
	ic->numctlerrors = htonl((u_int32)numctlerrors);
	ic->numctltooshort = htonl((u_int32)numctltooshort);
	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
	ic->numctlbadop = htonl((u_int32)numctlbadop);
	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);

	(void) more_pkt();
	flush_pkt();
}


#ifdef KERNEL_PLL
/*
 * get_kernel_info - get kernel pll/pps information
 */
static void
get_kernel_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_kernel *ik;
	struct timex ntx;

	if (!pll_control) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	ZERO(ntx);
	if (ntp_adjtime(&ntx) < 0)
		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
					       sizeof(struct info_kernel));

	/*
	 * pll variables
	 */
	ik->offset = htonl((u_int32)ntx.offset);
	ik->freq = htonl((u_int32)ntx.freq);
	ik->maxerror = htonl((u_int32)ntx.maxerror);
	ik->esterror = htonl((u_int32)ntx.esterror);
	ik->status = htons(ntx.status);
	ik->constant = htonl((u_int32)ntx.constant);
	ik->precision = htonl((u_int32)ntx.precision);
	ik->tolerance = htonl((u_int32)ntx.tolerance);

	/*
	 * pps variables
	 */
	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
	ik->jitter = htonl((u_int32)ntx.jitter);
	ik->shift = htons(ntx.shift);
	ik->stabil = htonl((u_int32)ntx.stabil);
	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
	ik->calcnt = htonl((u_int32)ntx.calcnt);
	ik->errcnt = htonl((u_int32)ntx.errcnt);
	ik->stbcnt = htonl((u_int32)ntx.stbcnt);

	(void) more_pkt();
	flush_pkt();
}
#endif /* KERNEL_PLL */


#ifdef REFCLOCK
/*
 * get_clock_info - get info about a clock
 */
static void
get_clock_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_clock *ic;
	register u_int32 *clkaddr;
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = &inpkt->u.u32[0];

	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
					      sizeof(struct info_clock));

	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		clock_stat.kv_list = (struct ctl_var *)0;

		refclock_control(&addr, NULL, &clock_stat);

		ic->clockadr = NSRCADR(&addr);
		ic->type = clock_stat.type;
		ic->flags = clock_stat.flags;
		ic->lastevent = clock_stat.lastevent;
		ic->currentstatus = clock_stat.currentstatus;
		ic->polls = htonl((u_int32)clock_stat.polls);
		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
		ic->badformat = htonl((u_int32)clock_stat.badformat);
		ic->baddata = htonl((u_int32)clock_stat.baddata);
		ic->timestarted = htonl((u_int32)clock_stat.timereset);
		DTOLFP(clock_stat.fudgetime1, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime1);
		DTOLFP(clock_stat.fudgetime2, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime2);
		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
		/* [Bug3527] Backward Incompatible: ic->fudgeval2 is
		 * a string, instantiated via memcpy() so there is no
		 * endian issue to correct.
		 */
#ifdef DISABLE_BUG3527_FIX
		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
#else
		ic->fudgeval2 = clock_stat.fudgeval2;
#endif

		free_varlist(clock_stat.kv_list);

		ic = (struct info_clock *)more_pkt();
	}
	flush_pkt();
}



/*
 * set_clock_fudge - set a clock's fudge factors
 */
static void
set_clock_fudge(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct conf_fudge *cf;
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO(addr);
	ZERO(clock_stat);
	items = INFO_NITEMS(inpkt->err_nitems);
	cf = (struct conf_fudge *)&inpkt->u;

	while (items-- > 0) {
		AF(&addr) = AF_INET;
		NSRCADR(&addr) = cf->clockadr;
#ifdef ISC_PLATFORM_HAVESALEN
		addr.sa.sa_len = SOCKLEN(&addr);
#endif
		SET_PORT(&addr, NTP_PORT);
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		switch (ntohl(cf->which)) {
		case FUDGE_TIME1:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime1);
			clock_stat.haveflags = CLK_HAVETIME1;
			break;
		case FUDGE_TIME2:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime2);
			clock_stat.haveflags = CLK_HAVETIME2;
			break;
		case FUDGE_VAL1:
			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL1;
			break;
		case FUDGE_VAL2:
			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL2;
			break;
		case FUDGE_FLAGS:
			clock_stat.flags = (u_char)(ntohl(cf->fudgeval_flags) & 0xf);
			clock_stat.haveflags =
				(CLK_HAVEFLAG1 | CLK_HAVEFLAG2 | CLK_HAVEFLAG3 | CLK_HAVEFLAG4);
			break;
		default:
			msyslog(LOG_ERR, "set_clock_fudge: default!");
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}

		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
#endif

#ifdef REFCLOCK
/*
 * get_clkbug_info - get debugging info about a clock
 */
static void
get_clkbug_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register int i;
	register struct info_clkbug *ic;
	register u_int32 *clkaddr;
	register int items;
	struct refclockbug bug;
	sockaddr_u addr;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = (u_int32 *)&inpkt->u;

	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
					       sizeof(struct info_clkbug));

	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ZERO(bug);
		refclock_buginfo(&addr, &bug);
		if (bug.nvalues == 0 && bug.ntimes == 0) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ic->clockadr = NSRCADR(&addr);
		i = bug.nvalues;
		if (i > NUMCBUGVALUES)
			i = NUMCBUGVALUES;
		ic->nvalues = (u_char)i;
		ic->svalues = htons((u_short)(bug.svalues & ((1 << i) - 1)));
		while (--i >= 0)
			ic->values[i] = htonl(bug.values[i]);

		i = bug.ntimes;
		if (i > NUMCBUGTIMES)
			i = NUMCBUGTIMES;
		ic->ntimes = (u_char)i;
		ic->stimes = htonl(bug.stimes);
		while (--i >= 0) {
			HTONL_FP(&bug.times[i], &ic->times[i]);
		}

		ic = (struct info_clkbug *)more_pkt();
	}
	flush_pkt();
}
#endif

/*
 * fill_info_if_stats - receiver of interface structures; fills one
 *			info_if_stats record per endpoint
 */
static void
fill_info_if_stats(void *data, interface_info_t *interface_info)
{
	struct info_if_stats **ifsp = (struct info_if_stats **)data;
	struct info_if_stats *ifs = *ifsp;
	endpt *ep = interface_info->ep;

	if (NULL == ifs)
		return;

	ZERO(*ifs);

	if (IS_IPV6(&ep->sin)) {
		if (!client_v6_capable)
			return;
		ifs->v6_flag = 1;
		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
	} else {
		ifs->v6_flag = 0;
		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
	}
	ifs->v6_flag = htonl(ifs->v6_flag);
	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
	ifs->family = htons(ep->family);
	ifs->flags = htonl(ep->flags);
	ifs->last_ttl = htonl(ep->last_ttl);
	ifs->num_mcast = htonl(ep->num_mcast);
	ifs->received = htonl(ep->received);
	ifs->sent = htonl(ep->sent);
	ifs->notsent = htonl(ep->notsent);
	ifs->ifindex = htonl(ep->ifindex);
	/* scope is no longer kept in endpt; it typically lives in the
	 * in6_addr itself, so report the ifindex instead */
	ifs->scopeid = ifs->ifindex;
	ifs->ifnum = htonl(ep->ifnum);
	ifs->uptime = htonl(current_time - ep->starttime);
	ifs->ignore_packets = ep->ignore_packets;
	ifs->peercnt = htonl(ep->peercnt);
	ifs->action = interface_info->action;

	*ifsp = (struct info_if_stats *)more_pkt();
}

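/*
 * Editorial note (illustrative only): fill_info_if_stats() is used as
 * an enumeration callback.  The caller primes an output cursor with
 * prepare_pkt(), passes the cursor's address as the callback's "data"
 * argument, and the callback advances it with more_pkt() after filling
 * each record; if no further output slots are available, more_pkt()
 * yields NULL and the callback skips the remaining endpoints.  Both
 * get_if_stats() and do_if_reload() below follow this pattern:
 *
 *	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
 *			v6sizeof(struct info_if_stats));
 *	interface_enumerate(fill_info_if_stats, &ifs);
 *	flush_pkt();
 */
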
/*
 * get_if_stats - get interface statistics
 */
static void
get_if_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_if_stats *ifs;

	DPRINTF(3, ("wants interface statistics\n"));

	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
						  v6sizeof(struct info_if_stats));

	interface_enumerate(fill_info_if_stats, &ifs);

	flush_pkt();
}

static void
do_if_reload(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_if_stats *ifs;

	DPRINTF(3, ("wants interface reload\n"));

	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
						  v6sizeof(struct info_if_stats));

	interface_update(fill_info_if_stats, &ifs);

	flush_pkt();
}

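/*
 * Editorial note (illustrative only): the read-only handlers in this
 * file share a common skeleton -- reserve a response record with
 * prepare_pkt(), fill it in network byte order, account for it with
 * more_pkt(), and transmit with flush_pkt(); errors are instead
 * signalled with req_ack() and an INFO_ERR_* code.  A hypothetical
 * handler (the name get_foo_info and struct info_foo are made up for
 * this sketch) would look roughly like:
 *
 *	static void
 *	get_foo_info(
 *		sockaddr_u *srcadr,
 *		endpt *inter,
 *		struct req_pkt *inpkt
 *		)
 *	{
 *		struct info_foo *ifoo;
 *
 *		ifoo = (struct info_foo *)prepare_pkt(srcadr, inter, inpkt,
 *				v6sizeof(struct info_foo));
 *		ifoo->counter = htonl((u_int32)some_counter);
 *		(void) more_pkt();
 *		flush_pkt();
 *	}
 *
 * together with a matching entry in the ntp_codes[] dispatch table.
 */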