/* $NetBSD: ntp_scanner.c,v 1.15 2024/08/18 20:47:18 christos Exp $ */


/* ntp_scanner.c
 *
 * The source code for a simple lexical analyzer.
 *
 * Written By:	Sachin Kamboj
 *		University of Delaware
 *		Newark, DE 19711
 * Copyright (c) 2006
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>

#include "ntpd.h"
#include "ntp_config.h"
#include "ntpsim.h"
#include "ntp_scanner.h"
#include "ntp_parser.h"

/* ntp_keyword.h declares finite state machine and token text */
#include "ntp_keyword.h"



/* SCANNER GLOBAL VARIABLES
 * ------------------------
 */

#define MAX_LEXEME 128		/* The maximum size of a lexeme */
char yytext[MAX_LEXEME];	/* Buffer for storing the input text/lexeme */
u_int32 conf_file_sum;		/* Simple sum of characters read */

static struct FILE_INFO * lex_stack = NULL;



/* CONSTANTS
 * ---------
 */
const char special_chars[] = "{}(),;|=";


/* FUNCTIONS
 * ---------
 */

static int is_keyword(char *lexeme, follby *pfollowedby);


/*
 * keyword() - Return the keyword associated with token T_ identifier.
 *	       See also token_name() for the string-ized T_ identifier.
 *	       Example: keyword(T_Server) returns "server"
 *		        token_name(T_Server) returns "T_Server"
 */
const char *
keyword(
	int token
	)
{
	size_t i;
	const char *text;
	static char sbuf[64];

	i = token - LOWEST_KEYWORD_ID;

	switch (token) {
	case T_ServerresponseFuzz:
		text = "serverresponse fuzz";
		break;

	default:
		if (i < COUNTOF(keyword_text)) {
			text = keyword_text[i];
		} else {
			snprintf(sbuf, sizeof sbuf,
				 "(keyword #%d not found)", token);
			text = sbuf;
		}
	}

	return text;
}


/* FILE & STRING BUFFER INTERFACE
 * ------------------------------
 *
 * This started out as a couple of wrapper functions around the
 * standard C fgetc and ungetc functions in order to include positional
 * bookkeeping. Alas, this is no longer a good solution with nested
 * input files and the possibility to send configuration commands via
 * 'ntpdc' and 'ntpq'.
 *
 * Now there are a few functions to maintain a stack of nested input
 * sources (though nesting is only allowed for disk files) and from the
 * scanner / parser point of view there's no difference between the two
 * types of sources.
 *
 * The 'fgetc()' / 'ungetc()' replacements now operate on a FILE_INFO
 * structure. Instead of trying different 'ungetc()' strategies for file
 * and buffer based parsing, we keep the backup char in our own
 * FILE_INFO structure. This is sufficient, as the parser does *not*
 * jump around via 'seek' or the like, and there's no need to
 * check/clear the backup store in other places than 'lex_getch()'.
 */
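
/* A minimal sketch (not part of the build) of how the helpers below
 * are meant to be used: read characters with lex_getch() and push at
 * most one character back with lex_ungetch(). The skip_spaces() name
 * is hypothetical and exists only for illustration.
 */
#if 0
static int
skip_spaces(struct FILE_INFO *stream)
{
	int ch;

	/* consume whitespace, then push back the first non-space char */
	while ((ch = lex_getch(stream)) != EOF && isspace(ch))
		continue;
	if (EOF != ch)
		lex_ungetch(ch, stream);	/* single-char backup only */
	return ch;
}
#endif
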
/*
 * Allocate an info structure and attach it to a file.
 *
 * Note: When 'mode' is NULL, then the INFO block will be set up to
 * contain a NULL file pointer, as suited for remote config command
 * parsing. Otherwise having a NULL file pointer is considered an error,
 * and a NULL info block pointer is returned to indicate failure!
 *
 * Note: We use a variable-sized structure to hold a copy of the file
 * name (or, more properly, the input source description). This is more
 * secure than keeping a reference to some other storage that might go
 * out of scope.
 */
static struct FILE_INFO *
lex_open(
	const char *path,
	const char *mode
	)
{
	struct FILE_INFO *stream;
	size_t nnambuf;

	nnambuf = strlen(path);
	stream = emalloc_zero(sizeof(*stream) + nnambuf);
	stream->curpos.nline = 1;
	stream->backch = EOF;
	/* copy name with memcpy -- trailing NUL already there! */
	memcpy(stream->fname, path, nnambuf);

	if (NULL != mode) {
		stream->fpi = fopen(path, mode);
		if (NULL == stream->fpi) {
			free(stream);
			stream = NULL;
		}
	}
	return stream;
}
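
/* A hedged usage sketch (not compiled): lex_open() with a mode string
 * binds the FILE_INFO to a disk file, while a NULL mode yields a
 * buffer-backed source for remote configuration commands. The path
 * strings here are placeholders, not values used by ntpd.
 */
#if 0
static void
lex_open_example(void)
{
	struct FILE_INFO *from_file;
	struct FILE_INFO *from_buffer;

	/* disk file: returns NULL if the file cannot be opened */
	from_file = lex_open("/etc/ntp.conf", "r");

	/* remote config: no file pointer, input comes from remote_config */
	from_buffer = lex_open("remote config", NULL);

	lex_close(from_buffer);
	lex_close(from_file);
}
#endif
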
/* get next character from buffer or file. This will return any putback
 * character first; it will also make sure the last line is at least
 * virtually terminated with a '\n'.
 */
static int
lex_getch(
	struct FILE_INFO *stream
	)
{
	int ch;

	if (NULL == stream || stream->force_eof)
		return EOF;

	if (EOF != stream->backch) {
		ch = stream->backch;
		stream->backch = EOF;
		if (stream->fpi)
			conf_file_sum += ch;
		stream->curpos.ncol++;
	} else if (stream->fpi) {
		/* fetch next 7-bit ASCII char (or EOF) from file */
		while ((ch = fgetc(stream->fpi)) != EOF && ch > SCHAR_MAX)
			stream->curpos.ncol++;
		if (EOF != ch) {
			conf_file_sum += ch;
			stream->curpos.ncol++;
		}
	} else {
		/* fetch next 7-bit ASCII char from buffer */
		const char * scan;
		scan = &remote_config.buffer[remote_config.pos];
		while ((ch = (u_char)*scan) > SCHAR_MAX) {
			scan++;
			stream->curpos.ncol++;
		}
		if ('\0' != ch) {
			scan++;
			stream->curpos.ncol++;
		} else {
			ch = EOF;
		}
		remote_config.pos = (int)(scan - remote_config.buffer);
	}

	/* If the last line ends without '\n', generate one. This
	 * happens most likely on Windows, where editors often have a
	 * sloppy concept of a line.
	 */
	if (EOF == ch && stream->curpos.ncol != 0)
		ch = '\n';

	/* update scan position tallies */
	if (ch == '\n') {
		stream->bakpos = stream->curpos;
		stream->curpos.nline++;
		stream->curpos.ncol = 0;
	}

	return ch;
}

/* Note: lex_ungetch will fail to track more than one line of push
 * back. But since it guarantees only one char of back storage anyway,
 * this should not be a problem.
 */
static int
lex_ungetch(
	int ch,
	struct FILE_INFO *stream
	)
{
	/* check preconditions */
	if (NULL == stream || stream->force_eof)
		return EOF;
	if (EOF != stream->backch || EOF == ch)
		return EOF;

	/* keep for later reference and update checksum */
	stream->backch = (u_char)ch;
	if (stream->fpi)
		conf_file_sum -= stream->backch;

	/* update position */
	if (stream->backch == '\n') {
		stream->curpos = stream->bakpos;
		stream->bakpos.ncol = -1;
	}
	stream->curpos.ncol--;
	return stream->backch;
}

/* dispose of an input structure. If the file pointer is not NULL, close
 * the file. This function does not check the result of 'fclose()'.
 */
static void
lex_close(
	struct FILE_INFO *stream
	)
{
	if (NULL != stream) {
		if (NULL != stream->fpi)
			fclose(stream->fpi);
		free(stream);
	}
}

/* INPUT STACK
 * -----------
 *
 * Nested input sources are a bit tricky at first glance. We deal with
 * this problem using a stack of input sources, that is, a forward
 * linked list of FILE_INFO structs.
 *
 * This stack is never empty during parsing; while an encounter with EOF
 * can and will remove nested input sources, removing the last element
 * in the stack will not work during parsing, and the EOF condition of
 * the outermost input file remains until the parser folds up.
 */

static struct FILE_INFO *
drop_stack_do(
	struct FILE_INFO * head
	)
{
	struct FILE_INFO * tail;

	while (NULL != head) {
		tail = head->st_next;
		lex_close(head);
		head = tail;
	}
	return head;
}



/* Create a singleton input source on an empty lexer stack. This will
 * fail if there is already an input source, or if the underlying disk
 * file cannot be opened.
 *
 * Returns TRUE if a new input object was successfully created.
 */
int/*BOOL*/
lex_init_stack(
	const char * path,
	const char * mode
	)
{
	if (NULL != lex_stack || NULL == path)
		return FALSE;

	lex_stack = lex_open(path, mode);
	return (NULL != lex_stack);
}

/* This removes *all* input sources from the stack, leaving the head
 * pointer as NULL. Any attempt to parse in that state is likely to bomb
 * with segmentation faults or the like.
 *
 * In other words: Use this to clean up after parsing, and do not parse
 * anything until the next 'lex_init_stack()' succeeds.
 */
void
lex_drop_stack(void)
{
	lex_stack = drop_stack_do(lex_stack);
}

/* Flush the lexer input stack: This drops all nested input objects on
 * the stack (but keeps the current top-of-stack) and marks the
 * top-of-stack as inactive. Any further calls to lex_getch yield only
 * EOF, and it's no longer possible to push something back.
 *
 * Returns TRUE if there is a head element (top-of-stack) that was not
 * in the force-eof mode before this call.
 */
int/*BOOL*/
lex_flush_stack(void)
{
	int retv = FALSE;

	if (NULL != lex_stack) {
		retv = !lex_stack->force_eof;
		lex_stack->force_eof = TRUE;
		lex_stack->st_next = drop_stack_do(
					lex_stack->st_next);
	}
	return retv;
}

/* Push another file on the parsing stack. If the mode is NULL, create a
 * FILE_INFO suitable for in-memory parsing; otherwise, create a
 * FILE_INFO that is bound to a local/disk file. Note that 'path' must
 * not be NULL, or the function will fail.
 *
 * Returns TRUE if a new info record was pushed onto the stack.
 */
int/*BOOL*/ lex_push_file(
	const char * path,
	const char * mode
	)
{
	struct FILE_INFO * next = NULL;

	if (NULL != path) {
		next = lex_open(path, mode);
		if (NULL != next) {
			next->st_next = lex_stack;
			lex_stack = next;
		}
	}
	return (NULL != next);
}
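
/* A hedged sketch (not compiled) of the include-stack lifecycle built
 * from lex_init_stack(), lex_push_file(), lex_pop_file() and
 * lex_drop_stack(). The file names are placeholders for illustration,
 * not defaults used by ntpd.
 */
#if 0
static void
include_stack_example(void)
{
	if (!lex_init_stack("example.conf", "r"))
		return;		/* nothing to parse */

	/* an 'includefile' directive nests another disk file */
	if (!lex_push_file("example-extra.conf", "r"))
		msyslog(LOG_ERR, "could not open include file");

	/* on EOF of the nested source, drop back to the outer file;
	 * the singleton bottom entry is never popped */
	(void)lex_pop_file();

	/* after parsing, clear everything */
	lex_drop_stack();
}
#endif
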
/* Pop, close & free the top of the include stack, unless the stack
 * contains only a singleton input object. In that case the function
 * fails, because the parser does not expect the input stack to be
 * empty.
 *
 * Returns TRUE if an object was successfully popped from the stack.
 */
int/*BOOL*/
lex_pop_file(void)
{
	struct FILE_INFO * head = lex_stack;
	struct FILE_INFO * tail = NULL;

	if (NULL != head) {
		tail = head->st_next;
		if (NULL != tail) {
			lex_stack = tail;
			lex_close(head);
		}
	}
	return (NULL != tail);
}

/* Get include nesting level. This currently loops over the stack and
 * counts elements; but since this is of concern only with an include
 * statement and the nesting depth has a small limit, there's no
 * bottleneck expected here.
 *
 * Returns the nesting level of includes, that is, the current depth of
 * the lexer input stack.
 */
size_t
lex_level(void)
{
	size_t            cnt = 0;
	struct FILE_INFO *ipf = lex_stack;

	while (NULL != ipf) {
		cnt++;
		ipf = ipf->st_next;
	}
	return cnt;
}

/* check if the current input is from a file */
int/*BOOL*/
lex_from_file(void)
{
	return (NULL != lex_stack) && (NULL != lex_stack->fpi);
}

struct FILE_INFO *
lex_current(void)
{
	/* this became so simple, it could be a macro. But then,
	 * lex_stack would need to be global...
	 */
	return lex_stack;
}


/* STATE MACHINES
 * --------------
 */

/* Keywords */
static int
is_keyword(
	char *lexeme,
	follby *pfollowedby
	)
{
	follby fb;
	int curr_s;		/* current state index */
	int token;
	int i;

	curr_s = SCANNER_INIT_S;
	token = 0;

	for (i = 0; lexeme[i]; i++) {
		while (curr_s && (lexeme[i] != SS_CH(sst[curr_s])))
			curr_s = SS_OTHER_N(sst[curr_s]);

		if (curr_s && (lexeme[i] == SS_CH(sst[curr_s]))) {
			if ('\0' == lexeme[i + 1]
			    && FOLLBY_NON_ACCEPTING
			       != SS_FB(sst[curr_s])) {
				fb = SS_FB(sst[curr_s]);
				*pfollowedby = fb;
				token = curr_s;
				break;
			}
			curr_s = SS_MATCH_N(sst[curr_s]);
		} else
			break;
	}

	return token;
}


/* Integer */
static int
is_integer(
	char *lexeme
	)
{
	int	i;
	int	is_neg;
	u_int	u_val;

	i = 0;

	/* Allow a leading minus sign */
	if (lexeme[i] == '-') {
		i++;
		is_neg = TRUE;
	} else {
		is_neg = FALSE;
	}

	/* Check that all the remaining characters are digits */
	for (; lexeme[i] != '\0'; i++) {
		if (!isdigit((u_char)lexeme[i]))
			return FALSE;
	}

	if (is_neg)
		return TRUE;

	/* Reject numbers that fit in unsigned but not in signed int */
	if (1 == sscanf(lexeme, "%u", &u_val))
		return (u_val <= INT_MAX);
	else
		return FALSE;
}


/* U_int -- assumes is_integer() has returned FALSE */
static int
is_u_int(
	char *lexeme
	)
{
	int	i;
	int	is_hex;

	i = 0;
	if ('0' == lexeme[i] && 'x' == tolower((u_char)lexeme[i + 1])) {
		i += 2;
		is_hex = TRUE;
	} else {
		is_hex = FALSE;
	}

	/* Check that all the remaining characters are valid digits */
	for (; lexeme[i] != '\0'; i++) {
		if (is_hex && !isxdigit((u_char)lexeme[i]))
			return FALSE;
		if (!is_hex && !isdigit((u_char)lexeme[i]))
			return FALSE;
	}

	return TRUE;
}
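
/* Illustrative acceptance examples (added commentary, values made up)
 * for the classifiers above and the is_double() routine below:
 *
 *   is_integer("-123")        -> TRUE   (leading '-' is allowed)
 *   is_integer("3000000000")  -> FALSE  (fits u_int but not int)
 *   is_u_int("3000000000")    -> TRUE
 *   is_u_int("0x1f")          -> TRUE   (hex with leading 0x)
 *   is_double("1.5e-3")       -> 1      (optional fraction/exponent)
 *   is_double(".")            -> 0      (needs at least one digit)
 */
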
/* Double */
static int
is_double(
	char *lexeme
	)
{
	u_int	num_digits = 0;	/* Number of digits read */
	u_int	i;

	i = 0;

	/* Check for an optional '+' or '-' */
	if ('+' == lexeme[i] || '-' == lexeme[i])
		i++;

	/* Read the integer part */
	for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++)
		num_digits++;

	/* Check for the optional decimal point */
	if ('.' == lexeme[i]) {
		i++;
		/* Check for any digits after the decimal point */
		for (; lexeme[i] && isdigit((u_char)lexeme[i]); i++)
			num_digits++;
	}

	/*
	 * The total number of digits in the integer part and the
	 * fraction part must not be zero at this point.
	 */
	if (!num_digits)
		return 0;

	/* Check if we are done */
	if (!lexeme[i])
		return 1;

	/* There is still more input, read the exponent */
	if ('e' == tolower((u_char)lexeme[i]))
		i++;
	else
		return 0;

	/* Read an optional sign */
	if ('+' == lexeme[i] || '-' == lexeme[i])
		i++;

	/* Now read the exponent part */
	while (lexeme[i] && isdigit((u_char)lexeme[i]))
		i++;

	/* Check if we are done */
	if (!lexeme[i])
		return 1;
	else
		return 0;
}


/* is_special() - Test whether a character is a special token */
static inline int
is_special(
	int ch
	)
{
	return strchr(special_chars, ch) != NULL;
}


static int
is_EOC(
	int ch
	)
{
	if ((old_config_style && (ch == '\n')) ||
	    (!old_config_style && (ch == ';')))
		return 1;
	return 0;
}


char *
quote_if_needed(char *str)
{
	char *	ret;
	size_t	len;
	size_t	octets;

	len = strlen(str);
	octets = len + 2 + 1;
	ret = emalloc(octets);
	if ('"' != str[0]
	    && (strcspn(str, special_chars) < len
		|| strchr(str, ' ') != NULL)) {
		snprintf(ret, octets, "\"%s\"", str);
	} else
		strlcpy(ret, str, octets);

	return ret;
}
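
/* Behavior sketch for quote_if_needed() above (illustrative inputs,
 * not from a real configuration): a string containing a space or one
 * of the special_chars is wrapped in double quotes; anything else, or
 * a string already starting with '"', is copied unchanged. The caller
 * owns the returned buffer and is expected to free() it.
 *
 *   quote_if_needed("The Boss")  ->  "\"The Boss\""
 *   quote_if_needed("server")    ->  "server"
 */
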
static int
create_string_token(
	char *lexeme
	)
{
	char *pch;

	/*
	 * ignore end of line whitespace
	 */
	pch = lexeme;
	while (*pch && isspace((u_char)*pch))
		pch++;

	if (!*pch) {
		yylval.Integer = T_EOC;
		return yylval.Integer;
	}

	yylval.String = estrdup(lexeme);
	return T_String;
}


/*
 * yylex() - function that does the actual scanning.
 * Bison expects this function to be called yylex and for it to take no
 * input and return an int.
 * Conceptually yylex "returns" yylval as well as the actual return
 * value representing the token or type.
 */
int
yylex(void)
{
	static follby	followedby = FOLLBY_TOKEN;
	size_t		i;
	int		instring;
	int		yylval_was_set;
	int		converted;
	int		token;		/* The return value */
	int		ch;

	instring = FALSE;
	yylval_was_set = FALSE;

	do {
		/* Ignore whitespace at the beginning */
		while (EOF != (ch = lex_getch(lex_stack)) &&
		       isspace(ch) &&
		       !is_EOC(ch))
			;	/* Null Statement */

		if (EOF == ch) {

			if ( ! lex_pop_file())
				return 0;
			token = T_EOC;
			goto normal_return;

		} else if (is_EOC(ch)) {

			/* end FOLLBY_STRINGS_TO_EOC effect */
			followedby = FOLLBY_TOKEN;
			token = T_EOC;
			goto normal_return;

		} else if (is_special(ch) && FOLLBY_TOKEN == followedby) {
			/* special chars are their own token values */
			token = ch;
			/*
			 * '=' outside simulator configuration implies
			 * a single string following as in:
			 * setvar Owner = "The Boss" default
			 */
			if ('=' == ch && old_config_style)
				followedby = FOLLBY_STRING;
			yytext[0] = (char)ch;
			yytext[1] = '\0';
			goto normal_return;
		} else
			lex_ungetch(ch, lex_stack);

		/* save the position of start of the token */
		lex_stack->tokpos = lex_stack->curpos;

		/* Read in the lexeme */
		i = 0;
		while (EOF != (ch = lex_getch(lex_stack))) {

			yytext[i] = (char)ch;

			/* Break on whitespace or a special character */
			if (isspace(ch) || is_EOC(ch)
			    || '"' == ch
			    || (FOLLBY_TOKEN == followedby
				&& is_special(ch)))
				break;

			/* Read the rest of the line on reading a start
			   of comment character */
			if ('#' == ch) {
				while (EOF != (ch = lex_getch(lex_stack))
				       && '\n' != ch)
					;	/* Null Statement */
				break;
			}

			i++;
			if (i >= COUNTOF(yytext))
				goto lex_too_long;
		}
		/* Pick up all of the string between " marks, up to the
		 * end of line. If we make it to EOL without a
		 * terminating " assume it for them.
		 *
		 * XXX - HMS: I'm not sure we want to assume the closing "
		 */
		if ('"' == ch) {
			instring = TRUE;
			while (EOF != (ch = lex_getch(lex_stack)) &&
			       ch != '"' && ch != '\n') {
				yytext[i++] = (char)ch;
				if (i >= COUNTOF(yytext))
					goto lex_too_long;
			}
			/*
			 * yytext[i] will be pushed back as not part of
			 * this lexeme, but any closing quote should
			 * not be pushed back, so we read another char.
			 */
			if ('"' == ch)
				ch = lex_getch(lex_stack);
		}
		/* Pushback the last character read that is not a part
		 * of this lexeme. This fails silently if ch is EOF,
		 * but then the EOF condition persists and is handled on
		 * the next turn by the include stack mechanism.
		 */
		lex_ungetch(ch, lex_stack);

		yytext[i] = '\0';
	} while (i == 0);
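
	/* Classification order (added commentary): the lexeme gathered
	 * above is matched as keyword first, then signed integer, then
	 * unsigned/hex integer, then double; anything left over becomes
	 * a T_String. The order matters because a lexeme like "123"
	 * would satisfy the string rule as well.
	 */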

	/* Now return the desired token */

	/* First make sure that the parser is *not* expecting a string
	 * as the next token (based on the previous token that was
	 * returned) and that we haven't read a string.
	 */

	if (followedby == FOLLBY_TOKEN && !instring) {
		token = is_keyword(yytext, &followedby);
		if (token) {
			/*
			 * T_Server is exceptional as it forces the
			 * following token to be a string in the
			 * non-simulator parts of the configuration,
			 * but in the simulator configuration section,
			 * "server" is followed by "=" which must be
			 * recognized as a token, not a string.
			 */
			if (T_Server == token && !old_config_style)
				followedby = FOLLBY_TOKEN;
			goto normal_return;
		} else if (is_integer(yytext)) {
			yylval_was_set = TRUE;
			errno = 0;
			if ((yylval.Integer = strtol(yytext, NULL, 10)) == 0
			    && ((errno == EINVAL) || (errno == ERANGE))) {
				msyslog(LOG_ERR,
					"Integer cannot be represented: %s",
					yytext);
				if (lex_from_file()) {
					exit(1);
				} else {
					/* force end of parsing */
					yylval.Integer = 0;
					return 0;
				}
			}
			token = T_Integer;
			goto normal_return;
		} else if (is_u_int(yytext)) {
			yylval_was_set = TRUE;
			if ('0' == yytext[0] &&
			    'x' == tolower((u_char)yytext[1]))
				converted = sscanf(&yytext[2], "%x",
						   &yylval.U_int);
			else
				converted = sscanf(yytext, "%u",
						   &yylval.U_int);
			if (1 != converted) {
				msyslog(LOG_ERR,
					"U_int cannot be represented: %s",
					yytext);
				if (lex_from_file()) {
					exit(1);
				} else {
					/* force end of parsing */
					yylval.Integer = 0;
					return 0;
				}
			}
			token = T_U_int;
			goto normal_return;
		} else if (is_double(yytext)) {
			yylval_was_set = TRUE;
			errno = 0;
			if ((yylval.Double = atof(yytext)) == 0 && errno == ERANGE) {
				msyslog(LOG_ERR,
					"Double too large to represent: %s",
					yytext);
				exit(1);
			} else {
				token = T_Double;
				goto normal_return;
			}
		} else {
			/* Default: Everything is a string */
			yylval_was_set = TRUE;
			token = create_string_token(yytext);
			goto normal_return;
		}
	}

	/*
	 * Either followedby is not FOLLBY_TOKEN or this lexeme is part
	 * of a string. Hence, we need to return T_String.
	 *
	 * _Except_ we might have a -4 or -6 flag on an association
	 * configuration line (server, peer, pool, etc.).
	 *
	 * This is a terrible hack, but the grammar is ambiguous so we
	 * don't have a choice. [SK]
	 *
	 * The ambiguity is in the keyword scanner, not ntp_parser.y.
	 * We do not require server addresses be quoted in ntp.conf,
	 * complicating the scanner's job. To avoid trying (and
	 * failing) to match an IP address or DNS name to a keyword,
	 * the association keywords use FOLLBY_STRING in the keyword
	 * table, which tells the scanner to force the next token to be
	 * a T_String, so it does not try to match a keyword but rather
	 * expects a string when -4/-6 modifiers to server, peer, etc.
	 * are encountered.
	 * restrict -4 and restrict -6 parsing works correctly without
	 * this hack, as restrict uses FOLLBY_TOKEN. [DH]
	 */
	if ('-' == yytext[0]) {
		if ('4' == yytext[1]) {
			token = T_Ipv4_flag;
			goto normal_return;
		} else if ('6' == yytext[1]) {
			token = T_Ipv6_flag;
			goto normal_return;
		}
	}

	if (FOLLBY_STRING == followedby)
		followedby = FOLLBY_TOKEN;

	yylval_was_set = TRUE;
	token = create_string_token(yytext);

  normal_return:
	if (T_EOC == token)
		DPRINTF(10, ("\t<end of command>\n"));
	else
		DPRINTF(10, ("yylex: lexeme '%s' -> %s\n", yytext,
			     token_name(token)));

	if (!yylval_was_set)
		yylval.Integer = token;

	return token;

  lex_too_long:
	/*
	 * DLH: What is the purpose of the limit of 50?
	 * Is there any reason for yytext[] to be bigger?
	 */
	yytext[min(sizeof(yytext) - 1, 50)] = 0;
	msyslog(LOG_ERR,
		"configuration item on line %d longer than limit of %lu, began with '%s'",
		lex_stack->curpos.nline, (u_long)min(sizeof(yytext) - 1, 50),
		yytext);

	/*
	 * If we hit the length limit reading the startup configuration
	 * file, abort.
	 */
	if (lex_from_file())
		exit(sizeof(yytext) - 1);

	/*
	 * If it's runtime configuration via ntpq :config treat it as
	 * if the configuration text ended before the too-long lexeme,
	 * hostname, or string.
	 */
	yylval.Integer = 0;
	return 0;
}