# $NetBSD: t_db.sh,v 1.7 2016/09/24 20:12:33 christos Exp $
#
# Copyright (c) 2008 The NetBSD Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

prog_db()
{
	echo $(atf_get_srcdir)/h_db
}

prog_lfsr()
{
	echo $(atf_get_srcdir)/h_lfsr
}

dict()
{
	if [ -f /usr/share/dict/words ]; then
		echo /usr/share/dict/words
	elif [ -f /usr/dict/words ]; then
		echo /usr/dict/words
	else
		atf_fail "no dictionary found"
	fi
}

SEVEN_SEVEN="abcdefg|abcdefg|abcdefg|abcdefg|abcdefg|abcdefg|abcdefg"

atf_test_case small_btree
small_btree_head()
{
	atf_set "descr" \
		"Checks btree database using small keys and small data" \
		"pairs: takes the first 200 entries in the dictionary," \
		"and makes them be key/data pairs."
}
small_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	sed 200q $(dict) >exp

	for i in `sed 200q $(dict)`; do
		echo p
		echo k$i
		echo d$i
		echo g
		echo k$i
	done >in

	atf_check -o file:exp "$(prog_db)" btree in
}

atf_test_case small_hash
small_hash_head()
{
	atf_set "descr" \
		"Checks hash database using small keys and small data" \
		"pairs: takes the first 200 entries in the dictionary," \
		"and makes them be key/data pairs."
}
small_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	sed 200q $(dict) >exp

	for i in `sed 200q $(dict)`; do
		echo p
		echo k$i
		echo d$i
		echo g
		echo k$i
	done >in

	atf_check -o file:exp "$(prog_db)" hash in
}

atf_test_case small_recno
small_recno_head()
{
	atf_set "descr" \
		"Checks recno database using small keys and small data" \
		"pairs: takes the first 200 entries in the dictionary," \
		"and makes them be key/data pairs."
}
small_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	sed 200q $(dict) >exp

	sed 200q $(dict) |
	awk '{
		++i;
		printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}

atf_test_case medium_btree
medium_btree_head()
{
	atf_set "descr" \
		"Checks btree database using small keys and medium" \
		"data pairs: takes the first 200 entries in the" \
		"dictionary, and gives them each a medium size data entry."
}
medium_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	mdata=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
	echo $mdata |
	awk '{ for (i = 1; i < 201; ++i) print $0 }' >exp

	for i in $(sed 200q $(dict)); do
		echo p
		echo k$i
		echo d$mdata
		echo g
		echo k$i
	done >in

	atf_check -o file:exp "$(prog_db)" btree in
}

atf_test_case medium_hash
medium_hash_head()
{
	atf_set "descr" \
		"Checks hash database using small keys and medium" \
		"data pairs: takes the first 200 entries in the" \
		"dictionary, and gives them each a medium size data entry."
}
medium_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	mdata=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
	echo $mdata |
	awk '{ for (i = 1; i < 201; ++i) print $0 }' >exp

	for i in $(sed 200q $(dict)); do
		echo p
		echo k$i
		echo d$mdata
		echo g
		echo k$i
	done >in

	atf_check -o file:exp "$(prog_db)" hash in
}

atf_test_case medium_recno
medium_recno_head()
{
	atf_set "descr" \
		"Checks recno database using small keys and medium" \
		"data pairs: takes the first 200 entries in the" \
		"dictionary, and gives them each a medium size data entry."
}
medium_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	mdata=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
	echo $mdata |
	awk '{ for (i = 1; i < 201; ++i) print $0 }' >exp

	echo $mdata |
	awk '{ for (i = 1; i < 201; ++i)
		printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}

atf_test_case big_btree
big_btree_head()
{
	atf_set "descr" \
		"Checks btree database using small keys and big data" \
		"pairs: inserts the programs in /bin with their paths" \
		"as their keys."
}
big_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	(find /bin -type f -print | xargs cat) >exp

	for psize in 512 16384 65536; do
		echo "checking page size: $psize"

		for i in `find /bin -type f -print`; do
			echo p
			echo k$i
			echo D$i
			echo g
			echo k$i
		done >in

		atf_check "$(prog_db)" -o out btree in
		cmp -s exp out || atf_fail "test failed for page size: $psize"
	done
}

atf_test_case big_hash
big_hash_head()
{
	atf_set "descr" \
		"Checks hash database using small keys and big data" \
		"pairs: inserts the programs in /bin with their paths" \
		"as their keys."
}
big_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	(find /bin -type f -print | xargs cat) >exp

	for i in `find /bin -type f -print`; do
		echo p
		echo k$i
		echo D$i
		echo g
		echo k$i
	done >in

	atf_check "$(prog_db)" -o out hash in
	cmp -s exp out || atf_fail "test failed"
}

atf_test_case big_recno
big_recno_head()
{
	atf_set "descr" \
		"Checks recno database using small keys and big data" \
		"pairs: inserts the programs in /bin with their paths" \
		"as their keys."
}
big_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	(find /bin -type f -print | xargs cat) >exp

	find /bin -type f -print |
	awk '{
		++i;
		printf("p\nk%d\nD%s\ng\nk%d\n", i, $0, i);
	}' >in

	for psize in 512 16384 65536; do
		echo "checking page size: $psize"

		atf_check "$(prog_db)" -o out recno in
		cmp -s exp out || atf_fail "test failed for page size: $psize"
	done
}

atf_test_case random_recno
random_recno_head()
{
	atf_set "descr" "Checks recno database using random entries"
}
random_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 37; i <= 37 + 88 * 17; i += 17) {
			if (i % 41)
				s = substr($0, 1, i % 41);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		for (i = 1; i <= 15; ++i) {
			if (i % 41)
				s = substr($0, 1, i % 41);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		for (i = 19234; i <= 19234 + 61 * 27; i += 27) {
			if (i % 41)
				s = substr($0, 1, i % 41);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		exit
	}' >exp

	cat exp |
	awk 'BEGIN {
		i = 37;
		incr = 17;
	}
	{
		printf("p\nk%d\nd%s\n", i, $0);
		if (i == 19234 + 61 * 27)
			exit;
		if (i == 37 + 88 * 17) {
			i = 1;
			incr = 1;
		} else if (i == 15) {
			i = 19234;
			incr = 27;
		} else
			i += incr;
	}
	END {
		for (i = 37; i <= 37 + 88 * 17; i += 17)
			printf("g\nk%d\n", i);
		for (i = 1; i <= 15; ++i)
			printf("g\nk%d\n", i);
		for (i = 19234; i <= 19234 + 61 * 27; i += 27)
			printf("g\nk%d\n", i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}

atf_test_case reverse_recno
reverse_recno_head()
{
	atf_set "descr" "Checks recno database using reverse order entries"
}
reverse_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk ' {
		for (i = 1500; i; --i) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		exit;
	}' >exp

	cat exp |
	awk 'BEGIN {
		i = 1500;
	}
	{
		printf("p\nk%d\nd%s\n", i, $0);
		--i;
	}
	END {
		for (i = 1500; i; --i)
			printf("g\nk%d\n", i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}

atf_test_case alternate_recno
alternate_recno_head()
{
	atf_set "descr" "Checks recno database using alternating order entries"
}
alternate_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk ' {
		for (i = 1; i < 1200; i += 2) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		for (i = 2; i < 1200; i += 2) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		exit;
	}' >exp

	cat exp |
	awk 'BEGIN {
		i = 1;
		even = 0;
	}
	{
		printf("p\nk%d\nd%s\n", i, $0);
		i += 2;
		if (i >= 1200) {
			if (even == 1)
				exit;
			even = 1;
			i = 2;
		}
	}
	END {
		for (i = 1; i < 1200; ++i)
			printf("g\nk%d\n", i);
	}' >in

	atf_check "$(prog_db)" -o out recno in

	sort -o exp exp
	sort -o out out

	cmp -s exp out || atf_fail "test failed"
}

h_delete()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	type=$1

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 120; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
	}' >exp

	cat exp |
	awk '{
		printf("p\nk%d\nd%s\n", ++i, $0);
	}
	END {
		printf("fR_NEXT\n");
		for (i = 1; i <= 120; ++i)
			printf("s\n");
		printf("fR_CURSOR\ns\nkXX\n");
		printf("r\n");
		printf("fR_NEXT\ns\n");
		printf("fR_CURSOR\ns\nk1\n");
		printf("r\n");
		printf("fR_FIRST\ns\n");
	}' >in

	# For btree, the records are ordered by the string representation
	# of the key value. So sort the expected output file accordingly,
	# and set the seek_last key to the last expected key value.

	if [ "$type" = "btree" ] ; then
		sed -e 's/kXX/k99/' < in > tmp
		mv tmp in
		sort -d -k4 < exp > tmp
		mv tmp exp
		echo $SEVEN_SEVEN |
		awk '{
			printf("%05d: input key %d: %s\n", 99, 99, $0);
			printf("seq failed, no such key\n");
			printf("%05d: input key %d: %s\n", 1, 1, $0);
			printf("%05d: input key %d: %s\n", 10, 10, $0);
			exit;
		}' >> exp
	else
		# For recno, records are ordered by numerical key value.
		# No sort is needed, but still need to set proper
		# seek_last key value.
		sed -e 's/kXX/k120/' < in > tmp
		mv tmp in
		echo $SEVEN_SEVEN |
		awk '{
			printf("%05d: input key %d: %s\n", 120, 120, $0);
			printf("seq failed, no such key\n");
			printf("%05d: input key %d: %s\n", 1, 1, $0);
			printf("%05d: input key %d: %s\n", 2, 2, $0);
			exit;
		}' >> exp
	fi

	atf_check "$(prog_db)" -o out $type in
	atf_check -o file:exp cat out
}

atf_test_case delete_btree
delete_btree_head()
{
	atf_set "descr" "Checks removing records in btree database"
}
delete_btree_body()
{
	h_delete btree
}

atf_test_case delete_recno
delete_recno_head()
{
	atf_set "descr" "Checks removing records in recno database"
}
delete_recno_body()
{
	h_delete recno
}

h_repeated()
{
	local type="$1"
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo "" |
	awk 'BEGIN {
		for (i = 1; i <= 10; ++i) {
			printf("p\nkkey1\nD/bin/sh\n");
			printf("p\nkkey2\nD/bin/csh\n");
			if (i % 8 == 0) {
				printf("c\nkkey2\nD/bin/csh\n");
				printf("c\nkkey1\nD/bin/sh\n");
				printf("e\t%d of 10 (comparison)\n", i);
			} else
				printf("e\t%d of 10 \n", i);
			printf("r\nkkey1\nr\nkkey2\n");
		}
	}' >in

	$(prog_db) $type in
}

atf_test_case repeated_btree
repeated_btree_head()
{
	atf_set "descr" \
		"Checks btree database with repeated small keys and" \
		"big data pairs. Makes sure that overflow pages are reused"
}
repeated_btree_body()
{
	h_repeated btree
}

atf_test_case repeated_hash
repeated_hash_head()
{
	atf_set "descr" \
		"Checks hash database with repeated small keys and" \
		"big data pairs. Makes sure that overflow pages are reused"
}
repeated_hash_body()
{
	h_repeated hash
}

atf_test_case duplicate_btree
duplicate_btree_head()
{
	atf_set "descr" "Checks btree database with duplicate keys"
}
duplicate_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 543; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
		exit;
	}' >exp

	cat exp |
	awk '{
		if (i++ % 2)
			printf("p\nkduplicatekey\nd%s\n", $0);
		else
			printf("p\nkunique%dkey\nd%s\n", i, $0);
	}
	END {
		printf("o\n");
	}' >in

	atf_check -o file:exp -x "$(prog_db) -iflags=1 btree in | sort"
}

h_cursor_flags()
{
	local type=$1
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 20; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
		exit;
	}' >exp

	# Test that R_CURSOR doesn't succeed before cursor initialized
	cat exp |
	awk '{
		if (i == 10)
			exit;
		printf("p\nk%d\nd%s\n", ++i, $0);
	}
	END {
		printf("fR_CURSOR\nr\n");
		printf("eR_CURSOR SHOULD HAVE FAILED\n");
	}' >in

	atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
	atf_check -s ne:0 test -s out

	cat exp |
	awk '{
		if (i == 10)
			exit;
		printf("p\nk%d\nd%s\n", ++i, $0);
	}
	END {
		printf("fR_CURSOR\np\nk1\ndsome data\n");
		printf("eR_CURSOR SHOULD HAVE FAILED\n");
	}' >in

	atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
	atf_check -s ne:0 test -s out
}

atf_test_case cursor_flags_btree
cursor_flags_btree_head()
{
	atf_set "descr" \
		"Checks use of cursor flags without initialization in btree database"
}
cursor_flags_btree_body()
{
	h_cursor_flags btree
}

atf_test_case cursor_flags_recno
cursor_flags_recno_head()
{
	atf_set "descr" \
		"Checks use of cursor flags without initialization in recno database"
}
cursor_flags_recno_body()
{
	h_cursor_flags recno
}

atf_test_case reverse_order_recno
reverse_order_recno_head()
{
	atf_set "descr" "Checks reverse order inserts in recno database"
}
reverse_order_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 779; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
		exit;
	}' >exp

	cat exp |
	awk '{
		if (i == 0) {
			i = 1;
			printf("p\nk1\nd%s\n", $0);
			printf("%s\n", "fR_IBEFORE");
		} else
			printf("p\nk1\nd%s\n", $0);
	}
	END {
		printf("or\n");
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}

atf_test_case small_page_btree
small_page_btree_head()
{
	atf_set "descr" \
		"Checks btree database with lots of keys and small page" \
		"size: takes the first 20000 entries in the dictionary," \
		"reverses them, and gives them each a small size data" \
		"entry. Uses a small page size to make sure the btree" \
		"split code gets hammered."
}
small_page_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	mdata=abcdefghijklmnopqrstuvwxy
	echo $mdata |
	awk '{ for (i = 1; i < 20001; ++i) print $0 }' >exp

	for i in `sed 20000q $(dict) | rev`; do
		echo p
		echo k$i
		echo d$mdata
		echo g
		echo k$i
	done >in

	atf_check -o file:exp "$(prog_db)" -i psize=512 btree in
}

h_byte_orders()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	type=$1

	sed 50q $(dict) >exp
	for order in 1234 4321; do
		for i in `sed 50q $(dict)`; do
			echo p
			echo k$i
			echo d$i
			echo S
			echo g
			echo k$i
		done >in

		atf_check -o file:exp "$(prog_db)" -ilorder=$order -f byte.file $type in

		for i in `sed 50q $(dict)`; do
			echo g
			echo k$i
		done >in

		atf_check -o file:exp "$(prog_db)" -s -ilorder=$order -f byte.file $type in
	done
}

atf_test_case byte_orders_btree
byte_orders_btree_head()
{
	atf_set "descr" "Checks btree database using differing byte orders"
}
byte_orders_btree_body()
{
	h_byte_orders btree
}

atf_test_case byte_orders_hash
byte_orders_hash_head()
{
	atf_set "descr" "Checks hash database using differing byte orders"
}
byte_orders_hash_body()
{
	h_byte_orders hash
}

h_bsize_ffactor()
{
	bsize=$1
	ffactor=$2

	echo "bucketsize $bsize, fill factor $ffactor"
	atf_check -o file:exp "$(prog_db)" "-ibsize=$bsize,\
ffactor=$ffactor,nelem=25000,cachesize=65536" hash in
}

atf_test_case bsize_ffactor
bsize_ffactor_head()
{
	atf_set "timeout" "1800"
	atf_set "descr" "Checks hash database with various" \
		"bucketsizes and fill factors"
}
bsize_ffactor_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 10000; ++i) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("%s\n", s);
		}
		exit;

	}' >exp

	sed 10000q $(dict) |
	awk 'BEGIN {
		ds="'$SEVEN_SEVEN'"
	}
	{
		if (++i % 34)
			s = substr(ds, 1, i % 34);
		else
			s = substr(ds, 1);
		printf("p\nk%s\nd%s\n", $0, s);
	}' >in

	sed 10000q $(dict) |
	awk '{
		++i;
		printf("g\nk%s\n", $0);
	}' >>in

	h_bsize_ffactor 256 11
	h_bsize_ffactor 256 14
	h_bsize_ffactor 256 21

	h_bsize_ffactor 512 21
	h_bsize_ffactor 512 28
	h_bsize_ffactor 512 43

	h_bsize_ffactor 1024 43
	h_bsize_ffactor 1024 57
	h_bsize_ffactor 1024 85

	h_bsize_ffactor 2048 85
	h_bsize_ffactor 2048 114
	h_bsize_ffactor 2048 171

	h_bsize_ffactor 4096 171
	h_bsize_ffactor 4096 228
	h_bsize_ffactor 4096 341

	h_bsize_ffactor 8192 341
	h_bsize_ffactor 8192 455
	h_bsize_ffactor 8192 683

	h_bsize_ffactor 16384 341
	h_bsize_ffactor 16384 455
	h_bsize_ffactor 16384 683

	h_bsize_ffactor 32768 341
	h_bsize_ffactor 32768 455
	h_bsize_ffactor 32768 683

	h_bsize_ffactor 65536 341
	h_bsize_ffactor 65536 455
	h_bsize_ffactor 65536 683
}

# This tests 64K block size addition/removal
atf_test_case four_char_hash
four_char_hash_head()
{
	atf_set "descr" \
		"Checks hash database with 4 char key and" \
		"value insert on a 65536 bucket size"
}
four_char_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	cat >in <<EOF
p
k1234
d1234
r
k1234
EOF

	atf_check "$(prog_db)" -i bsize=65536 hash in
}


atf_test_case bsize_torture
bsize_torture_head()
{
	atf_set "timeout" "36000"
	atf_set "descr" "Checks hash database with various bucket sizes"
}
bsize_torture_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}
	for i in 2048 4096 8192 16384 32768 65536
	do
		atf_check "$(prog_lfsr)" $i
	done
}

atf_test_case btree_weird_page_split
btree_weird_page_split_head()
{
	atf_set "descr" \
		"Test for a weird page split condition where an insertion " \
		"into index 0 of a page that would cause the new item to " \
		"be the only item on the left page results in index 0 of " \
		"the right page being erroneously skipped; this only " \
		"happens with one particular key+data length for each page size."
}
btree_weird_page_split_body()
{
	for psize in 512 1024 2048 4096 8192; do
		echo " page size $psize"
		kdsizes=`awk 'BEGIN {
			psize = '$psize'; hsize = int(psize/2);
			for (kdsize = hsize-40; kdsize <= hsize; kdsize++) {
				print kdsize;
			}
		}' /dev/null`

		# Use a series of keylen+datalen values in the right
		# neighborhood to find the one that triggers the bug.
		# We could compute the exact size that triggers the
		# bug but this additional fuzz may be useful.

		# Insert keys in reverse order to maximize the chances
		# for a split on index 0.

		for kdsize in $kdsizes; do
			awk 'BEGIN {
				kdsize = '$kdsize';
				for (i = 8; i-- > 0; ) {
					s = sprintf("a%03d:%09d", i, kdsize);
					for (j = 0; j < kdsize-20; j++) {
						s = s "x";
					}
					printf("p\nka%03d\nd%s\n", i, s);
				}
				print "o";
			}' /dev/null > in
			sed -n 's/^d//p' in | sort > exp
			atf_check -o file:exp \
			    "$(prog_db)" -i psize=$psize btree in
		done
	done
}

# Extremely tricky test attempting to replicate some unusual database
# corruption seen in the field: pieces of the database becoming
# inaccessible to random access, sequential access, or both. The
# hypothesis is that at least some of these are triggered by the bug
# in page splits on index 0 with a particular exact keylen+datalen.
# (See btree_weird_page_split above.) For psize=4096, this size is
# exactly 2024.

# The order of operations here relies on very specific knowledge of
# the internals of the btree access method in order to place records
# at specific offsets in a page and to create certain keys on internal
# pages. The to-be-split page immediately prior to the bug-triggering
# split has the following properties:
#
# * is not the leftmost leaf page
# * key on the parent page compares less than the key of the item
#   on index 0
# * triggering record's key also compares greater than the key on the
#   parent page

# Additionally, we prime the mpool LRU chain so that the head page on
# the chain has the following properties:
#
# * record at index 0 is located where it will not get overwritten by
#   items written to the right-hand page during the split
# * key of the record at index 0 compares less than the key of the
#   bug-triggering record

# If the page-split bug exists, this test appears to create a database
# where some records are inaccessible to a search, but still remain in
# the file and are accessible by sequential traversal. At least one
# record gets duplicated out of sequence.

atf_test_case btree_tricky_page_split
btree_tricky_page_split_head()
{
	atf_set "descr" \
		"btree: no unsearchables due to page split on index 0"
}
btree_tricky_page_split_body()
{
	list=`(for i in a b c d; do
			for j in 990 998 999; do
				echo g ${i}${j} 1024
			done
		done;
		echo g y997 2014
		for i in y z; do
			for j in 998 999; do
				echo g ${i}${j} 1024
			done
		done)`
	# Exact number for trigger condition accounts for newlines
	# retained by dbtest with -ofile but not without; we use
	# -ofile, so count newlines. keylen=5,datalen=5+2014 for
	# psize=4096 here.
	(cat - <<EOF
p z999 1024
p z998 1024
p y999 1024
p y990 1024
p d999 1024
p d990 1024
p c999 1024
p c990 1024
p b999 1024
p b990 1024
p a999 1024
p a990 1024
p y998 1024
r y990
p d998 1024
p d990 1024
p c998 1024
p c990 1024
p b998 1024
p b990 1024
p a998 1024
p a990 1024
p y997 2014
S
o
EOF
	echo "$list") |
	# awk script input:
	#    {p|g|r} key [datasize]
	awk '/^[pgr]/{
		printf("%s\nk%s\n", $1, $2);
	}
	/^p/{
		s = $2;
		for (i = 0; i < $3; i++) {
			s = s "x";
		}
		printf("d%s\n", s);
	}
	!/^[pgr]/{
		print $0;
	}' > in
	(echo "$list"; echo "$list") | awk '{
		s = $2;
		for (i = 0; i < $3; i++) {
			s = s "x";
		}
		print s;
	}' > exp
	atf_check -o file:exp \
	    "$(prog_db)" -i psize=4096 btree in
}

atf_test_case btree_recursive_traversal
btree_recursive_traversal_head()
{
	atf_set "descr" \
		"btree: Test for recursive traversal successfully " \
		"retrieving records that are inaccessible to normal " \
		"sequential 'sibling-link' traversal. This works by " \
		"unlinking a few leaf pages but leaving their parent " \
		"links intact. To verify that the unlink actually makes " \
		"records inaccessible, the test first uses 'o' to do a " \
		"normal sequential traversal, followed by 'O' to do a " \
		"recursive traversal."
}
btree_recursive_traversal_body()
{
	fill="abcdefghijklmnopqrstuvwxyzy"
	script='{
		for (i = 0; i < 20000; i++) {
			printf("p\nkAA%05d\nd%05d%s\n", i, i, $0);
		}
		print "u";
		print "u";
		print "u";
		print "u";
	}'
	(echo $fill | awk "$script"; echo o) > in1
	echo $fill |
	awk '{
		for (i = 0; i < 20000; i++) {
			if (i >= 5 && i <= 40)
				continue;
			printf("%05d%s\n", i, $0);
		}
	}' > exp1
	atf_check -o file:exp1 \
	    "$(prog_db)" -i psize=512 btree in1
	echo $fill |
	awk '{
		for (i = 0; i < 20000; i++) {
			printf("%05d%s\n", i, $0);
		}
	}' > exp2
	(echo $fill | awk "$script"; echo O) > in2
	atf_check -o file:exp2 \
	    "$(prog_db)" -i psize=512 btree in2
}

atf_test_case btree_byteswap_unaligned_access_bksd
btree_byteswap_unaligned_access_bksd_head()
{
	atf_set "descr" \
		"btree: big key, small data, byteswap unaligned access"
}
btree_byteswap_unaligned_access_bksd_body()
{
	(echo foo; echo bar) |
	awk '{
		s = $0
		for (i = 0; i < 488; i++) {
			s = s "x";
		}
		printf("p\nk%s\ndx\n", s);
	}' > in
	for order in 1234 4321; do
		atf_check \
		    "$(prog_db)" -o out -i psize=512,lorder=$order btree in
	done
}

atf_test_case btree_byteswap_unaligned_access_skbd
btree_byteswap_unaligned_access_skbd_head()
{
	atf_set "descr" \
		"btree: small key, big data, byteswap unaligned access"
}
btree_byteswap_unaligned_access_skbd_body()
{
	# 484 = 512 - 20 (header) - 7 ("foo1234") - 1 (newline)
	(echo foo1234; echo bar1234) |
	awk '{
		s = $0
		for (i = 0; i < 484; i++) {
			s = s "x";
		}
		printf("p\nk%s\nd%s\n", $0, s);
	}' > in
	for order in 1234 4321; do
		atf_check \
		    "$(prog_db)" -o out -i psize=512,lorder=$order btree in
	done
}

atf_test_case btree_known_byte_order
btree_known_byte_order_head()
{
	atf_set "descr" \
		"btree: small key, big data, known byte order"
}
btree_known_byte_order_body()
{
	local a="-i psize=512,lorder="

	(echo foo1234; echo bar1234) |
	awk '{
		s = $0
		for (i = 0; i < 484; i++) {
			s = s "x";
		}
		printf("%s\n", s);
	}' > exp
	(echo foo1234; echo bar1234) |
	awk '{
		s = $0
		for (i = 0; i < 484; i++) {
			s = s "x";
		}
		printf("p\nk%s\nd%s\n", $0, s);
	}' > in1
	for order in 1234 4321; do
		atf_check \
		    "$(prog_db)" -f out.$order $a$order btree in1
	done
	(echo g; echo kfoo1234; echo g; echo kbar1234) > in2
	for order in 1234 4321; do
		atf_check -o file:exp \
		    "$(prog_db)" -s -f out.$order $a$order btree in2
	done
}

atf_init_test_cases()
{
	atf_add_test_case small_btree
	atf_add_test_case small_hash
	atf_add_test_case small_recno
	atf_add_test_case medium_btree
	atf_add_test_case medium_hash
	atf_add_test_case medium_recno
	atf_add_test_case big_btree
	atf_add_test_case big_hash
	atf_add_test_case big_recno
	atf_add_test_case random_recno
	atf_add_test_case reverse_recno
	atf_add_test_case alternate_recno
	atf_add_test_case delete_btree
	atf_add_test_case delete_recno
	atf_add_test_case repeated_btree
	atf_add_test_case repeated_hash
	atf_add_test_case duplicate_btree
	atf_add_test_case cursor_flags_btree
	atf_add_test_case cursor_flags_recno
	atf_add_test_case reverse_order_recno
	atf_add_test_case small_page_btree
	atf_add_test_case byte_orders_btree
	atf_add_test_case byte_orders_hash
	atf_add_test_case bsize_ffactor
	atf_add_test_case four_char_hash
	atf_add_test_case bsize_torture
	atf_add_test_case btree_weird_page_split
	atf_add_test_case btree_tricky_page_split
	atf_add_test_case btree_recursive_traversal
	atf_add_test_case btree_byteswap_unaligned_access_bksd
	atf_add_test_case btree_byteswap_unaligned_access_skbd
	atf_add_test_case btree_known_byte_order
}