# $NetBSD: t_db.sh,v 1.8 2020/03/12 14:03:42 martin Exp $
2 #
3 # Copyright (c) 2008 The NetBSD Foundation, Inc.
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions
8 # are met:
9 # 1. Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # 2. Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 #
15 # THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
16 # ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
17 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
19 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 # POSSIBILITY OF SUCH DAMAGE.
26 #
27
# Print the absolute path of the h_db test helper binary.
prog_db()
{
	printf '%s\n' "$(atf_get_srcdir)/h_db"
}
32
# Print the absolute path of the h_lfsr test helper binary.
prog_lfsr()
{
	printf '%s\n' "$(atf_get_srcdir)/h_lfsr"
}
37
# Print the path of the first system dictionary found; abort the
# current test case if none is available.
dict()
{
	local words
	for words in /usr/share/dict/words /usr/dict/words; do
		if [ -f "$words" ]; then
			echo "$words"
			return
		fi
	done
	atf_fail "no dictionary found"
}
48
49 SEVEN_SEVEN="abcdefg|abcdefg|abcdefg|abcdefg|abcdefg|abcdefg|abcdefg"
50
atf_test_case small_btree
small_btree_head()
{
	# Description updated to match the body, which takes 200 entries
	# (sed 200q), not one hundred.
	atf_set "descr" \
		"Checks btree database using small keys and small data" \
		"pairs: takes the first 200 entries in the dictionary," \
		"and makes them be key/data pairs."
}
small_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	# Expected output: each word is echoed back by its 'g' (get).
	sed 200q "$(dict)" >exp

	# Script: put ('p') then get ('g') each word, keyed by itself.
	for i in $(sed 200q "$(dict)"); do
		echo p
		echo "k$i"
		echo "d$i"
		echo g
		echo "k$i"
	done >in

	atf_check -o file:exp "$(prog_db)" btree in
}
76
atf_test_case small_hash
small_hash_head()
{
	# Description updated to match the body, which takes 200 entries
	# (sed 200q), not one hundred.
	atf_set "descr" \
		"Checks hash database using small keys and small data" \
		"pairs: takes the first 200 entries in the dictionary," \
		"and makes them be key/data pairs."
}
small_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	# Expected output: each word is echoed back by its 'g' (get).
	sed 200q "$(dict)" >exp

	# Script: put ('p') then get ('g') each word, keyed by itself.
	for i in $(sed 200q "$(dict)"); do
		echo p
		echo "k$i"
		echo "d$i"
		echo g
		echo "k$i"
	done >in

	atf_check -o file:exp "$(prog_db)" hash in
}
102
atf_test_case small_recno
small_recno_head()
{
	# Description updated to match the body, which takes 200 entries
	# (sed 200q), not one hundred.
	atf_set "descr" \
		"Checks recno database using small keys and small data" \
		"pairs: takes the first 200 entries in the dictionary," \
		"and makes them be key/data pairs."
}
small_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	sed 200q "$(dict)" >exp

	# recno keys are record numbers, so key each word by its line
	# number and put/get it.
	sed 200q "$(dict)" |
	awk '{
		++i;
		printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}
126
atf_test_case medium_btree
medium_btree_head()
{
	atf_set "descr" \
		"Checks btree database using small keys and medium" \
		"data pairs: takes the first 200 entries in the" \
		"dictionary, and gives them each a medium size data entry."
}
medium_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	mdata=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz

	# Expected output: the medium-sized datum once per get, 200 times.
	echo "$mdata" |
	awk '{ for (n = 0; n < 200; n++) print }' >exp

	# Script: store the same datum under each of the first 200
	# dictionary words, reading each back immediately.
	for key in $(sed 200q "$(dict)"); do
		echo p
		echo "k$key"
		echo "d$mdata"
		echo g
		echo "k$key"
	done >in

	atf_check -o file:exp "$(prog_db)" btree in
}
154
atf_test_case medium_hash
medium_hash_head()
{
	atf_set "descr" \
		"Checks hash database using small keys and medium" \
		"data pairs: takes the first 200 entries in the" \
		"dictionary, and gives them each a medium size data entry."
}
medium_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	mdata=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz

	# Expected output: the medium-sized datum once per get, 200 times.
	echo "$mdata" |
	awk '{ for (n = 0; n < 200; n++) print }' >exp

	# Script: store the same datum under each of the first 200
	# dictionary words, reading each back immediately.
	for key in $(sed 200q "$(dict)"); do
		echo p
		echo "k$key"
		echo "d$mdata"
		echo g
		echo "k$key"
	done >in

	atf_check -o file:exp "$(prog_db)" hash in
}
182
atf_test_case medium_recno
medium_recno_head()
{
	# NOTE(review): unlike the btree/hash variants, this body does not
	# read the dictionary at all (recno keys are record numbers), so
	# the "entries in the dictionary" wording is inherited, not literal.
	atf_set "descr" \
		"Checks recno database using small keys and medium" \
		"data pairs: takes the first 200 entries in the" \
		"dictionary, and gives them each a medium size data entry."
}
medium_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	mdata=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
	# Expected output: the medium-sized datum once per get, 200 times.
	echo $mdata |
	awk '{ for (i = 1; i < 201; ++i) print $0 }' >exp

	# Script: put/get record numbers 1..200, all holding the same datum.
	echo $mdata |
	awk '{ for (i = 1; i < 201; ++i)
		printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}
207
atf_test_case big_btree
big_btree_head()
{
	atf_set "descr" \
		"Checks btree database using small keys and big data" \
		"pairs: inserts the programs in /bin with their paths" \
		"as their keys."
}
big_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	# Expected output: the concatenated contents of every file in /bin.
	(find /bin -type f -print | xargs cat) >exp

	for psize in 512 16384 65536; do
		echo "checking page size: $psize"

		# 'D' makes the helper read the datum from the named file.
		for i in $(find /bin -type f -print); do
			echo p
			echo "k$i"
			echo "D$i"
			echo g
			echo "k$i"
		done >in

		# Fix: actually pass the page size under test; previously
		# $psize was never used, so all three iterations ran with
		# the default page size.
		atf_check "$(prog_db)" -o out -i psize=$psize btree in
		cmp -s exp out || atf_fail "test failed for page size: $psize"
	done
}
238
atf_test_case big_hash
big_hash_head()
{
	atf_set "descr" \
		"Checks hash database using small keys and big data" \
		"pairs: inserts the programs in /bin with their paths" \
		"as their keys."
}
big_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	# Expected output: the concatenated contents of every file in /bin.
	find /bin -type f -print | xargs cat >exp

	# 'D' makes the helper read the datum from the named file, so each
	# put stores the program's contents keyed by its path.
	for path in $(find /bin -type f -print); do
		echo p
		echo "k$path"
		echo "D$path"
		echo g
		echo "k$path"
	done >in

	atf_check "$(prog_db)" -o out hash in
	cmp -s exp out || atf_fail "test failed"
}
265
atf_test_case big_recno
big_recno_head()
{
	atf_set "descr" \
		"Checks recno database using small keys and big data" \
		"pairs: inserts the programs in /bin with their paths" \
		"as their keys."
}
big_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	# Expected output: the concatenated contents of every file in /bin.
	(find /bin -type f -print | xargs cat) >exp

	# Script: store each file's contents ('D' reads the datum from the
	# named file) under consecutive record numbers, then read them back.
	find /bin -type f -print |
	awk '{
		++i;
		printf("p\nk%d\nD%s\ng\nk%d\n", i, $0, i);
	}' >in

	for psize in 512 16384 65536; do
		echo "checking page size: $psize"

		# NOTE(review): $psize is never passed to the helper (no
		# "-i psize=$psize"), so all three iterations run with the
		# default page size — confirm whether this is intentional.
		atf_check "$(prog_db)" -o out recno in
		cmp -s exp out || atf_fail "test failed for page size: $psize"
	done
}
294
atf_test_case random_recno
random_recno_head()
{
	atf_set "descr" "Checks recno database using random entries"
}
random_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	# Expected output: three batches of keys (37..37+88*17 step 17,
	# then 1..15, then 19234..19234+61*27 step 27), each with a datum
	# of i % 41 leading characters of SEVEN_SEVEN.
	echo $SEVEN_SEVEN |
	awk '{
		for (i = 37; i <= 37 + 88 * 17; i += 17) {
			if (i % 41)
				s = substr($0, 1, i % 41);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		for (i = 1; i <= 15; ++i) {
			if (i % 41)
				s = substr($0, 1, i % 41);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		for (i = 19234; i <= 19234 + 61 * 27; i += 27) {
			if (i % 41)
				s = substr($0, 1, i % 41);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		exit
	}' >exp

	# Script: put the records in the same non-sequential key order,
	# then get every key back in that order so the output matches exp.
	cat exp |
	awk 'BEGIN {
		i = 37;
		incr = 17;
	}
	{
		printf("p\nk%d\nd%s\n", i, $0);
		if (i == 19234 + 61 * 27)
			exit;
		if (i == 37 + 88 * 17) {
			i = 1;
			incr = 1;
		} else if (i == 15) {
			i = 19234;
			incr = 27;
		} else
			i += incr;
	}
	END {
		for (i = 37; i <= 37 + 88 * 17; i += 17)
			printf("g\nk%d\n", i);
		for (i = 1; i <= 15; ++i)
			printf("g\nk%d\n", i);
		for (i = 19234; i <= 19234 + 61 * 27; i += 27)
			printf("g\nk%d\n", i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}
360
atf_test_case reverse_recno
reverse_recno_head()
{
	atf_set "descr" "Checks recno database using reverse order entries"
}
reverse_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	# Expected output: keys 1500 down to 1, each with a datum of
	# i % 34 leading characters of SEVEN_SEVEN.
	echo $SEVEN_SEVEN |
	awk ' {
		for (i = 1500; i; --i) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		exit;
	}' >exp

	# Script: insert records in descending key order, then get keys
	# 1500..1 so the output matches exp line for line.
	cat exp |
	awk 'BEGIN {
		i = 1500;
	}
	{
		printf("p\nk%d\nd%s\n", i, $0);
		--i;
	}
	END {
		for (i = 1500; i; --i)
			printf("g\nk%d\n", i);
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}
398
atf_test_case alternate_recno
alternate_recno_head()
{
	atf_set "descr" "Checks recno database using alternating order entries"
}
alternate_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	# Expected data: all odd keys below 1200 first, then all even ones;
	# datum length is i % 34 characters of SEVEN_SEVEN.
	echo $SEVEN_SEVEN |
	awk ' {
		for (i = 1; i < 1200; i += 2) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		for (i = 2; i < 1200; i += 2) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("input key %d: %s\n", i, s);
		}
		exit;
	}' >exp

	# Script: put odd keys, then even keys, then get 1..1199 in order.
	cat exp |
	awk 'BEGIN {
		i = 1;
		even = 0;
	}
	{
		printf("p\nk%d\nd%s\n", i, $0);
		i += 2;
		if (i >= 1200) {
			if (even == 1)
				exit;
			even = 1;
			i = 2;
		}
	}
	END {
		for (i = 1; i < 1200; ++i)
			printf("g\nk%d\n", i);
	}' >in

	atf_check "$(prog_db)" -o out recno in

	# The gets come back in key order while exp is in insertion order,
	# so sort both files before comparing.
	sort -o exp exp
	sort -o out out

	cmp -s exp out || atf_fail "test failed"
}
455
# Shared body for the delete_* test cases: populate a database of the
# given type ($1) with 120 records, then exercise cursor positioning
# (R_FIRST/R_NEXT/R_CURSOR) combined with record removal ('r').
h_delete()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	type=$1

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 120; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
	}' >exp

	# Script: put all 120 records, sequence through them, seek to the
	# last key (the kXX placeholder is patched per-type below), remove
	# it, step once more, seek to key 1, remove it, re-read the first.
	cat exp |
	awk '{
		printf("p\nk%d\nd%s\n", ++i, $0);
	}
	END {
		printf("fR_NEXT\n");
		for (i = 1; i <= 120; ++i)
			printf("s\n");
		printf("fR_CURSOR\ns\nkXX\n");
		printf("r\n");
		printf("fR_NEXT\ns\n");
		printf("fR_CURSOR\ns\nk1\n");
		printf("r\n");
		printf("fR_FIRST\ns\n");
	}' >in

	# For btree, the records are ordered by the string representation
	# of the key value. So sort the expected output file accordingly,
	# and set the seek_last key to the last expected key value.

	if [ "$type" = "btree" ] ; then
		sed -e 's/kXX/k99/' < in > tmp
		mv tmp in
		sort -d -k4 < exp > tmp
		mv tmp exp
		echo $SEVEN_SEVEN |
		awk '{
			printf("%05d: input key %d: %s\n", 99, 99, $0);
			printf("seq failed, no such key\n");
			printf("%05d: input key %d: %s\n", 1, 1, $0);
			printf("%05d: input key %d: %s\n", 10, 10, $0);
			exit;
		}' >> exp
	else
		# For recno, records are ordered by numerical key value. No sort
		# is needed, but still need to set proper seek_last key value.
		sed -e 's/kXX/k120/' < in > tmp
		mv tmp in
		echo $SEVEN_SEVEN |
		awk '{
			printf("%05d: input key %d: %s\n", 120, 120, $0);
			printf("seq failed, no such key\n");
			printf("%05d: input key %d: %s\n", 1, 1, $0);
			printf("%05d: input key %d: %s\n", 2, 2, $0);
			exit;
		}' >> exp
	fi

	atf_check "$(prog_db)" -o out $type in
	atf_check -o file:exp cat out
}
520
atf_test_case delete_btree
delete_btree_head()
{
	atf_set "descr" "Checks removing records in btree database"
}
delete_btree_body()
{
	# Shared logic lives in h_delete; "btree" selects the access method.
	h_delete btree
}
530
atf_test_case delete_recno
delete_recno_head()
{
	atf_set "descr" "Checks removing records in recno database"
}
delete_recno_body()
{
	# Shared logic lives in h_delete; "recno" selects the access method.
	h_delete recno
}
540
# Shared body for the repeated_* test cases: repeatedly store, verify
# and remove two keys holding big (file-sourced) data items so that
# overflow pages are allocated and freed over and over, checking the
# stored data with the 'c' (compare) command on every 8th pass.
h_repeated()
{
	local type="$1"
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	echo "" |
	awk 'BEGIN {
		for (i = 1; i <= 10; ++i) {
			printf("p\nkkey1\nD/bin/sh\n");
			printf("p\nkkey2\nD/bin/csh\n");
			if (i % 8 == 0) {
				printf("c\nkkey2\nD/bin/csh\n");
				printf("c\nkkey1\nD/bin/sh\n");
				printf("e\t%d of 10 (comparison)\n", i);
			} else
				printf("e\t%d of 10 \n", i);
			printf("r\nkkey1\nr\nkkey2\n");
		}
	}' >in

	# Fix: run under atf_check so a helper failure fails the test
	# case; previously the exit status was silently discarded. The
	# 'e' progress lines go to stdout, hence -o ignore.
	atf_check -o ignore "$(prog_db)" $type in
}
564
atf_test_case repeated_btree
repeated_btree_head()
{
	atf_set "descr" \
		"Checks btree database with repeated small keys and" \
		"big data pairs. Makes sure that overflow pages are reused"
}
repeated_btree_body()
{
	# Shared logic lives in h_repeated; "btree" selects the access method.
	h_repeated btree
}
576
atf_test_case repeated_hash
repeated_hash_head()
{
	atf_set "descr" \
		"Checks hash database with repeated small keys and" \
		"big data pairs. Makes sure that overflow pages are reused"
}
repeated_hash_body()
{
	# Shared logic lives in h_repeated; "hash" selects the access method.
	h_repeated hash
}
588
atf_test_case duplicate_btree
duplicate_btree_head()
{
	atf_set "descr" "Checks btree database with duplicate keys"
}
duplicate_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 543; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
		exit;
	}' >exp

	# Script: every other record goes under the same key
	# ("duplicatekey"), the rest under unique keys; 'o' dumps the
	# whole database at the end.
	cat exp |
	awk '{
		if (i++ % 2)
			printf("p\nkduplicatekey\nd%s\n", $0);
		else
			printf("p\nkunique%dkey\nd%s\n", i, $0);
	}
	END {
		printf("o\n");
	}' >in

	# -iflags=1 presumably enables duplicate keys (R_DUP) — confirm
	# against h_db. Dump order is not insertion order, so compare the
	# sorted dump against (already sorted) exp.
	atf_check -o file:exp -x "$(prog_db) -iflags=1 btree in | sort"
}
619
# Shared body for the cursor_flags_* test cases: verify that R_CURSOR
# operations fail (and produce no output) when the cursor has not been
# initialized, for the given database type ($1).
h_cursor_flags()
{
	local type=$1
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 20; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
		exit;
	}' >exp

	# Test that R_CURSOR doesn't succeed before cursor initialized
	cat exp |
	awk '{
		if (i == 10)
			exit;
		printf("p\nk%d\nd%s\n", ++i, $0);
	}
	END {
		printf("fR_CURSOR\nr\n");
		printf("eR_CURSOR SHOULD HAVE FAILED\n");
	}' >in

	# The helper must exit non-zero and leave the output file empty
	# (the 'e' line would only appear if R_CURSOR had succeeded).
	atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
	atf_check -s ne:0 test -s out

	# Same again, but applying R_CURSOR to a put instead of a remove.
	cat exp |
	awk '{
		if (i == 10)
			exit;
		printf("p\nk%d\nd%s\n", ++i, $0);
	}
	END {
		printf("fR_CURSOR\np\nk1\ndsome data\n");
		printf("eR_CURSOR SHOULD HAVE FAILED\n");
	}' >in

	atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
	atf_check -s ne:0 test -s out
}
662
atf_test_case cursor_flags_btree
cursor_flags_btree_head()
{
	atf_set "descr" \
	"Checks use of cursor flags without initialization in btree database"
}
cursor_flags_btree_body()
{
	# Shared logic lives in h_cursor_flags.
	h_cursor_flags btree
}
673
atf_test_case cursor_flags_recno
cursor_flags_recno_head()
{
	atf_set "descr" \
	"Checks use of cursor flags without initialization in recno database"
}
cursor_flags_recno_body()
{
	# Shared logic lives in h_cursor_flags.
	h_cursor_flags recno
}
684
atf_test_case reverse_order_recno
reverse_order_recno_head()
{
	atf_set "descr" "Checks reverse order inserts in recno database"
}
reverse_order_recno_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 779; ++i)
			printf("%05d: input key %d: %s\n", i, i, $0);
		exit;
	}' >exp

	# Script: after the first record, switch to R_IBEFORE so every
	# subsequent put at key 1 is inserted before the existing records;
	# 'or' then presumably dumps the database in reverse order (see
	# h_db), which should reproduce exp.
	cat exp |
	awk '{
		if (i == 0) {
			i = 1;
			printf("p\nk1\nd%s\n", $0);
			printf("%s\n", "fR_IBEFORE");
		} else
			printf("p\nk1\nd%s\n", $0);
	}
	END {
		printf("or\n");
	}' >in

	atf_check -o file:exp "$(prog_db)" recno in
}
717
atf_test_case small_page_btree
small_page_btree_head()
{
	atf_set "descr" \
		"Checks btree database with lots of keys and small page" \
		"size: takes the first 20000 entries in the dictionary," \
		"reverses them, and gives them each a small size data" \
		"entry. Uses a small page size to make sure the btree" \
		"split code gets hammered."
}
small_page_btree_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	mdata=abcdefghijklmnopqrstuvwxy

	# Each of the 20000 gets echoes the same small datum.
	echo "$mdata" |
	awk '{ for (n = 0; n < 20000; n++) print }' >exp

	# Keys are the character-reversed dictionary words.
	for key in $(sed 20000q "$(dict)" | rev); do
		echo p
		echo "k$key"
		echo "d$mdata"
		echo g
		echo "k$key"
	done >in

	atf_check -o file:exp "$(prog_db)" -i psize=512 btree in
}
747
# Shared body for the byte_orders_* test cases: create a database of
# the given type ($1) with an explicit byte order (lorder 1234, then
# 4321), sync it to byte.file, and read the same file back to check
# that both orders round-trip.
h_byte_orders()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	type=$1

	sed 50q $(dict) >exp
	for order in 1234 4321; do
		# Put and get the first 50 dictionary words; 'S' syncs the
		# database to the backing file after each put.
		for i in `sed 50q $(dict)`; do
			echo p
			echo k$i
			echo d$i
			echo S
			echo g
			echo k$i
		done >in

		atf_check -o file:exp "$(prog_db)" -ilorder=$order -f byte.file $type in

		# -s presumably re-opens the existing byte.file rather than
		# creating a fresh database — confirm against h_db.
		for i in `sed 50q $(dict)`; do
			echo g
			echo k$i
		done >in

		atf_check -o file:exp "$(prog_db)" -s -ilorder=$order -f byte.file $type in
	done
}
776
atf_test_case byte_orders_btree
byte_orders_btree_head()
{
	atf_set "descr" "Checks btree database using differing byte orders"
}
byte_orders_btree_body()
{
	# Shared logic lives in h_byte_orders.
	h_byte_orders btree
}
786
atf_test_case byte_orders_hash
byte_orders_hash_head()
{
	atf_set "descr" "Checks hash database using differing byte orders"
}
byte_orders_hash_body()
{
	# Shared logic lives in h_byte_orders.
	h_byte_orders hash
}
796
# Run one hash-database pass with the given bucket size ($1) and fill
# factor ($2), using the 'in' script and 'exp' expected-output file
# prepared by the caller (bsize_ffactor_body).
h_bsize_ffactor()
{
	local bsize="$1"
	local ffactor="$2"

	echo "bucketsize $bsize, fill factor $ffactor"
	atf_check -o file:exp "$(prog_db)" \
	    "-ibsize=$bsize,ffactor=$ffactor,nelem=25000,cachesize=65536" \
	    hash in
}
806
atf_test_case bsize_ffactor
bsize_ffactor_head()
{
	atf_set "timeout" "1800"
	atf_set "descr" "Checks hash database with various" \
		"bucketsizes and fill factors"
}
bsize_ffactor_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	# Expected output: one line per word, holding the variable-length
	# datum (i % 34 leading chars of SEVEN_SEVEN) stored for it.
	echo $SEVEN_SEVEN |
	awk '{
		for (i = 1; i <= 10000; ++i) {
			if (i % 34)
				s = substr($0, 1, i % 34);
			else
				s = substr($0, 1);
			printf("%s\n", s);
		}
		exit;

	}' >exp

	# Script part 1: put each of the first 10000 dictionary words with
	# a datum whose length depends on its line number.
	sed 10000q $(dict) |
	awk 'BEGIN {
		ds="'$SEVEN_SEVEN'"
	}
	{
		if (++i % 34)
			s = substr(ds, 1, i % 34);
		else
			s = substr(ds, 1);
		printf("p\nk%s\nd%s\n", $0, s);
	}' >in

	# Script part 2: get every word back.
	sed 10000q $(dict) |
	awk '{
		++i;
		printf("g\nk%s\n", $0);
	}' >>in

	# Run the same workload across a range of bucket sizes and fill
	# factors (bucket sizes above 8192 reuse the 8192 fill factors).
	h_bsize_ffactor 256 11
	h_bsize_ffactor 256 14
	h_bsize_ffactor 256 21

	h_bsize_ffactor 512 21
	h_bsize_ffactor 512 28
	h_bsize_ffactor 512 43

	h_bsize_ffactor 1024 43
	h_bsize_ffactor 1024 57
	h_bsize_ffactor 1024 85

	h_bsize_ffactor 2048 85
	h_bsize_ffactor 2048 114
	h_bsize_ffactor 2048 171

	h_bsize_ffactor 4096 171
	h_bsize_ffactor 4096 228
	h_bsize_ffactor 4096 341

	h_bsize_ffactor 8192 341
	h_bsize_ffactor 8192 455
	h_bsize_ffactor 8192 683

	h_bsize_ffactor 16384 341
	h_bsize_ffactor 16384 455
	h_bsize_ffactor 16384 683

	h_bsize_ffactor 32768 341
	h_bsize_ffactor 32768 455
	h_bsize_ffactor 32768 683

	h_bsize_ffactor 65536 341
	h_bsize_ffactor 65536 455
	h_bsize_ffactor 65536 683
}
886
# This tests 64K block size addition/removal
atf_test_case four_char_hash
four_char_hash_head()
{
	atf_set "descr" \
		"Checks hash database with 4 char key and" \
		"value insert on a 65536 bucket size"
}
four_char_hash_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}

	# Script: put key "1234" with datum "1234", then remove it again.
	# No output is expected; atf_check verifies a clean exit.
	cat >in <<EOF
p
k1234
d1234
r
k1234
EOF

	atf_check "$(prog_db)" -i bsize=65536 hash in
}
910
911
atf_test_case bsize_torture
bsize_torture_head()
{
	atf_set "timeout" "36000"
	atf_set "descr" "Checks hash database with various bucket sizes"
}
bsize_torture_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir "${TMPDIR}"

	# h_lfsr takes the bucket size as its only argument.
	local bsize
	for bsize in 2048 4096 8192 16384 32768 65536; do
		atf_check "$(prog_lfsr)" "$bsize"
	done
}
927
atf_test_case btree_weird_page_split
btree_weird_page_split_head()
{
	atf_set "descr" \
		"Test for a weird page split condition where an insertion " \
		"into index 0 of a page that would cause the new item to " \
		"be the only item on the left page results in index 0 of " \
		"the right page being erroneously skipped; this only " \
		"happens with one particular key+data length for each page size."
	atf_set "timeout" "900"
}
btree_weird_page_split_body()
{
	for psize in 512 1024 2048 4096 8192; do
		echo " page size $psize"
		# Candidate key+data sizes: the 41 values just below half
		# the page size.
		kdsizes=`awk 'BEGIN {
			psize = '$psize'; hsize = int(psize/2);
			for (kdsize = hsize-40; kdsize <= hsize; kdsize++) {
				print kdsize;
			}
		}' /dev/null`

		# Use a series of keylen+datalen values in the right
		# neighborhood to find the one that triggers the bug.
		# We could compute the exact size that triggers the
		# bug but this additional fuzz may be useful.

		# Insert keys in reverse order to maximize the chances
		# for a split on index 0.

		for kdsize in $kdsizes; do
			awk 'BEGIN {
				kdsize = '$kdsize';
				for (i = 8; i-- > 0; ) {
					s = sprintf("a%03d:%09d", i, kdsize);
					for (j = 0; j < kdsize-20; j++) {
						s = s "x";
					}
					printf("p\nka%03d\nd%s\n", i, s);
				}
				print "o";
			}' /dev/null > in
			# The dump ('o') comes out in key order, so the data
			# lines extracted from the script are sorted to match.
			sed -n 's/^d//p' in | sort > exp
			atf_check -o file:exp \
			    "$(prog_db)" -i psize=$psize btree in
		done
	done
}
976
977 # Extremely tricky test attempting to replicate some unusual database
978 # corruption seen in the field: pieces of the database becoming
979 # inaccessible to random access, sequential access, or both. The
980 # hypothesis is that at least some of these are triggered by the bug
981 # in page splits on index 0 with a particular exact keylen+datalen.
982 # (See Test 40.) For psize=4096, this size is exactly 2024.
983
984 # The order of operations here relies on very specific knowledge of
985 # the internals of the btree access method in order to place records
986 # at specific offsets in a page and to create certain keys on internal
987 # pages. The to-be-split page immediately prior to the bug-triggering
988 # split has the following properties:
989 #
990 # * is not the leftmost leaf page
991 # * key on the parent page is compares less than the key of the item
992 # on index 0
993 # * triggering record's key also compares greater than the key on the
994 # parent page
995
996 # Additionally, we prime the mpool LRU chain so that the head page on
997 # the chain has the following properties:
998 #
999 # * record at index 0 is located where it will not get overwritten by
1000 # items written to the right-hand page during the split
1001 # * key of the record at index 0 compares less than the key of the
1002 # bug-triggering record
1003
1004 # If the page-split bug exists, this test appears to create a database
1005 # where some records are inaccessible to a search, but still remain in
1006 # the file and are accessible by sequential traversal. At least one
1007 # record gets duplicated out of sequence.
1008
atf_test_case btree_tricky_page_split
btree_tricky_page_split_head()
{
	atf_set "descr" \
		"btree: no unsearchables due to page split on index 0"
}
btree_tricky_page_split_body()
{
	# Keys fetched back at the end; built once and used both for the
	# script's get commands and (twice) for the expected output.
	list=`(for i in a b c d; do
			for j in 990 998 999; do
				echo g ${i}${j} 1024
			done
		done;
		echo g y997 2014
		for i in y z; do
			for j in 998 999; do
				echo g ${i}${j} 1024
			done
		done)`
	# Exact number for trigger condition accounts for newlines
	# retained by dbtest with -ofile but not without; we use
	# -ofile, so count newlines. keylen=5,datalen=5+2014 for
	# psize=4096 here.
	(cat - <<EOF
p z999 1024
p z998 1024
p y999 1024
p y990 1024
p d999 1024
p d990 1024
p c999 1024
p c990 1024
p b999 1024
p b990 1024
p a999 1024
p a990 1024
p y998 1024
r y990
p d998 1024
p d990 1024
p c998 1024
p c990 1024
p b998 1024
p b990 1024
p a998 1024
p a990 1024
p y997 2014
S
o
EOF
	echo "$list") |
	# awk script input:
	# {p|g|r} key [datasize]
	awk '/^[pgr]/{
		printf("%s\nk%s\n", $1, $2);
	}
	/^p/{
		s = $2;
		for (i = 0; i < $3; i++) {
			s = s "x";
		}
		printf("d%s\n", s);
	}
	!/^[pgr]/{
		print $0;
	}' > in
	# Expected output: each listed key's padded datum, twice — once
	# for the gets and once for the final dump.
	(echo "$list"; echo "$list") | awk '{
		s = $2;
		for (i = 0; i < $3; i++) {
			s = s "x";
		}
		print s;
	}' > exp
	atf_check -o file:exp \
	    "$(prog_db)" -i psize=4096 btree in
}
1085
atf_test_case btree_recursive_traversal
btree_recursive_traversal_head()
{
	atf_set "descr" \
		"btree: Test for recursive traversal successfully " \
		"retrieving records that are inaccessible to normal " \
		"sequential 'sibling-link' traversal. This works by " \
		"unlinking a few leaf pages but leaving their parent " \
		"links intact. To verify that the unlink actually makes " \
		"records inaccessible, the test first uses 'o' to do a " \
		"normal sequential traversal, followed by 'O' to do a " \
		"recursive traversal."
}
btree_recursive_traversal_body()
{
	fill="abcdefghijklmnopqrstuvwxyzy"
	# Put 20000 records, then unlink four leaf pages ('u' four times).
	script='{
		for (i = 0; i < 20000; i++) {
			printf("p\nkAA%05d\nd%05d%s\n", i, i, $0);
		}
		print "u";
		print "u";
		print "u";
		print "u";
	}'
	(echo $fill | awk "$script"; echo o) > in1
	# Sequential dump ('o'): records 5..40 must be missing, proving
	# the unlinked pages really became unreachable.
	echo $fill |
	awk '{
		for (i = 0; i < 20000; i++) {
			if (i >= 5 && i <= 40)
				continue;
			printf("%05d%s\n", i, $0);
		}
	}' > exp1
	atf_check -o file:exp1 \
	    "$(prog_db)" -i psize=512 btree in1
	# Recursive dump ('O'): every record must be found again.
	echo $fill |
	awk '{
		for (i = 0; i < 20000; i++) {
			printf("%05d%s\n", i, $0);
		}
	}' > exp2
	(echo $fill | awk "$script"; echo O) > in2
	atf_check -o file:exp2 \
	    "$(prog_db)" -i psize=512 btree in2
}
1132
atf_test_case btree_byteswap_unaligned_access_bksd
btree_byteswap_unaligned_access_bksd_head()
{
	atf_set "descr" \
		"btree: big key, small data, byteswap unaligned access"
}
btree_byteswap_unaligned_access_bksd_body()
{
	# Two puts whose keys are padded out to 491 bytes (3 + 488) with
	# a single byte of data each.
	printf 'foo\nbar\n' |
	awk '{
		s = $0
		for (i = 0; i < 488; i++) {
			s = s "x";
		}
		printf("p\nk%s\ndx\n", s);
	}' > in
	# Run the script at both byte orders; a clean exit is the check.
	for order in 1234 4321; do
		atf_check \
		    "$(prog_db)" -o out -i psize=512,lorder=$order btree in
	done
}
1154
atf_test_case btree_byteswap_unaligned_access_skbd
btree_byteswap_unaligned_access_skbd_head()
{
	atf_set "descr" \
		"btree: small key, big data, byteswap unaligned access"
}
btree_byteswap_unaligned_access_skbd_body()
{
	# 484 = 512 - 20 (header) - 7 ("foo1234") - 1 (newline)
	(echo foo1234; echo bar1234) |
	awk '{
		s = $0
		for (i = 0; i < 484; i++) {
			s = s "x";
		}
		printf("p\nk%s\nd%s\n", $0, s);
	}' > in
	# Run the script at both byte orders; a clean exit is the check.
	for order in 1234 4321; do
		atf_check \
		    "$(prog_db)" -o out -i psize=512,lorder=$order btree in
	done
}
1177
atf_test_case btree_known_byte_order
btree_known_byte_order_head()
{
	atf_set "descr" \
		"btree: small key, big data, known byte order"
}
btree_known_byte_order_body()
{
	# Common option prefix; expands to two args ("-i" "psize=...,lorder=N")
	# when used unquoted as $a$order below.
	local a="-i psize=512,lorder="

	# Expected output of the gets: the two padded data items.
	(echo foo1234; echo bar1234) |
	awk '{
		s = $0
		for (i = 0; i < 484; i++) {
			s = s "x";
		}
		printf("%s\n", s);
	}' > exp
	(echo foo1234; echo bar1234) |
	awk '{
		s = $0
		for (i = 0; i < 484; i++) {
			s = s "x";
		}
		printf("p\nk%s\nd%s\n", $0, s);
	}' > in1
	# Create one database file per byte order...
	for order in 1234 4321; do
		atf_check \
		    "$(prog_db)" -f out.$order $a$order btree in1
	done
	# ...then re-open each file (-s) and check both records read back.
	(echo g; echo kfoo1234; echo g; echo kbar1234) > in2
	for order in 1234 4321; do
		atf_check -o file:exp \
		    "$(prog_db)" -s -f out.$order $a$order btree in2
	done
}
1214
# Register every test case defined in this file, in definition order.
atf_init_test_cases()
{
	local tc
	for tc in \
		small_btree small_hash small_recno \
		medium_btree medium_hash medium_recno \
		big_btree big_hash big_recno \
		random_recno reverse_recno alternate_recno \
		delete_btree delete_recno \
		repeated_btree repeated_hash \
		duplicate_btree \
		cursor_flags_btree cursor_flags_recno \
		reverse_order_recno \
		small_page_btree \
		byte_orders_btree byte_orders_hash \
		bsize_ffactor four_char_hash bsize_torture \
		btree_weird_page_split btree_tricky_page_split \
		btree_recursive_traversal \
		btree_byteswap_unaligned_access_bksd \
		btree_byteswap_unaligned_access_skbd \
		btree_known_byte_order
	do
		atf_add_test_case "$tc"
	done
}
1250