xref: /spdk/test/blobfs/rocksdb/rocksdb.sh (revision 19ce0392de8615937480713f70c52c02806fb61b)
1#!/usr/bin/env bash
2#  SPDX-License-Identifier: BSD-3-Clause
3#  Copyright (C) 2017 Intel Corporation
4#  All rights reserved.
5#
6testdir=$(readlink -f $(dirname $0))
7rootdir=$(readlink -f $testdir/../../..)
8source $rootdir/test/common/autotest_common.sh
9
sanitize_results() {
	# Collect any core dumps from the run and make the result files
	# world-readable so the CI can archive them.
	process_core
	# Use an explicit 'if' rather than '[[ ... ]] &&' so a missing results
	# dir does not make this function return non-zero (it is the script's
	# last command and also runs from the error trap under errexit).
	if [[ -d $RESULTS_DIR ]]; then
		chmod 644 "$RESULTS_DIR/"*
	fi
}
14
dump_db_bench_on_err() {
	# Dump the captured output of the last run_step to stderr so a failed
	# phase can be diagnosed straight from the console log.
	[[ -e $db_bench ]] || return 0

	# Keep the (potentially huge) dump out of the xtrace output.
	xtrace_disable
	# NOTE(review): $step is presumably set by run_test — confirm in
	# autotest_common.sh.
	local prefix="* $step (FAILED)"
	mapfile -t bench_output < "$db_bench"
	printf '%s\n' "${bench_output[@]/#/$prefix}" >&2
	xtrace_restore
}
25
run_step() {
	# Run one db_bench phase.
	# $1 - name of the phase (insert, randread, overwrite, ...); matching
	#      $1_flags.txt must already exist in the current directory.
	# Side effects: appends SPDK flags to $1_flags.txt, writes $1_db_bench.txt
	# and $1_trace.gz, and exports the log path via the global $db_bench
	# (read by dump_db_bench_on_err).
	if [ -z "$1" ]; then
		echo "run_step called with no parameter" >&2
		exit 1
	fi

	# '<<-' strips leading tabs so the heredoc can stay indented.
	cat <<- EOL >> "$1"_flags.txt
		--spdk=$ROCKSDB_CONF
		--spdk_bdev=Nvme0n1
		--spdk_cache_size=$CACHE_SIZE
	EOL

	db_bench=$1_db_bench.txt
	echo -n "Start $1 test phase..."
	time taskset 0xFF "$DB_BENCH" --flagfile="$1"_flags.txt &> "$db_bench"
	# db_bench logs the path of the trace file it created under /dev/shm;
	# extract it. (\w already matches digits — the old '\d' alternative was
	# not a valid BRE class and only matched a literal 'd'.)
	DB_BENCH_FILE=$(grep -o '/dev/shm/\(\w\|\.\|/\)*' "$db_bench")
	gzip "$DB_BENCH_FILE"
	mv "$DB_BENCH_FILE.gz" "$1"_trace.gz
	chmod 644 "$1"_trace.gz
	echo "done."
}
47
run_bsdump() {
	# Run blobcli against the blobstore on Nvme0n1 with the BlobFS
	# tracepoint group enabled (historically bit mask 0x80), capturing
	# everything into bsdump.txt in the current directory.
	# NOTE(review): the script cd's into $RESULTS_DIR before this runs, so
	# bsdump.txt lands there — confirm if invoking this elsewhere.
	$SPDK_EXAMPLE_DIR/blobcli -j $ROCKSDB_CONF -b Nvme0n1 --tpoint-group blobfs &> bsdump.txt
}
52
# In the autotest job, we copy the rocksdb source to just outside the spdk directory.
DB_BENCH_DIR="$rootdir/../rocksdb"
DB_BENCH=$DB_BENCH_DIR/db_bench
ROCKSDB_CONF=$testdir/rocksdb.json

if [ ! -e "$DB_BENCH_DIR" ]; then
	echo "$DB_BENCH_DIR does not exist" >&2
	# 'false' (rather than 'exit') lets the autotest errexit/ERR handling
	# terminate the run and report the failure.
	false
fi
62
timing_enter db_bench_build

pushd $DB_BENCH_DIR
# Start from a pristine tree unless the caller asked to keep artifacts.
if [[ -z $SKIP_GIT_CLEAN ]]; then
	git clean -x -f -d
fi

# Silence warnings that newer GCC releases raise for the rocksdb sources.
EXTRA_CXXFLAGS=""
GCC_VERSION=$(cc -dumpversion | cut -d. -f1)
if ((GCC_VERSION >= 11)); then
	EXTRA_CXXFLAGS+="-Wno-error=range-loop-construct"
elif ((GCC_VERSION >= 9)); then
	EXTRA_CXXFLAGS+="-Wno-deprecated-copy -Wno-pessimizing-move -Wno-error=stringop-truncation"
fi

# $MAKEFLAGS/$MAKECONFIG are intentionally unquoted so they word-split into
# separate make arguments.
$MAKE db_bench $MAKEFLAGS $MAKECONFIG DEBUG_LEVEL=0 SPDK_DIR=../spdk EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS"
popd

timing_exit db_bench_build
82
# Generate the NVMe bdev JSON config used by blobfs/db_bench for this run.
$rootdir/scripts/gen_nvme.sh --json-with-subsystems -n 1 > $ROCKSDB_CONF

# On interrupt or any failure: dump the failing db_bench log, take a
# best-effort blobstore dump, remove the generated config and sanitize the
# results dir before exiting non-zero.
trap 'dump_db_bench_on_err; run_bsdump || :; rm -f $ROCKSDB_CONF; sanitize_results; exit 1' SIGINT SIGTERM EXIT

if [ -z "$SKIP_MKFS" ]; then
	# 0x80 is the bit mask for BlobFS tracepoints
	run_test "blobfs_mkfs" $rootdir/test/blobfs/mkfs/mkfs $ROCKSDB_CONF Nvme0n1 --tpoint-group blobfs
fi

mkdir -p $output_dir/rocksdb
RESULTS_DIR=$output_dir/rocksdb
# Nightly runs use a bigger cache, longer duration and 5x the key count.
if [ $RUN_NIGHTLY -eq 1 ]; then
	CACHE_SIZE=4096
	DURATION=60
	NUM_KEYS=100000000
else
	CACHE_SIZE=2048
	DURATION=20
	NUM_KEYS=20000000
fi
# Make sure that there's enough memory available for the mempool. Unfortunately,
# db_bench doesn't seem to allocate memory from all numa nodes since all of it
# comes exclusively from node0. With that in mind, try to allocate CACHE_SIZE
# + some_overhead (1G) of pages but only on node0 to make sure that we end up
# with the right amount not allowing setup.sh to split it by using the global
# nr_hugepages setting. Instead of bypassing it completely, we use it to also
# get the right size of hugepages.
HUGEMEM=$((CACHE_SIZE + 2048)) HUGENODE=0 \
	"$rootdir/scripts/setup.sh"

# All flag files, logs and traces are produced in the results directory.
cd $RESULTS_DIR
# insert: sequential fill of a fresh DB, WAL disabled.
cp $testdir/common_flags.txt insert_flags.txt
cat << EOL >> insert_flags.txt
--benchmarks=fillseq
--threads=1
--disable_wal=1
--use_existing_db=0
--num=$NUM_KEYS
EOL
122
123cp $testdir/common_flags.txt randread_flags.txt
124cat << EOL >> randread_flags.txt
125--benchmarks=readrandom
126--threads=16
127--duration=$DURATION
128--disable_wal=1
129--use_existing_db=1
130--num=$NUM_KEYS
131EOL
132
133cp $testdir/common_flags.txt overwrite_flags.txt
134cat << EOL >> overwrite_flags.txt
135--benchmarks=overwrite
136--threads=1
137--duration=$DURATION
138--disable_wal=1
139--use_existing_db=1
140--num=$NUM_KEYS
141EOL
142
143cp $testdir/common_flags.txt readwrite_flags.txt
144cat << EOL >> readwrite_flags.txt
145--benchmarks=readwhilewriting
146--threads=4
147--duration=$DURATION
148--disable_wal=1
149--use_existing_db=1
150--num=$NUM_KEYS
151EOL
152
153cp $testdir/common_flags.txt writesync_flags.txt
154cat << EOL >> writesync_flags.txt
155--benchmarks=overwrite
156--threads=1
157--duration=$DURATION
158--disable_wal=0
159--use_existing_db=1
160--sync=1
161--num=$NUM_KEYS
162EOL
163
164run_test "rocksdb_insert" run_step insert
165run_test "rocksdb_overwrite" run_step overwrite
166run_test "rocksdb_readwrite" run_step readwrite
167run_test "rocksdb_writesync" run_step writesync
168run_test "rocksdb_randread" run_step randread
169
170trap - SIGINT SIGTERM EXIT
171
172run_bsdump
173rm -f $ROCKSDB_CONF
174
175[[ -z "$SKIP_GIT_CLEAN" ]] && git -C "$DB_BENCH_DIR" clean -xfd
176
177sanitize_results
178