source $rootdir/test/nvmf/common.sh

function migration_tc2_cleanup_nvmf_tgt() {
	process_shm --id $NVMF_APP_SHM_ID
	nvmftestfini
}

function migration_tc2_cleanup_vhost_config() {
	timing_enter migration_tc2_cleanup_vhost_config

	notice "Shutting down all VMs"
	vm_shutdown_all

	notice "Removing vhost devices & controllers via RPC ..."
	# Delete bdev first to remove all LUNs and SCSI targets
	$rpc_0 bdev_nvme_detach_controller Nvme0
	$rpc_0 vhost_delete_controller $incoming_vm_ctrlr

	$rpc_1 bdev_nvme_detach_controller Nvme0
	$rpc_1 vhost_delete_controller $target_vm_ctrlr

	notice "killing vhost app"
	vhost_kill 0
	vhost_kill 1

	unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
	unset -v rpc_0 rpc_1

	migration_tc2_cleanup_nvmf_tgt

	timing_exit migration_tc2_cleanup_vhost_config
}

function migration_tc2_configure_vhost() {
	timing_enter migration_tc2_configure_vhost

	# HACK: Make sure we stop detecting rdma-capable NICs on mlx
	modprobe mlx5_ib || :
	TEST_TRANSPORT=rdma TEST_MODE=iso nvmftestinit

	# These are intentionally global - they will be unset in the cleanup handler

	incoming_vm=1
	target_vm=2
	incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
	target_vm_ctrlr=naa.VhostScsi0.$target_vm

	rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
	rpc_1="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"

	# Run nvmf_tgt and two vhost instances:
	# nvmf_tgt uses core id 2 (-m 0x4)
	# The first vhost instance uses core id 0
	# The second vhost instance uses core id 1
	# This forces the test to use VMs 1 and 2.
	timing_enter start_nvmf_tgt
	notice "Running nvmf_tgt..."
	nvmfappstart -s 512 -m 0x4 --wait-for-rpc
	# Override the trap set in place via nvmfappstart()
	trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
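	# nvmfappstart was invoked with --wait-for-rpc, so the target comes up with its
	# framework paused; initialize it explicitly over RPC before creating the RDMA
	# transport and loading the bdev configuration emitted by gen_nvme.sh (the
	# locally attached NVMe drive that should back Nvme0n1 below).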
	rpc_cmd framework_start_init
	rpc_cmd nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
	mapfile -t json < <("$rootdir/scripts/gen_nvme.sh")
	rpc_cmd load_subsystem_config -j "'${json[*]}'"
	timing_exit start_nvmf_tgt

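	# Start two separate vhost targets, one per VM, each pinned to its own core
	# (-m 0x1 and -m 0x2) and reachable through its own RPC socket ($rpc_0 / $rpc_1).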
	vhost_run -n 0 -a "-m 0x1 -s 512 -u"
	vhost_run -n 1 -a "-m 0x2 -s 512 -u"

	local rdma_ip_list
	local nvmf_target_ip
	rdma_ip_list=$(get_available_rdma_ips)
	nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)

	if [[ -z "$nvmf_target_ip" ]]; then
		fail "no NIC for nvmf target"
	fi

	notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."

	# Construct shared bdevs and controllers
	rpc_cmd nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
	rpc_cmd nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
	rpc_cmd nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $nvmf_target_ip -s 4420

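	# Each vhost target attaches to the same NVMe-oF subsystem over RDMA, so both
	# expose the same Nvme0n1 namespace to their VM - the incoming and target VMs
	# share one backing device, which is what keeps the storage consistent across
	# the migration for the fio workload.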
	$rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_0 vhost_create_scsi_controller $incoming_vm_ctrlr
	$rpc_0 vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Nvme0n1

	$rpc_1 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_1 vhost_create_scsi_controller $target_vm_ctrlr
	$rpc_1 vhost_scsi_controller_add_target $target_vm_ctrlr 0 Nvme0n1

	notice "Setting up VMs"
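	# The incoming VM (the migration source) is paired with vhost 0 and marked with
	# --migrate-to; the target VM is paired with vhost 1 and started with --incoming,
	# which presumably maps to QEMU's -incoming option, so it boots paused and waits
	# for the migration stream.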
	vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
		--migrate-to=$target_vm --memory=1024 --vhost-name=0
	vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
		--vhost-name=1

	# Run everything
	vm_run $incoming_vm $target_vm

	# Wait only for the incoming VM to boot; the target VM is waiting for the migration
	vm_wait_for_boot 300 $incoming_vm

	notice "Configuration done"

	timing_exit migration_tc2_configure_vhost
}

function migration_tc2_error_cleanup() {
	vm_kill_all
	migration_tc2_cleanup_nvmf_tgt
	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 FAILED"
}

function migration_tc2() {
	# Use 2 VMs:
	# incoming VM - the one we want to migrate
	# target VM - the one which will accept the migration
	local job_file="$testdir/migration-tc2.job"
	local log_file
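	# Derive the log name from the job file: ${job_file%%.*} cuts the string at its
	# first '.', so this yields /root/migration-tc2.log as long as $testdir itself
	# contains no dots.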
	log_file="/root/$(basename ${job_file%%.*}).log"

	migration_tc2_configure_vhost

	# Run fio before migration
	notice "Starting FIO"
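	# vm_check_scsi_location is expected to locate the vhost-scsi disk inside the
	# guest and set $SCSI_DISK; fio is then started in the background
	# (--no-wait-for-fio) so the migration below can happen while IO is in flight.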
	vm_check_scsi_location $incoming_vm
	run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"

	# Wait a while to give fio time to issue some IO
	sleep 5

	# Check if fio is still running before migration
	if ! is_fio_running $incoming_vm; then
		vm_exec $incoming_vm "cat $log_file"
		error "FIO is not running before migration: process crashed or finished too early"
	fi

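	# Live-migrate the incoming VM to the target VM while fio keeps running inside
	# the guest; afterwards the same checks are repeated against the target VM.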
	vm_migrate $incoming_vm
	sleep 3

	# Check if fio is still running after migration
	if ! is_fio_running $target_vm; then
		vm_exec $target_vm "cat $log_file"
		error "FIO is not running after migration: process crashed or finished too early"
	fi

	notice "Waiting for fio to finish"
	local timeout=20
	while is_fio_running $target_vm; do
		sleep 1
		echo -n "."
		if ((timeout-- == 0)); then
			error "timeout while waiting for FIO!"
		fi
	done

	notice "Fio result is:"
	vm_exec $target_vm "cat $log_file"

	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 SUCCESS"
}
