#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2018 Intel Corporation
#  All rights reserved.
#

source $rootdir/test/nvmf/common.sh

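# Tear down the NVMe-oF side of the test: archive the shared-memory files left
# behind by the nvmf app (identified by $NVMF_APP_SHM_ID) and finalize the
# nvmf test environment.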
function migration_tc2_cleanup_nvmf_tgt() {
	process_shm --id $NVMF_APP_SHM_ID
	nvmftestfini
}

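# Full teardown: shut down both VMs, unwind the vhost/NVMe-oF configuration in
# reverse order of creation, then stop all SPDK apps.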
function migration_tc2_cleanup_vhost_config() {
	timing_enter migration_tc2_cleanup_vhost_config

	notice "Shutting down all VMs"
	vm_shutdown_all

	notice "Removing vhost devices & controllers via RPC ..."
	# Detach the bdev first to remove all LUNs and SCSI targets
	$rpc_0 bdev_nvme_detach_controller Nvme0
	$rpc_0 vhost_delete_controller $incoming_vm_ctrlr

	$rpc_1 bdev_nvme_detach_controller Nvme0
	$rpc_1 vhost_delete_controller $target_vm_ctrlr

	notice "Killing vhost apps"
	vhost_kill 0
	vhost_kill 1

	unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
	unset -v rpc_0 rpc_1

	migration_tc2_cleanup_nvmf_tgt

	timing_exit migration_tc2_cleanup_vhost_config
}

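# Bring up the whole topology: one NVMe-oF (RDMA) target exporting a single
# NVMe namespace, two vhost-scsi instances attached to it over the fabric,
# and two VMs - the migration source (incoming) and destination (target).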
function migration_tc2_configure_vhost() {
	timing_enter migration_tc2_configure_vhost

	# HACK: make sure rdma-capable NICs on mlx setups are actually detected - load the IB driver if it's missing
	modprobe mlx5_ib || :
	TEST_TRANSPORT=rdma TEST_MODE=iso nvmftestinit

	# These are intentionally global - they are unset in the cleanup handler
	incoming_vm=1
	target_vm=2
	incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
	target_vm_ctrlr=naa.VhostScsi0.$target_vm

	rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
	rpc_1="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"

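	# Each vhost instance listens on its own RPC socket, so e.g. $rpc_0 expands
	# to something like (exact path comes from get_vhost_dir):
	#   <rootdir>/scripts/rpc.py -s .../vhost/0/rpc.sock <method> [params]
	# letting us address each vhost instance's RPC server separately.
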
	# Run nvmf_tgt and two vhost instances:
	# nvmf_tgt uses core id 2 (-m 0x4)
	# First vhost instance uses core id 0 (-m 0x1)
	# Second vhost instance uses core id 1 (-m 0x2)
	# This forces VMs 1 and 2 onto the remaining cores.
	timing_enter start_nvmf_tgt
	notice "Running nvmf_tgt..."
	# --wait-for-rpc brings the app up with only its RPC server active;
	# framework_start_init below completes subsystem initialization
	nvmfappstart -s 512 -m 0x4 --wait-for-rpc
	# Override the trap set in place via nvmfappstart()
	trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	rpc_cmd framework_start_init
	rpc_cmd nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
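	# gen_nvme.sh emits a JSON subsystem config that attaches the host's local
	# NVMe controllers as bdevs. Illustrative shape only - names and PCI
	# addresses depend on the machine:
	#   {"subsystems":[{"subsystem":"bdev","config":[{"method":"bdev_nvme_attach_controller",
	#     "params":{"name":"Nvme0","trtype":"PCIe","traddr":"0000:65:00.0"}}]}]}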
	mapfile -t json < <("$rootdir/scripts/gen_nvme.sh")
	rpc_cmd load_subsystem_config -j "'${json[*]}'"
	timing_exit start_nvmf_tgt

	vhost_run -n 0 -a "-m 0x1 -s 512 -u"
	vhost_run -n 1 -a "-m 0x2 -s 512 -u"

	local rdma_ip_list
	local nvmf_target_ip
	rdma_ip_list=$(get_available_rdma_ips)
	nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)

	if [[ -z "$nvmf_target_ip" ]]; then
		fail "no NIC for nvmf target"
	fi

	notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."

	# Construct shared bdevs and controllers
	rpc_cmd nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
	rpc_cmd nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
	rpc_cmd nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $nvmf_target_ip -s 4420

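	# Both vhost instances attach to the same remote namespace, so the incoming
	# and target VM are backed by identical storage - the property that makes
	# live migration with IO in flight possible in this test case.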
	$rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_0 vhost_create_scsi_controller $incoming_vm_ctrlr
	$rpc_0 vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Nvme0n1

	$rpc_1 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_1 vhost_create_scsi_controller $target_vm_ctrlr
	$rpc_1 vhost_scsi_controller_add_target $target_vm_ctrlr 0 Nvme0n1

	notice "Setting up VMs"
	vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
		--migrate-to=$target_vm --memory=1024 --vhost-name=0
	vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
		--vhost-name=1

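	# --migrate-to/--incoming pair the two VMs: the target VM is started with
	# QEMU's -incoming option, so it waits for the migration stream instead of
	# booting on its own.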
	# Run everything
	vm_run $incoming_vm $target_vm

	# Wait only for the incoming VM; the target stays paused until migration
	vm_wait_for_boot 300 $incoming_vm

	notice "Configuration done"

	timing_exit migration_tc2_configure_vhost
}

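# Invoked from the INT/ERR/EXIT trap installed in migration_tc2_configure_vhost:
# kill everything unconditionally and report the failure.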
function migration_tc2_error_cleanup() {
	vm_kill_all
	migration_tc2_cleanup_nvmf_tgt
	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 FAILED"
}

function migration_tc2() {
	# Use 2 VMs:
	# incoming VM - the one we want to migrate
	# target VM - the one which will accept the migration
	local job_file="$testdir/migration-tc2.job"
	local log_file
	log_file="/root/$(basename ${job_file%%.*}).log"
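	# ${job_file%%.*} strips the longest ".*" suffix (everything from the first
	# dot), so ".../migration-tc2.job" becomes ".../migration-tc2" and the log
	# lands at /root/migration-tc2.log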

	migration_tc2_configure_vhost

	# Run fio before migration
	notice "Starting FIO"
	vm_check_scsi_location $incoming_vm
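	# --vm pairs the VM number with its device list: printf appends
	# ":/dev/<disk>" for every name in $SCSI_DISK, producing e.g. "1:/dev/sdb"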
	run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"

	# Wait a while to give fio time to issue some IO
	sleep 5

	# Check if fio is still running before migration
	if ! is_fio_running $incoming_vm; then
		vm_exec $incoming_vm "cat $log_file"
		error "FIO is not running before migration: process crashed or finished too early"
	fi

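	# vm_migrate drives the live migration (via the QEMU monitor) to the VM
	# paired through --migrate-to; fio keeps running throughout, since both
	# VMs share the same NVMe-oF backed disk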
	vm_migrate $incoming_vm
	sleep 3

	# Check if fio is still running after migration
	if ! is_fio_running $target_vm; then
		vm_exec $target_vm "cat $log_file"
		error "FIO is not running after migration: process crashed or finished too early"
	fi

	notice "Waiting for fio to finish"
	local timeout=20
	while is_fio_running $target_vm; do
		sleep 1
		echo -n "."
		if ((timeout-- == 0)); then
			error "timeout while waiting for FIO!"
		fi
	done

	notice "Fio result is:"
	vm_exec $target_vm "cat $log_file"

	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 SUCCESS"
}