# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2018 Intel Corporation
# All rights reserved.
#

source $rootdir/test/nvmf/common.sh

#######################################
# Tear down the nvmf target started by migration_tc2_configure_vhost().
# Dumps shared memory for post-mortem, then finalizes the nvmf test env.
# Globals: NVMF_APP_SHM_ID (read)
#######################################
function migration_tc2_cleanup_nvmf_tgt() {
	process_shm --id $NVMF_APP_SHM_ID
	nvmftestfini
}

#######################################
# Full teardown of the TC2 topology: VMs, vhost controllers/bdevs, both
# vhost app instances, and finally the nvmf target. Order matters — the
# vhost-side NVMe controllers must be detached before the nvmf target
# they are connected to goes away.
# Globals: incoming_vm, target_vm, incoming_vm_ctrlr, target_vm_ctrlr,
#          rpc_0, rpc_1 (read, then unset)
#######################################
function migration_tc2_cleanup_vhost_config() {
	timing_enter migration_tc2_cleanup_vhost_config

	notice "Shutting down all VMs"
	vm_shutdown_all

	notice "Removing vhost devices & controllers via RPC ..."
	# Delete bdev first to remove all LUNs and SCSI targets
	$rpc_0 bdev_nvme_detach_controller Nvme0
	$rpc_0 vhost_delete_controller $incoming_vm_ctrlr

	$rpc_1 bdev_nvme_detach_controller Nvme0
	$rpc_1 vhost_delete_controller $target_vm_ctrlr

	notice "killing vhost app"
	vhost_kill 0
	vhost_kill 1

	unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
	unset -v rpc_0 rpc_1

	migration_tc2_cleanup_nvmf_tgt

	timing_exit migration_tc2_cleanup_vhost_config
}

#######################################
# Build the TC2 topology: one nvmf target exporting Nvme0n1 over
# $TEST_TRANSPORT, two vhost instances each importing it as a
# vhost-scsi controller, and two VMs (incoming + migration target).
# Globals (set for the cleanup handlers): incoming_vm, target_vm,
#          incoming_vm_ctrlr, target_vm_ctrlr, rpc_0, rpc_1
#######################################
function migration_tc2_configure_vhost() {
	timing_enter migration_tc2_configure_vhost

	TEST_TRANSPORT=${TEST_TRANSPORT:-tcp}
	NET_TYPE=${NET_TYPE:-phy-fallback} \
		TEST_MODE=iso nvmftestinit

	# Those are global intentionally - they will be unset in cleanup handler

	incoming_vm=1
	target_vm=2
	incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
	target_vm_ctrlr=naa.VhostScsi0.$target_vm

	rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
	rpc_1="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"

	# Run nvmf_tgt and two vhost instances:
	# nvmf_tgt uses core id 2 (-m 0x4)
	# First uses core id 0
	# Second uses core id 1
	# This force to use VM 1 and 2.
	timing_enter start_nvmf_tgt
	notice "Running nvmf_tgt..."
	nvmfappstart -s 512 -m 0x4 --wait-for-rpc
	# Override the trap set in place via nvmfappstart()
	trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	rpc_cmd framework_start_init
	rpc_cmd nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
	# Attach the local NVMe drives to the nvmf target from generated config
	mapfile -t json < <("$rootdir/scripts/gen_nvme.sh")
	rpc_cmd load_subsystem_config -j "'${json[*]}'"
	timing_exit start_nvmf_tgt

	vhost_run -n 0 -a "-m 0x1 -s 512 -u"
	vhost_run -n 1 -a "-m 0x2 -s 512 -u"

	local nvmf_target_ip=$NVMF_FIRST_TARGET_IP

	if [[ -z "$nvmf_target_ip" ]]; then
		fail "no NIC for nvmf target"
	fi

	notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."

	# Construct shared bdevs and controllers
	rpc_cmd nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
	rpc_cmd nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
	rpc_cmd nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t "$TEST_TRANSPORT" -a $nvmf_target_ip -s 4420

	# Both vhost instances connect to the same remote namespace so the
	# migrated VM sees the same disk on either side.
	$rpc_0 bdev_nvme_attach_controller -b Nvme0 -t "$TEST_TRANSPORT" -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_0 vhost_create_scsi_controller $incoming_vm_ctrlr
	$rpc_0 vhost_scsi_controller_add_target $incoming_vm_ctrlr 0 Nvme0n1

	$rpc_1 bdev_nvme_attach_controller -b Nvme0 -t "$TEST_TRANSPORT" -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_1 vhost_create_scsi_controller $target_vm_ctrlr
	$rpc_1 vhost_scsi_controller_add_target $target_vm_ctrlr 0 Nvme0n1

	notice "Setting up VMs"
	vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
		--migrate-to=$target_vm --memory=1024 --vhost-name=0
	vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
		--vhost-name=1

	# Run everything
	vm_run $incoming_vm $target_vm

	# Wait only for incoming VM, as target is waiting for migration
	vm_wait_for_boot 300 $incoming_vm

	notice "Configuration done"

	timing_exit migration_tc2_configure_vhost
}

#######################################
# Error-path cleanup invoked from the trap installed in
# migration_tc2_configure_vhost(): kill VMs, then tear everything down.
#######################################
function migration_tc2_error_cleanup() {
	vm_kill_all
	# NOTE: migration_tc2_cleanup_vhost_config() already tears down the
	# nvmf target as its *last* step. Calling migration_tc2_cleanup_nvmf_tgt
	# directly here as well would run nvmftestfini twice and would kill the
	# nvmf target while the vhost controllers are still attached to it.
	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 FAILED"
}

#######################################
# Test case 2: live-migrate a VM between two vhost instances sharing one
# NVMe-oF-backed disk while fio runs inside the guest.
# Globals: fio_bin, os_image, SCSI_DISK (read); testdir, rootdir (read)
#######################################
function migration_tc2() {
	# Use 2 VMs:
	# incoming VM - the one we want to migrate
	# target VM - the one which will accept migration
	# VM uses 1 GiB memory, here we use light IO workload to keep number of dirty pages
	# is in low rate of VM's memory, see https://github.com/spdk/spdk/issues/2805.
	local job_file="$testdir/migration-tc2.job"
	local log_file
	log_file="/root/$(basename ${job_file%%.*}).log"

	migration_tc2_configure_vhost

	# Run fio before migration
	notice "Starting FIO"
	vm_check_scsi_location $incoming_vm
	run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"

	# Wait a while to let the FIO time to issue some IO
	sleep 5

	# Check if fio is still running before migration
	if ! is_fio_running $incoming_vm; then
		vm_exec $incoming_vm "cat $log_file"
		error "FIO is not running before migration: process crashed or finished too early"
	fi

	vm_migrate $incoming_vm
	sleep 3

	# Check if fio is still running after migration
	if ! is_fio_running $target_vm; then
		vm_exec $target_vm "cat $log_file"
		error "FIO is not running after migration: process crashed or finished too early"
	fi

	notice "Waiting for fio to finish"
	local timeout=20
	while is_fio_running $target_vm; do
		sleep 1
		echo -n "."
		if ((timeout-- == 0)); then
			error "timeout while waiting for FIO!"
		fi
	done

	notice "Fio result is:"
	vm_exec $target_vm "cat $log_file"

	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 SUCCESS"
}