/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"

DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
	      (struct spdk_vhost_session *vsession));
DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
		uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
		uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
	    (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
	    (const char *path, struct vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);

DEFINE_STUB(rte_vhost_set_last_inflight_io_split, int,
	    (int vid, uint16_t vring_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_split, int,
	    (int vid, uint16_t vring_idx, uint16_t last_used_idx, uint16_t idx), 0);
DEFINE_STUB(rte_vhost_set_last_inflight_io_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB(rte_vhost_clr_inflight_desc_packed, int,
	    (int vid, uint16_t vring_idx, uint16_t head), 0);
DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
DEFINE_STUB_V(vhost_session_mem_register, (struct rte_vhost_memory *mem));
DEFINE_STUB_V(vhost_session_mem_unregister, (struct rte_vhost_memory *mem));
DEFINE_STUB(vhost_get_negotiated_features, int,
	    (int vid, uint64_t *negotiated_features), 0);
DEFINE_STUB(rte_vhost_get_vhost_ring_inflight, int,
	    (int vid, uint16_t vring_idx, struct rte_vhost_ring_inflight *vring), 0);
DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
	    (int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB(vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);

void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend;

static int
test_setup(void)
{
	return 0;
}

static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
	struct spdk_vhost_dev *vdev = NULL;
	int rc;

	/* spdk_vhost_dev must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vdev != NULL);
	memset(vdev, 0, sizeof(*vdev));
	rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
	if (rc == 0) {
		*vdev_p = vdev;
	} else {
		free(vdev);
		*vdev_p = NULL;
	}

	return rc;
}

static void
start_vdev(struct spdk_vhost_dev *vdev)
{
	struct rte_vhost_memory *mem;
	struct spdk_vhost_session *vsession = NULL;
	int rc;

	mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
	SPDK_CU_ASSERT_FATAL(mem != NULL);
	mem->nregions = 2;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].size = 0x400000; /* 4 MB */
	mem->regions[0].host_user_addr = 0x1000000;
	mem->regions[1].guest_phys_addr = 0x400000;
	mem->regions[1].size = 0x400000; /* 4 MB */
	mem->regions[1].host_user_addr = 0x2000000;

	assert(TAILQ_EMPTY(&vdev->vsessions));
	/* spdk_vhost_session must be allocated on a cache line boundary. */
	rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(vsession != NULL);
	vsession->started = true;
	vsession->vid = 0;
	vsession->mem = mem;
	TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);

	TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
	free(vsession->mem);
	free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		stop_vdev(vdev);
	}
	vhost_dev_unregister(vdev);
	free(vdev);
}

static void
desc_to_iov_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct iovec iov[SPDK_VHOST_IOVS_MAX];
	uint16_t iov_index;
	struct vring_desc desc;
	int rc;

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	/* Test simple case where iov falls fully within a 2MB page. */
	desc.addr = 0x110000;
	desc.len = 0x1000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[0].iov_len == 0x1000);
	/*
	 * Always memset the iov to ensure each test validates data written by its call
	 * to the function under test.
	 */
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
	memset(iov, 0, sizeof(iov));

	/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
	iov_index = SPDK_VHOST_IOVS_MAX;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc != 0);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
	desc.addr = 0x1F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 1);
	CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[0].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Same test, but ensure it respects the non-zero starting iov_index. */
	iov_index = SPDK_VHOST_IOVS_MAX - 1;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
	CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
	memset(iov, 0, sizeof(iov));

	/* Test case where iov spans a vhost memory region. */
	desc.addr = 0x3F0000;
	desc.len = 0x20000;
	iov_index = 0;
	rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(iov_index == 2);
	CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
	CU_ASSERT(iov[0].iov_len == 0x10000);
	CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
	CU_ASSERT(iov[1].iov_len == 0x10000);
	memset(iov, 0, sizeof(iov));

	cleanup_vdev(vdev);

	CU_ASSERT(true);
}

static void
create_controller_test(void)
{
	struct spdk_vhost_dev *vdev, *vdev2;
	int ret;
	char long_name[PATH_MAX];

	spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

	/* Create device with no name. */
	ret = alloc_vdev(&vdev, NULL, "0x1");
	CU_ASSERT(ret != 0);

	/* Create device with an invalid cpumask (not a subset of the vhost core mask). */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
	CU_ASSERT(ret != 0);

	/* Create device with a name and path that are too long. */
	memset(long_name, 'x', sizeof(long_name));
	long_name[PATH_MAX - 1] = 0;
	snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
	ret = alloc_vdev(&vdev, long_name, "0x1");
	CU_ASSERT(ret != 0);
	dev_dirname[0] = 0;

	/* Create device when the device name is already taken. */
	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
	ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
	CU_ASSERT(ret != 0);
	cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_session *tmp;
	int rc;

	rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
	start_vdev(vdev);

	vsession = TAILQ_FIRST(&vdev->vsessions);

	tmp = vhost_session_find_by_vid(vsession->vid);
	CU_ASSERT(tmp == vsession);

	/* Search for a session with an incorrect vid. */
	tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
	CU_ASSERT(tmp == NULL);

	cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
	struct spdk_vhost_dev *vdev;
	int ret;

	ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
	SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

	/* Removing the device must fail while the controller is in use. */
	start_vdev(vdev);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
	ret = vhost_dev_unregister(vdev);
	CU_ASSERT(ret != 0);

	cleanup_vdev(vdev);
}

static void
vq_avail_ring_get_test(void)
{
	struct spdk_vhost_virtqueue vq = {};
	uint16_t avail_mem[34];
	uint16_t reqs[32];
	uint16_t reqs_len, ret, i;

	/* Basic case: reap all outstanding requests at once. */
	vq.vring.avail = (struct vring_avail *)avail_mem;
	vq.vring.size = 32;
	vq.last_avail_idx = 24;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	for (i = 0; i < 32; i++) {
		vq.vring.avail->ring[i] = i;
	}

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 29);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
	}

	/* Basic case: reap only some of the outstanding requests. */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 29;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == reqs_len);
	CU_ASSERT(vq.last_avail_idx == 26);
	for (i = 0; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
	}
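
	/*
	 * Note on the two cases below: the available index is a free-running
	 * 16-bit counter.  They exercise (1) an invalid jump larger than the
	 * ring size, where nothing should be reaped, and (2) a legitimate wrap
	 * of the 16-bit index across 65535 -> 0.
	 */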

	/* Invalid case: avail->idx has advanced by more than the ring size. */
	vq.last_avail_idx = 20;
	vq.vring.avail->idx = 156;
	reqs_len = 6;

	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 0);

	/* Test overflow (wraparound) of the 16-bit avail->idx variable. */
	vq.last_avail_idx = 65535;
	vq.vring.avail->idx = 4;
	reqs_len = 6;
	ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
	CU_ASSERT(ret == 5);
	CU_ASSERT(vq.last_avail_idx == 4);
	CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
	for (i = 1; i < ret; i++) {
		CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
	}
}

static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
		      int16_t guest_used_phase)
{
	return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
		!!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
			int16_t *guest_avail_phase)
{
	if (*guest_avail_phase) {
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
	} else {
		vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
		vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
	}

	if (++(*guest_last_avail_idx) >= vq->vring.size) {
		*guest_last_avail_idx -= vq->vring.size;
		*guest_avail_phase = !(*guest_avail_phase);
	}
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
				    int16_t *guest_used_phase)
{
	int16_t buffer_id = -1;

	if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
		buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
		if (++(*guest_last_used_idx) >= vq->vring.size) {
			*guest_last_used_idx -= vq->vring.size;
			*guest_used_phase = !(*guest_used_phase);
		}

		return buffer_id;
	}

	return -1;
}

static void
vq_packed_ring_test(void)
{
	struct spdk_vhost_session vs = {};
	struct spdk_vhost_virtqueue vq = {};
	struct vring_packed_desc descs[4];
	uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
	uint16_t guest_avail_phase = 1, guest_used_phase = 1;
	int i;
	int16_t chain_num;

	vq.vring.desc_packed = descs;
	vq.vring.size = 4;

	/* The avail and used wrap counters are initialized to 1. */
	vq.packed.avail_phase = 1;
	vq.packed.used_phase = 1;
	vq.packed.packed_ring = true;
	memset(descs, 0, sizeof(descs));

	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

	/* Guest sends requests. */
	for (i = 0; i < vq.vring.size; i++) {
		descs[guest_last_avail_idx].id = i;
		/* Mark the desc as available. */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}
	CU_ASSERT(guest_last_avail_idx == 0);
	CU_ASSERT(guest_avail_phase == 0);

	/* Host handles the available descs. */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 0;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
		CU_ASSERT(chain_num == 1);
	}

	/* Host completes them out of order: 1, 0, 2. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	/* Host has fetched all the available requests but has completed only three of them. */
	CU_ASSERT(vq.last_avail_idx == 0);
	CU_ASSERT(vq.packed.avail_phase == 0);
	CU_ASSERT(vq.last_used_idx == 3);
	CU_ASSERT(vq.packed.used_phase == 1);

	/* Guest handles the completed requests. */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
	CU_ASSERT(guest_last_used_idx == 3);
	CU_ASSERT(guest_used_phase == 1);

	/* Three descs are free again, so the guest can send three more requests. */
	for (i = 0; i < 3; i++) {
		descs[guest_last_avail_idx].id = 2 - i;
		/* Mark the desc as available. */
		vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
	}

	/* Host handles the available descs. */
	CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
	i = 2;
	while (vhost_vq_packed_ring_is_avail(&vq)) {
		CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
		CU_ASSERT(chain_num == 1);
	}

	/* The host now holds four requests: the three new ones plus the one left from before. */
	CU_ASSERT(vq.last_avail_idx == 3);
	/* The available wrap counter should have flipped. */
	CU_ASSERT(vq.packed.avail_phase == 0);

	/* Host completes all the outstanding requests. */
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1, 0);
	vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1, 0);

	CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
	CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

	/* Guest handles the completed requests. */
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
	CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

	CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
	CU_ASSERT(guest_avail_phase == guest_used_phase);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vhost_suite", test_setup, NULL);

	CU_ADD_TEST(suite, desc_to_iov_test);
	CU_ADD_TEST(suite, create_controller_test);
	CU_ADD_TEST(suite, session_find_by_vid_test);
	CU_ADD_TEST(suite, remove_controller_test);
	CU_ADD_TEST(suite, vq_avail_ring_get_test);
	CU_ADD_TEST(suite, vq_packed_ring_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}