/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Marvell.
 */

#include "test.h"
#include <stdio.h>
#include <inttypes.h>
#include <string.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_random.h>

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_event_dma_adapter(void)
{
	printf("event_dma_adapter not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

#else

#include <rte_bus_vdev.h>
#include <rte_dmadev.h>
#include <rte_eventdev.h>
#include <rte_event_dma_adapter.h>
#include <rte_service.h>

#define NUM_MBUFS		(8191)
#define MBUF_CACHE_SIZE		(256)
#define TEST_APP_PORT_ID	0
#define TEST_APP_EV_QUEUE_ID	0
#define TEST_APP_EV_PRIORITY	0
#define TEST_APP_EV_FLOWID	0xAABB
#define TEST_DMA_EV_QUEUE_ID	1
#define TEST_ADAPTER_ID		0
#define TEST_DMA_DEV_ID		0
#define TEST_DMA_VCHAN_ID	0
#define PACKET_LENGTH		1024
#define NB_TEST_PORTS		1
#define NB_TEST_QUEUES		2
#define NUM_CORES		2
#define DMA_OP_POOL_SIZE	128
#define TEST_MAX_OP		32
#define TEST_RINGSIZE		512

#define MBUF_SIZE		(RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)

/* Handle log statements in same manner as test macros */
#define LOG_DBG(...)	RTE_LOG(DEBUG, EAL, __VA_ARGS__)

struct event_dma_adapter_test_params {
	struct rte_mempool *src_mbuf_pool;
	struct rte_mempool *dst_mbuf_pool;
	struct rte_mempool *op_mpool;
	uint8_t dma_event_port_id;
	uint8_t internal_port_op_fwd;
};

/* Response event the adapter enqueues back to the application queue
 * once a DMA operation completes.
 */
static struct rte_event dma_response_info = {
	.queue_id = TEST_APP_EV_QUEUE_ID,
	.sched_type = RTE_SCHED_TYPE_ATOMIC,
	.flow_id = TEST_APP_EV_FLOWID,
	.priority = TEST_APP_EV_PRIORITY,
	.op = RTE_EVENT_OP_NEW,
};

static struct event_dma_adapter_test_params params;
static uint8_t dma_adapter_setup_done;
static uint32_t slcore_id;
static int evdev;

/* Enqueue TEST_MAX_OP events carrying DMA ops, then wait until all of
 * them come back as completion events on the application port.
 */
static int
send_recv_ev(struct rte_event *ev)
{
	struct rte_event recv_ev[TEST_MAX_OP];
	uint16_t nb_enqueued = 0;
	int i = 0;

	if (params.internal_port_op_fwd) {
		nb_enqueued = rte_event_dma_adapter_enqueue(evdev, TEST_APP_PORT_ID, ev,
							    TEST_MAX_OP);
	} else {
		while (nb_enqueued < TEST_MAX_OP) {
			nb_enqueued += rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID,
							       &ev[nb_enqueued],
							       TEST_MAX_OP - nb_enqueued);
		}
	}

	TEST_ASSERT_EQUAL(nb_enqueued, TEST_MAX_OP, "Failed to send event to dma adapter\n");

	while (i < TEST_MAX_OP) {
		if (rte_event_dequeue_burst(evdev, TEST_APP_PORT_ID, &recv_ev[i], 1, 0) != 1)
			continue;
		i++;
	}

	TEST_ASSERT_EQUAL(i, TEST_MAX_OP, "Failed to dequeue events\n");

	return TEST_SUCCESS;
}
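
/* Print the adapter statistics counters for TEST_ADAPTER_ID and reset
 * them afterwards so that each test case observes its own counts.
 */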
static int
test_dma_adapter_stats(void)
{
	struct rte_event_dma_adapter_stats stats;

	rte_event_dma_adapter_stats_get(TEST_ADAPTER_ID, &stats);
	printf(" +------------------------------------------------------+\n");
	printf(" + DMA adapter stats for instance %u:\n", TEST_ADAPTER_ID);
	printf(" + Event port poll count          0x%" PRIx64 "\n",
	       stats.event_poll_count);
	printf(" + Event dequeue count            0x%" PRIx64 "\n",
	       stats.event_deq_count);
	printf(" + DMA dev enqueue count          0x%" PRIx64 "\n",
	       stats.dma_enq_count);
	printf(" + DMA dev enqueue failed count   0x%" PRIx64 "\n",
	       stats.dma_enq_fail_count);
	printf(" + DMA dev dequeue count          0x%" PRIx64 "\n",
	       stats.dma_deq_count);
	printf(" + Event enqueue count            0x%" PRIx64 "\n",
	       stats.event_enq_count);
	printf(" + Event enqueue retry count      0x%" PRIx64 "\n",
	       stats.event_enq_retry_count);
	printf(" + Event enqueue fail count       0x%" PRIx64 "\n",
	       stats.event_enq_fail_count);
	printf(" +------------------------------------------------------+\n");

	rte_event_dma_adapter_stats_reset(TEST_ADAPTER_ID);
	return TEST_SUCCESS;
}

/* Exercise rte_event_dma_adapter_runtime_params_set/get() with max_nb
 * values below, at and above the adapter batch size, verifying that
 * every value set is read back unchanged.
 */
static int
test_dma_adapter_params(void)
{
	static const uint32_t max_nb_cases[] = {32, 192, 256, 30, 512};
	struct rte_event_dma_adapter_runtime_params out_params;
	struct rte_event_dma_adapter_runtime_params in_params;
	struct rte_event event;
	unsigned int i;
	uint32_t cap;
	int err, rc;

	err = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
	TEST_ASSERT_SUCCESS(err, "Failed to get adapter capabilities\n");

	event.event = dma_response_info.event;
	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
		err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, &event);
	} else {
		err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, NULL);
	}

	TEST_ASSERT_SUCCESS(err, "Failed to add vchan\n");

	err = rte_event_dma_adapter_runtime_params_init(&in_params);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	err = rte_event_dma_adapter_runtime_params_init(&out_params);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Case 1: Get the default value of max_nb ops processed by adapter */
	err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
	if (err == -ENOTSUP) {
		rc = TEST_SKIPPED;
		goto vchan_del;
	}
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Cases 2-6: set max_nb to values below, at and above the adapter
	 * batch size (BATCH_SIZE = 32) and verify each value is read back
	 * unchanged.
	 */
	for (i = 0; i < RTE_DIM(max_nb_cases); i++) {
		in_params.max_nb = max_nb_cases[i];

		err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
		TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
			    in_params.max_nb, out_params.max_nb);
	}

	rc = TEST_SUCCESS;
vchan_del:
	err = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
					      TEST_DMA_VCHAN_ID);
	TEST_ASSERT_SUCCESS(err, "Failed to delete vchan\n");

	return rc;
}
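
/* Build TEST_MAX_OP mem-to-mem copy operations (random source bytes,
 * zeroed destinations), push them through the adapter and verify that
 * every destination buffer matches its source buffer.
 */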
static int
test_op_forward_mode(void)
{
	struct rte_mbuf *src_mbuf[TEST_MAX_OP];
	struct rte_mbuf *dst_mbuf[TEST_MAX_OP];
	struct rte_event_dma_adapter_op *op;
	struct rte_event ev[TEST_MAX_OP];
	int ret, i;

	ret = rte_pktmbuf_alloc_bulk(params.src_mbuf_pool, src_mbuf, TEST_MAX_OP);
	TEST_ASSERT_SUCCESS(ret, "alloc src mbufs failed.\n");

	ret = rte_pktmbuf_alloc_bulk(params.dst_mbuf_pool, dst_mbuf, TEST_MAX_OP);
	TEST_ASSERT_SUCCESS(ret, "alloc dst mbufs failed.\n");

	for (i = 0; i < TEST_MAX_OP; i++) {
		memset(rte_pktmbuf_mtod(src_mbuf[i], void *), rte_rand(), PACKET_LENGTH);
		memset(rte_pktmbuf_mtod(dst_mbuf[i], void *), 0, PACKET_LENGTH);
	}

	for (i = 0; i < TEST_MAX_OP; i++) {
		ret = rte_mempool_get(params.op_mpool, (void **)&op);
		TEST_ASSERT_SUCCESS(ret, "Failed to allocate dma operation struct\n");

		/* Update op with one source and one destination segment */
		op->src_dst_seg[0].addr = rte_pktmbuf_iova(src_mbuf[i]);
		op->src_dst_seg[1].addr = rte_pktmbuf_iova(dst_mbuf[i]);
		op->src_dst_seg[0].length = PACKET_LENGTH;
		op->src_dst_seg[1].length = PACKET_LENGTH;
		op->nb_src = 1;
		op->nb_dst = 1;
		op->flags = RTE_DMA_OP_FLAG_SUBMIT;
		op->op_mp = params.op_mpool;
		op->dma_dev_id = TEST_DMA_DEV_ID;
		op->vchan = TEST_DMA_VCHAN_ID;
		op->event_meta = dma_response_info.event;

		/* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
		memset(&ev[i], 0, sizeof(struct rte_event));
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].event_type = RTE_EVENT_TYPE_DMADEV;
		if (params.internal_port_op_fwd)
			ev[i].queue_id = TEST_APP_EV_QUEUE_ID;
		else
			ev[i].queue_id = TEST_DMA_EV_QUEUE_ID;
		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
		ev[i].flow_id = TEST_APP_EV_FLOWID;
		ev[i].event_ptr = op;
	}

	ret = send_recv_ev(ev);
	TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to dma adapter\n");

	test_dma_adapter_stats();

	for (i = 0; i < TEST_MAX_OP; i++) {
		op = ev[i].event_ptr;
		ret = memcmp(rte_pktmbuf_mtod(src_mbuf[i], void *),
			     rte_pktmbuf_mtod(dst_mbuf[i], void *), PACKET_LENGTH);

		TEST_ASSERT_EQUAL(ret, 0, "Data mismatch for dma adapter\n");

		rte_mempool_put(op->op_mp, op);
	}

	rte_pktmbuf_free_bulk(src_mbuf, TEST_MAX_OP);
	rte_pktmbuf_free_bulk(dst_mbuf, TEST_MAX_OP);

	return TEST_SUCCESS;
}
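
/* When the adapter runs as a software service, map that service to the
 * test service lcore, adding and starting the lcore if it is not already
 * part of the service core list.
 */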
static int
map_adapter_service_core(void)
{
	uint32_t adapter_service_id;
	int ret;

	if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &adapter_service_id) == 0) {
		uint32_t core_list[NUM_CORES];

		ret = rte_service_lcore_list(core_list, NUM_CORES);
		TEST_ASSERT(ret >= 0, "Failed to get service core list!");

		if (core_list[0] != slcore_id) {
			TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
					    "Failed to add service core");
			TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
					    "Failed to start service core");
		}

		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id, slcore_id, 1),
				    "Failed to map adapter service");
	}

	return TEST_SUCCESS;
}

/* Run the FORWARD mode test: use a service core when the PMD has no
 * internal port; skip when the PMD has an internal port but does not
 * support OP_FWD.
 */
static int
test_with_op_forward_mode(void)
{
	uint32_t cap;
	int ret;

	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
		map_adapter_service_core();
	else if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
		return TEST_SKIPPED;

	TEST_ASSERT_SUCCESS(rte_event_dma_adapter_start(TEST_ADAPTER_ID),
			    "Failed to start event dma adapter");

	ret = test_op_forward_mode();
	TEST_ASSERT_SUCCESS(ret, "DMA - FORWARD mode test failed\n");
	return TEST_SUCCESS;
}
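
/* Configure dmadev TEST_DMA_DEV_ID with a single MEM_TO_MEM vchan and
 * create the source/destination mbuf pools plus the DMA op mempool; each
 * op element carries two rte_dma_sge entries (source and destination)
 * after the rte_event_dma_adapter_op header.
 */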
static int
configure_dmadev(void)
{
	const struct rte_dma_conf conf = { .nb_vchans = 1 };
	const struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = TEST_RINGSIZE,
	};
	struct rte_dma_info info;
	unsigned int elt_size;
	int ret;

	ret = rte_dma_count_avail();
	RTE_TEST_ASSERT_FAIL(ret, "No dma devices found!\n");

	ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Error with rte_dma_info_get()\n");

	if (info.max_vchans < 1)
		RTE_LOG(ERR, USER1, "Error, no channels available on device id %u\n",
			TEST_DMA_DEV_ID);

	if (rte_dma_configure(TEST_DMA_DEV_ID, &conf) != 0)
		RTE_LOG(ERR, USER1, "Error with rte_dma_configure()\n");

	if (rte_dma_vchan_setup(TEST_DMA_DEV_ID, TEST_DMA_VCHAN_ID, &qconf) < 0)
		RTE_LOG(ERR, USER1, "Error with vchan configuration\n");

	ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
	if (ret != 0 || info.nb_vchans != 1)
		RTE_LOG(ERR, USER1, "Error, no configured vchan reported on device id %u\n",
			TEST_DMA_DEV_ID);

	params.src_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_SRC_MBUFPOOL", NUM_MBUFS,
						       MBUF_CACHE_SIZE, 0, MBUF_SIZE,
						       rte_socket_id());
	RTE_TEST_ASSERT_NOT_NULL(params.src_mbuf_pool,
				 "Can't create DMA_ADAPTER_SRC_MBUFPOOL\n");

	params.dst_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_DST_MBUFPOOL", NUM_MBUFS,
						       MBUF_CACHE_SIZE, 0, MBUF_SIZE,
						       rte_socket_id());
	RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool,
				 "Can't create DMA_ADAPTER_DST_MBUFPOOL\n");

	/* Each element holds one op plus its two scatter-gather entries. */
	elt_size = sizeof(struct rte_event_dma_adapter_op) + (sizeof(struct rte_dma_sge) * 2);
	params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0,
					     0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
	RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create EVENT_DMA_OP_POOL\n");

	return TEST_SUCCESS;
}

/* Fill an eventdev configuration from the device limits reported in info. */
static inline void
evdev_set_conf_values(struct rte_event_dev_config *dev_conf, struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = NB_TEST_PORTS;
	dev_conf->nb_event_queues = NB_TEST_QUEUES;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth = info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth = info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit = info->max_num_events;
}

static int
configure_eventdev(void)
{
	struct rte_event_queue_conf queue_conf;
	struct rte_event_dev_config devconf;
	struct rte_event_dev_info info;
	uint32_t queue_count;
	uint32_t port_count;
	uint8_t qid;
	int ret;

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... "
			"testing with event_sw device\n");
		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
				    "Error creating eventdev");
		evdev = rte_event_dev_get_dev_id("event_sw0");
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");

	evdev_set_conf_values(&devconf, &info);

	ret = rte_event_dev_configure(evdev, &devconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");

	/* Set up event queues: one for the application, one for the adapter */
	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count);
	TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
	TEST_ASSERT_EQUAL(queue_count, NB_TEST_QUEUES, "Unexpected queue count\n");

	qid = TEST_APP_EV_QUEUE_ID;
	ret = rte_event_queue_setup(evdev, qid, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);

	queue_conf.nb_atomic_flows = info.max_event_queue_flows;
	queue_conf.nb_atomic_order_sequences = 32;
	queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	qid = TEST_DMA_EV_QUEUE_ID;
	ret = rte_event_queue_setup(evdev, qid, &queue_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);

	/* Set up event port */
	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count);
	TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
	TEST_ASSERT_EQUAL(port_count, NB_TEST_PORTS, "Unexpected port count\n");

	ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n", TEST_APP_PORT_ID);

	qid = TEST_APP_EV_QUEUE_ID;
	ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
	TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n", TEST_APP_PORT_ID);

	return TEST_SUCCESS;
}
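
/* Adapter lifetime helpers used as setup/teardown by the test cases
 * below: create an adapter instance with a port configuration derived
 * from the eventdev limits, and free it again.
 */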
static void
test_dma_adapter_free(void)
{
	rte_event_dma_adapter_free(TEST_ADAPTER_ID);
}

static int
test_dma_adapter_create(void)
{
	struct rte_event_dev_info evdev_info = {0};
	struct rte_event_port_conf conf = {0};
	int ret;

	ret = rte_event_dev_info_get(evdev, &evdev_info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");

	conf.new_event_threshold = evdev_info.max_num_events;
	conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
	conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;

	/* Create adapter with default port creation callback */
	ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, 0);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");

	return TEST_SUCCESS;
}

static int
test_dma_adapter_vchan_add_del(void)
{
	struct rte_event event;
	uint32_t cap;
	int ret;

	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	event.event = dma_response_info.event;
	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, &event);
	} else {
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, NULL);
	}

	TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n");

	ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
					      TEST_DMA_VCHAN_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to delete vchan\n");

	return TEST_SUCCESS;
}

/* Create the adapter in the requested mode, bind the test vchan to it
 * and, when the PMD has no internal port, fetch the adapter's event port
 * so that the application can link a queue to it.
 */
static int
configure_event_dma_adapter(enum rte_event_dma_adapter_mode mode)
{
	struct rte_event_dev_info evdev_info = {0};
	struct rte_event_port_conf conf = {0};
	struct rte_event event;
	uint32_t cap;
	int ret;

	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	/* Skip mode and capability mismatch check for SW eventdev */
	if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))
		goto adapter_create;

	if (mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) {
		if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)
			params.internal_port_op_fwd = 1;
		else
			return -ENOTSUP;
	}

adapter_create:
	ret = rte_event_dev_info_get(evdev, &evdev_info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");

	conf.new_event_threshold = evdev_info.max_num_events;
	conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
	conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;

	/* Create adapter with default port creation callback */
	ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, mode);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");

	event.event = dma_response_info.event;
	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, &event);
	} else {
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, NULL);
	}

	TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n");

	if (!params.internal_port_op_fwd) {
		ret = rte_event_dma_adapter_event_port_get(TEST_ADAPTER_ID,
							   &params.dma_event_port_id);
		TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
	}

	return TEST_SUCCESS;
}
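
/* Stop the adapter and eventdev services when present, then stop the
 * dma device and the event device.
 */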
static void
test_dma_adapter_stop(void)
{
	uint32_t evdev_service_id, adapter_service_id;

	/* retrieve service ids & stop services */
	if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID,
						 &adapter_service_id) == 0) {
		rte_service_runstate_set(adapter_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
		rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
	}

	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		rte_service_runstate_set(evdev_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
	}

	rte_dma_stop(TEST_DMA_DEV_ID);
	rte_event_dev_stop(evdev);
}

/* One-time adapter setup for a test run: configure the adapter in the
 * given mode, link the adapter queue when a service is used, then start
 * the eventdev service, the event device and the dma device.
 */
static int
test_dma_adapter_conf(enum rte_event_dma_adapter_mode mode)
{
	uint32_t evdev_service_id;
	uint8_t qid;
	int ret;

	if (!dma_adapter_setup_done) {
		ret = configure_event_dma_adapter(mode);
		if (ret)
			return ret;
		if (!params.internal_port_op_fwd) {
			qid = TEST_DMA_EV_QUEUE_ID;
			ret = rte_event_port_link(evdev,
						  params.dma_event_port_id, &qid, NULL, 1);
			TEST_ASSERT(ret >= 0, "Failed to link queue %d port=%u\n",
				    qid, params.dma_event_port_id);
		}
		dma_adapter_setup_done = 1;
	}

	/* retrieve service ids */
	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		/* add a service core and start it */
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
				    "Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
				    "Failed to start service core");

		/* map services to it */
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
							      slcore_id, 1),
				    "Failed to map evdev service");

		/* set services to running */
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id, 1),
				    "Failed to start evdev service");
	}

	/* start the eventdev */
	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
			    "Failed to start event device");

	/* start the dma dev */
	TEST_ASSERT_SUCCESS(rte_dma_start(TEST_DMA_DEV_ID),
			    "Failed to start dma device");

	return TEST_SUCCESS;
}

static int
test_dma_adapter_conf_op_forward_mode(void)
{
	return test_dma_adapter_conf(RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
}

static int
testsuite_setup(void)
{
	int ret;

	slcore_id = rte_get_next_lcore(-1, 1, 0);
	TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
			      "are required to run this autotest\n");

	/* Set up the event device. */
	ret = configure_eventdev();
	TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");

	/* Set up the dma device. */
	ret = configure_dmadev();
	TEST_ASSERT_SUCCESS(ret, "dmadev initialization failed\n");

	return TEST_SUCCESS;
}
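
/* Suite teardown helpers: stop and free the adapter, release the
 * mempools, and stop the event device.
 */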
static void
dma_adapter_teardown(void)
{
	int ret;

	ret = rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to stop adapter!");

	ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
					      TEST_DMA_VCHAN_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to delete vchan!");

	ret = rte_event_dma_adapter_free(TEST_ADAPTER_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to free adapter!");

	dma_adapter_setup_done = 0;
}

static void
dma_teardown(void)
{
	/* Free mbuf mempools */
	if (params.src_mbuf_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_SRC_MBUFPOOL count %u\n",
			rte_mempool_avail_count(params.src_mbuf_pool));
		rte_mempool_free(params.src_mbuf_pool);
		params.src_mbuf_pool = NULL;
	}

	if (params.dst_mbuf_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_DST_MBUFPOOL count %u\n",
			rte_mempool_avail_count(params.dst_mbuf_pool));
		rte_mempool_free(params.dst_mbuf_pool);
		params.dst_mbuf_pool = NULL;
	}

	/* Free ops mempool */
	if (params.op_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "EVENT_DMA_OP_POOL count %u\n",
			rte_mempool_avail_count(params.op_mpool));
		rte_mempool_free(params.op_mpool);
		params.op_mpool = NULL;
	}
}

static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
}

static void
testsuite_teardown(void)
{
	dma_adapter_teardown();
	dma_teardown();
	eventdev_teardown();
}

static struct unit_test_suite functional_testsuite = {
	.suite_name = "Event dma adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {

		TEST_CASE_ST(NULL, test_dma_adapter_free, test_dma_adapter_create),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_vchan_add_del),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_stats),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_params),

		TEST_CASE_ST(test_dma_adapter_conf_op_forward_mode, test_dma_adapter_stop,
			     test_with_op_forward_mode),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_dma_adapter(void)
{
	return unit_test_suite_runner(&functional_testsuite);
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_DRIVER_TEST(event_dma_adapter_autotest, test_event_dma_adapter);