/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#include <rte_dmadev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_dmadev_pmd.h>

#include "test.h"
#include "test_dmadev_api.h"

#define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)

#define COPY_LEN 1024

static struct rte_mempool *pool;
static uint16_t id_count;

static void
__rte_format_printf(3, 4)
print_err(const char *func, int lineno, const char *format, ...)
{
	va_list ap;

	fprintf(stderr, "In %s:%d - ", func, lineno);
	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
}

static int
runtest(const char *printable, int (*test_fn)(int16_t dev_id, uint16_t vchan), int iterations,
		int16_t dev_id, uint16_t vchan, bool check_err_stats)
{
	struct rte_dma_stats stats;
	int i;

	rte_dma_stats_reset(dev_id, vchan);
	printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
			check_err_stats ? " " : "(errors expected)");
	for (i = 0; i < iterations; i++) {
		if (test_fn(dev_id, vchan) < 0)
			return -1;

		rte_dma_stats_get(dev_id, vchan, &stats);
		printf("Ops submitted: %"PRIu64"\t", stats.submitted);
		printf("Ops completed: %"PRIu64"\t", stats.completed);
		printf("Errors: %"PRIu64"\r", stats.errors);

		if (stats.completed != stats.submitted)
			ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
		if (check_err_stats && stats.errors != 0)
			ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
	}
	printf("\n");
	return 0;
}

static void
await_hw(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_vchan_status st;

	if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
		/* for drivers that don't support this op, just sleep for 1 millisecond */
		rte_delay_us_sleep(1000);
		return;
	}

	/* for those that do, *max* end time is one second from now, but all should be faster */
	const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
	while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
		rte_pause();
		rte_dma_vchan_status(dev_id, vchan, &st);
	}
}

/* run a series of copy tests just using some different options for enqueues and completions */
static int
do_multi_copies(int16_t dev_id, uint16_t vchan,
		int split_batches,        /* submit 2 x 16 or 1 x 32 burst */
		int split_completions,    /* gather 2 x 16 or 1 x 32 completions */
		int use_completed_status) /* use completed or completed_status function */
{
	struct rte_mbuf *srcs[32], *dsts[32];
	enum rte_dma_status_code sc[32];
	unsigned int i, j;
	bool dma_err = false;

	/* Enqueue burst of copies and hit doorbell */
	for (i = 0; i < RTE_DIM(srcs); i++) {
		uint64_t *src_data;

		if (split_batches && i == RTE_DIM(srcs) / 2)
			rte_dma_submit(dev_id, vchan);

		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
		if (srcs[i] == NULL || dsts[i] == NULL)
			ERR_RETURN("Error allocating buffers\n");

		src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
		for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
			src_data[j] = rte_rand();

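		/* Each rte_dma_copy() call returns the job's ring index, which
		 * increments by one per enqueue and wraps at UINT16_MAX (e.g.
		 * after id 65535 the next enqueue returns id 0), so it can be
		 * checked against the running id_count. Without the
		 * RTE_DMA_OP_FLAG_SUBMIT flag the job is only queued; the
		 * doorbell is rung by the rte_dma_submit() calls.
		 */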
		if (rte_dma_copy(dev_id, vchan, srcs[i]->buf_iova + srcs[i]->data_off,
				dsts[i]->buf_iova + dsts[i]->data_off, COPY_LEN, 0) != id_count++)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
	}
	rte_dma_submit(dev_id, vchan);

	await_hw(dev_id, vchan);

	if (split_completions) {
		/* gather completions in two halves */
		uint16_t half_len = RTE_DIM(srcs) / 2;
		int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);

		ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);
	} else {
		/* gather all completions in one go, using either
		 * completed or completed_status fns
		 */
		if (!use_completed_status) {
			int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
			if (n != RTE_DIM(srcs) || dma_err)
				ERR_RETURN("Error with rte_dma_completed, %d [expected: %zu], dma_err = %d\n",
						n, RTE_DIM(srcs), dma_err);
		} else {
			int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
			if (n != RTE_DIM(srcs))
				ERR_RETURN("Error with rte_dma_completed_status, %d [expected: %zu]\n",
						n, RTE_DIM(srcs));

			for (j = 0; j < (uint16_t)n; j++)
				if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
					ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
							j, sc[j]);
		}
	}

	/* check for empty */
	int ret = use_completed_status ?
			rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
			rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
	if (ret != 0)
		ERR_RETURN("Error with completion check - ops unexpectedly returned\n");

	for (i = 0; i < RTE_DIM(srcs); i++) {
		char *src_data, *dst_data;

		src_data = rte_pktmbuf_mtod(srcs[i], char *);
		dst_data = rte_pktmbuf_mtod(dsts[i], char *);
		for (j = 0; j < COPY_LEN; j++)
			if (src_data[j] != dst_data[j])
				ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);

		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}

static int
test_enqueue_copies(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_status_code status;
	unsigned int i;
	uint16_t id;

	/* test doing a single copy */
	do {
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rte_rand() & 0xFF;

		id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
				COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
		if (id != id_count)
			ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
					id, id_count);

		/* give time for copy to finish, then check it was done */
		await_hw(dev_id, vchan);

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i])
				ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
						dst_data[i], src_data[i]);

		/* now check completion works */
		id = ~id;
		if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
			ERR_RETURN("Error with rte_dma_completed\n");

		if (id != id_count)
			ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
					id, id_count);
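
		/* The completed()/completed_status() calls report the ring index
		 * of the most recently completed job through their last_idx
		 * argument even when a call finds nothing new to complete, so
		 * the checks below expect `id` to come back unchanged.
		 */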
		/* check for completed and id when no job done */
		id = ~id;
		if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
			ERR_RETURN("Error with rte_dma_completed when no job done\n");
		if (id != id_count)
			ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
					id, id_count);

		/* check for completed_status and id when no job done */
		id = ~id;
		if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
			ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
		if (id != id_count)
			ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
					id, id_count);

		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);

		/* now check completion returns nothing more */
		if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 0)
			ERR_RETURN("Error with rte_dma_completed in empty check\n");

		id_count++;

	} while (0);

	/* test doing multiple single copies */
	do {
		const uint16_t max_ops = 4;
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;
		uint16_t count;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rte_rand() & 0xFF;

		/* perform the same copy <max_ops> times */
		for (i = 0; i < max_ops; i++)
			if (rte_dma_copy(dev_id, vchan,
					rte_pktmbuf_iova(src),
					rte_pktmbuf_iova(dst),
					COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
				ERR_RETURN("Error with rte_dma_copy\n");

		await_hw(dev_id, vchan);

		count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
		if (count != max_ops)
			ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
					count, max_ops);

		if (id != id_count - 1)
			ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
					id, id_count - 1);

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i])
				ERR_RETURN("Data mismatch at char %u\n", i);

		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);
	} while (0);

	/* test doing multiple copies */
	return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
			/* enqueue 2 batches and then complete both */
			|| do_multi_copies(dev_id, vchan, 1, 0, 0)
			/* enqueue 1 batch, then complete in two halves */
			|| do_multi_copies(dev_id, vchan, 0, 1, 0)
			/* test using completed_status in place of regular completed API */
			|| do_multi_copies(dev_id, vchan, 0, 0, 1);
}

/* Failure handling test cases - global macros and variables for those tests */
#define COMP_BURST_SZ 16
#define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)
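
/* When fencing is enabled, job 8 of each COMP_BURST_SZ burst carries
 * RTE_DMA_OP_FLAG_FENCE, meaning it may only start once all earlier jobs in
 * the burst have completed. This lets the failure tests below exercise error
 * reporting both with and without an ordering barrier around the deliberately
 * failed job.
 */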

static int
test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test single full batch statuses with failures */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	struct rte_dma_stats baseline, stats;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count;
	unsigned int i;
	bool error = false;
	int err_count = 0;

	rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */
	for (i = 0; i < COMP_BURST_SZ; i++) {
		int id = rte_dma_copy(dev_id, vchan,
				(i == fail_idx ? 0 : (srcs[i]->buf_iova + srcs[i]->data_off)),
				dsts[i]->buf_iova + dsts[i]->data_off,
				COPY_LEN, OPT_FENCE(i));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
		if (i == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.submitted != baseline.submitted + COMP_BURST_SZ)
		ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.submitted, baseline.submitted + COMP_BURST_SZ);

	await_hw(dev_id, vchan);

	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
				count, fail_idx);
	if (!error)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* all checks ok, now verify calling completed() again always returns 0 */
	for (i = 0; i < 10; i++)
		if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0
				|| error == false || idx != (invalid_addr_id - 1))
			ERR_RETURN("Error with follow-up completed calls for fail idx %u\n",
					fail_idx);

	status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ,
			&idx, status);
	/* Some HW may stop on error and need to be restarted after the error
	 * status of a single job has been retrieved. To handle this case, if
	 * we get just one status back, wait for more completions and then
	 * fetch the status of the rest of the burst.
	 */
	if (status_count == 1) {
		await_hw(dev_id, vchan);
		status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1,
				&idx, &status[1]);
	}
	/* check that at this point we have all status values */
	if (status_count != COMP_BURST_SZ - count)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	/* now verify just one failure followed by multiple successful or skipped entries */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);
	for (i = 1; i < status_count; i++)
		/* after a failure in a burst, depending on ordering/fencing,
		 * operations may be successful or skipped because of previous error.
		 */
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n",
					fail_idx, count + i, COMP_BURST_SZ);
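
	/* dmadev stats count every job towards `completed`, whether it
	 * succeeded or not; only jobs finishing with a non-successful status
	 * code are added to `errors`. The checks below rely on this accounting.
	 */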
	/* check the completed + errors stats are as expected */
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.completed != baseline.completed + COMP_BURST_SZ)
		ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.completed, baseline.completed + COMP_BURST_SZ);
	for (i = 0; i < status_count; i++)
		err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL);
	if (stats.errors != baseline.errors + err_count)
		ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.errors, baseline.errors + err_count);

	return 0;
}

static int
test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test gathering batch statuses one at a time */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count = 0, status_count = 0;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : (srcs[j]->buf_iova + srcs[j]->data_off)),
				dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, OPT_FENCE(j));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	/* use regular "completed" until we hit error */
	while (!error) {
		uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error);
		count += n;
		if (n > 1 || count >= COMP_BURST_SZ)
			ERR_RETURN("Error, too many completions received\n");
		if (n == 0 && !error)
			ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n",
					count);
	}
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n",
				idx, invalid_addr_id - 1);

	/* use completed_status until we hit end of burst */
	while (count + status_count < COMP_BURST_SZ) {
		uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx,
				&status[status_count]);
		await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */
		status_count += n;
		if (n != 1)
			ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n",
					n);
	}

	/* check for single failure */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error, unexpected successful DMA transaction\n");
	for (j = 1; j < status_count; j++)
		if (status[j] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error, unexpected DMA error reported\n");

	return 0;
}
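
/* The two tests above drain a failed burst either in bulk completed() calls
 * or one job at a time. The next variant mixes the APIs: completed() up to
 * the error, completed_status() for just the failed job, then back to the
 * cheaper completed() calls for the remainder of the burst.
 */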

static int
test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* When an error occurs, collect just a single error using "completed_status()"
	 * before going back to completed() calls
	 */
	enum rte_dma_status_code status;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count, count2;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : (srcs[j]->buf_iova + srcs[j]->data_off)),
				dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	/* get up to the error point */
	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
				count, fail_idx);
	if (!error)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* get the error code */
	status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status);
	if (status_count != 1)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	if (status == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);

	/* delay in case time is needed after error handling for remaining jobs to complete */
	await_hw(dev_id, vchan);

	/* get the rest of the completions without status */
	count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (error == true)
		ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n",
				fail_idx);
	if (count + status_count + count2 != COMP_BURST_SZ)
		ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n",
				count + status_count + count2, COMP_BURST_SZ);

	return 0;
}

static int
test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
		const unsigned int *fail, size_t num_fail)
{
	/* test having multiple errors in one go */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	unsigned int i, j;
	uint16_t count, err_count = 0;
	bool error = false;

	/* enqueue and gather completions in one go */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off;
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan,
				src, dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status);
	while (count < COMP_BURST_SZ) {
		await_hw(dev_id, vchan);

		uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count,
				NULL, &status[count]);
		if (ret == 0)
			ERR_RETURN("Error getting all completions for jobs. Got %u of %u\n",
					count, COMP_BURST_SZ);
		count += ret;
	}
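	/* Tally the failures: only the jobs given a NULL (0) source address
	 * should report a bad status; every other job in the burst should
	 * have completed successfully.
	 */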
	for (i = 0; i < count; i++)
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			err_count++;

	if (err_count != num_fail)
		ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n",
				err_count, num_fail);

	/* enqueue and gather completions in bursts, but getting errors one at a time */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off;
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan,
				src, dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = 0;
	err_count = 0;
	while (count + err_count < COMP_BURST_SZ) {
		count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error);
		if (error) {
			uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1,
					NULL, status);
			if (ret != 1)
				ERR_RETURN("Error getting error-status for completions\n");
			err_count += ret;
			await_hw(dev_id, vchan);
		}
	}
	if (err_count != num_fail)
		ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n",
				err_count, num_fail);

	return 0;
}

static int
test_completion_status(int16_t dev_id, uint16_t vchan, bool fence)
{
	const unsigned int fail[] = {0, 7, 14, 15};
	struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
	unsigned int i;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
	}

	for (i = 0; i < RTE_DIM(fail); i++) {
		if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0)
			return -1;

		if (test_individual_status_query_with_failure(dev_id, vchan, fence,
				srcs, dsts, fail[i]) < 0)
			return -1;

		/* this test runs the same whether fenced or unfenced, but there
		 * is no harm in running it twice
		 */
		if (test_single_item_status_query_with_failure(dev_id, vchan,
				srcs, dsts, fail[i]) < 0)
			return -1;
	}

	if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0)
		return -1;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}

static int
test_completion_handling(int16_t dev_id, uint16_t vchan)
{
	return test_completion_status(dev_id, vchan, false)   /* without fences */
			|| test_completion_status(dev_id, vchan, true); /* with fences */
}

static int
test_enqueue_fill(int16_t dev_id, uint16_t vchan)
{
	const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
	struct rte_mbuf *dst;
	char *dst_data;
	uint64_t pattern = 0xfedcba9876543210;
	unsigned int i, j;

	dst = rte_pktmbuf_alloc(pool);
	if (dst == NULL)
		ERR_RETURN("Failed to allocate mbuf\n");
	dst_data = rte_pktmbuf_mtod(dst, char *);

	for (i = 0; i < RTE_DIM(lengths); i++) {
		/* reset dst_data */
		memset(dst_data, 0, rte_pktmbuf_data_len(dst));

		/* perform the fill operation */
		int id = rte_dma_fill(dev_id, vchan, pattern,
				rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_fill\n");
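
		/* rte_dma_fill() replicates the 64-bit pattern byte-by-byte
		 * across the destination, so lengths that are not a multiple
		 * of 8 (50, 100, 89) leave a partial copy of the pattern at
		 * the tail, which the byte-wise check below verifies.
		 */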
		await_hw(dev_id, vchan);

		if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
			ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
		/* check the data from the fill operation is correct */
		for (j = 0; j < lengths[i]; j++) {
			char pat_byte = ((char *)&pattern)[j % 8];
			if (dst_data[j] != pat_byte)
				ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], pat_byte);
		}
		/* check that the data after the fill operation was not written to */
		for (; j < rte_pktmbuf_data_len(dst); j++)
			if (dst_data[j] != 0)
				ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], 0);
	}

	rte_pktmbuf_free(dst);
	return 0;
}

static int
test_burst_capacity(int16_t dev_id, uint16_t vchan)
{
#define CAP_TEST_BURST_SIZE 64
	const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
	struct rte_mbuf *src, *dst;
	int i, j, iter;
	int cap, ret;
	bool dma_err;

	src = rte_pktmbuf_alloc(pool);
	dst = rte_pktmbuf_alloc(pool);

	/* to test capacity, we enqueue elements and check capacity is reduced
	 * by one each time - rebaselining the expected value after each burst
	 * as the capacity is only for a burst. We enqueue multiple bursts to
	 * fill up half the ring, before emptying it again. We do this multiple
	 * times to ensure that we get to test scenarios where we get ring
	 * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
	 */
	for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
			cap = rte_dma_burst_capacity(dev_id, vchan);

			for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
				ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
						rte_pktmbuf_iova(dst), COPY_LEN, 0);
				if (ret < 0)
					ERR_RETURN("Error with rte_dma_copy\n");

				if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
					ERR_RETURN("Error, ring capacity did not change as expected\n");
			}
			if (rte_dma_submit(dev_id, vchan) < 0)
				ERR_RETURN("Error, failed to submit burst\n");

			if (cap < rte_dma_burst_capacity(dev_id, vchan))
				ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
		}
		await_hw(dev_id, vchan);

		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
			ret = rte_dma_completed(dev_id, vchan,
					CAP_TEST_BURST_SIZE, NULL, &dma_err);
			if (ret != CAP_TEST_BURST_SIZE || dma_err) {
				enum rte_dma_status_code status;

				rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
				ERR_RETURN("Error with rte_dma_completed, %d [expected: %u], dma_err = %d, i = %d, iter = %d, status = %u\n",
						ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
			}
		}
		cap = rte_dma_burst_capacity(dev_id, vchan);
		if (cap != ring_space)
			ERR_RETURN("Error, ring capacity has not reset to original value, got %d, expected %d\n",
					cap, ring_space);
	}

	rte_pktmbuf_free(src);
	rte_pktmbuf_free(dst);

	return 0;
}

static int
test_dmadev_instance(int16_t dev_id)
{
#define TEST_RINGSIZE 512
#define CHECK_ERRS true
	struct rte_dma_stats stats;
	struct rte_dma_info info;
	const struct rte_dma_conf conf = { .nb_vchans = 1 };
	const struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = TEST_RINGSIZE,
	};
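	/* Standard dmadev bring-up used for all the tests on this device:
	 * info_get() to check vchan support, configure() with a single vchan,
	 * vchan_setup() of that vchan for mem-to-mem copies, then start()
	 * before any enqueues.
	 */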
	const int vchan = 0;
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0)
		ERR_RETURN("Error with rte_dma_info_get()\n");

	printf("\n### Test dmadev instance %d [%s]\n",
			dev_id, info.dev_name);

	if (info.max_vchans < 1)
		ERR_RETURN("Error, no channels available on device id %d\n", dev_id);

	if (rte_dma_configure(dev_id, &conf) != 0)
		ERR_RETURN("Error with rte_dma_configure()\n");

	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
		ERR_RETURN("Error with queue configuration\n");

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0 || info.nb_vchans != 1)
		ERR_RETURN("Error, no configured queues reported on device id %d\n", dev_id);

	if (rte_dma_start(dev_id) != 0)
		ERR_RETURN("Error with rte_dma_start()\n");

	if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
		ERR_RETURN("Error with rte_dma_stats_get()\n");

	if (rte_dma_burst_capacity(dev_id, vchan) < 32)
		ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests\n");

	if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
		ERR_RETURN("Error, device stats are not all zero: completed = %"PRIu64", "
				"submitted = %"PRIu64", errors = %"PRIu64"\n",
				stats.completed, stats.submitted, stats.errors);
	id_count = 0;

	/* create a mempool for running tests */
	pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
			TEST_RINGSIZE * 2, /* n == num elements */
			32,   /* cache size */
			0,    /* priv size */
			2048, /* data room size */
			info.numa_node);
	if (pool == NULL)
		ERR_RETURN("Error with mempool creation\n");

	/* run the test cases, use many iterations to ensure UINT16_MAX id wraparound */
	if (runtest("copy", test_enqueue_copies, 640, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* run some burst capacity tests */
	if (rte_dma_burst_capacity(dev_id, vchan) < 64)
		printf("DMA Dev %d: insufficient burst capacity (64 required), skipping tests\n",
				dev_id);
	else if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* To test error handling we can provide NULL pointers for source or
	 * dest in copies. This requires VA mode in DPDK, since NULL (0) is a
	 * valid physical address. We also need hardware that can report
	 * errors back.
	 */
	if (rte_eal_iova_mode() != RTE_IOVA_VA)
		printf("DMA Dev %d: DPDK not in VA mode, skipping error handling tests\n", dev_id);
	else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0)
		printf("DMA Dev %d: device does not report errors, skipping error handling tests\n",
				dev_id);
	else if (runtest("error handling", test_completion_handling, 1,
			dev_id, vchan, !CHECK_ERRS) < 0)
		goto err;

	if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0)
		printf("DMA Dev %d: No device fill support, skipping fill tests\n", dev_id);
	else if (runtest("fill", test_enqueue_fill, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	rte_mempool_free(pool);
	rte_dma_stop(dev_id);
	rte_dma_stats_reset(dev_id, vchan);
	return 0;

err:
	rte_mempool_free(pool);
	rte_dma_stop(dev_id);
	return -1;
}

static int
test_apis(void)
{
	const char *pmd = "dma_skeleton";
	int id;
	int ret;

	/* attempt to create skeleton instance - ignore errors if one is already present */
	rte_vdev_init(pmd, NULL);
	id = rte_dma_get_dev_id_by_name(pmd);
	if (id < 0)
		return TEST_SKIPPED;
	printf("\n### Test dmadev infrastructure using skeleton driver\n");
	ret = test_dma_api(id);

	return ret;
}

static int
test_dma(void)
{
	int i;

	/* basic sanity on dmadev infrastructure */
	if (test_apis() < 0)
		ERR_RETURN("Error performing API tests\n");

	if (rte_dma_count_avail() == 0)
		return TEST_SKIPPED;

	RTE_DMA_FOREACH_DEV(i)
		if (test_dmadev_instance(i) < 0)
			ERR_RETURN("Error, test failure for device %d\n", i);

	return 0;
}

REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);