xref: /dpdk/app/test/test_dmadev.c (revision 2aba4f16cd4e112b1c252f5244e4442ad38ca720)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 HiSilicon Limited
3  * Copyright(c) 2021 Intel Corporation
4  */
5 
6 #include <inttypes.h>
7 
8 #include <rte_dmadev.h>
9 #include <rte_mbuf.h>
10 #include <rte_pause.h>
11 #include <rte_cycles.h>
12 #include <rte_random.h>
13 #include <rte_bus_vdev.h>
14 #include <rte_dmadev_pmd.h>
15 
16 #include "test.h"
17 #include "test_dmadev_api.h"
18 
19 #define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)
20 
21 #define TEST_NAME_MAX_LEN 80
22 #define TEST_RINGSIZE 512
23 #define COPY_LEN 2048
24 
25 static struct rte_dma_info info;
26 static struct rte_mempool *pool;
27 static bool check_err_stats;
28 static int16_t test_dev_id;
29 static uint16_t id_count;
30 static uint16_t vchan;
31 
32 enum {
33 	TEST_PARAM_REMOTE_ADDR = 0,
34 	TEST_PARAM_MAX,
35 };
36 
37 static const char * const dma_test_param[] = {
38 	[TEST_PARAM_REMOTE_ADDR] = "remote_addr",
39 };
40 
41 static uint64_t env_test_param[TEST_PARAM_MAX];
42 
43 enum {
44 	TEST_M2D_AUTO_FREE = 0,
45 	TEST_MAX,
46 };
47 
48 struct dma_add_test {
49 	const char *name;
50 	bool enabled;
51 };
52 
53 struct dma_add_test dma_add_test[] = {
54 	[TEST_M2D_AUTO_FREE] = {.name = "m2d_auto_free", .enabled = false},
55 };
56 
57 static void
58 __rte_format_printf(3, 4)
59 print_err(const char *func, int lineno, const char *format, ...)
60 {
61 	va_list ap;
62 
63 	fprintf(stderr, "In %s:%d - ", func, lineno);
64 	va_start(ap, format);
65 	vfprintf(stderr, format, ap);
66 	va_end(ap);
67 }
68 
69 struct runtest_param {
70 	const char name[TEST_NAME_MAX_LEN];
71 	int (*test_fn)(int16_t dev_id, uint16_t vchan);
72 	int iterations;
73 };
74 
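/* test-runner wrapper: run the given test function for the requested iterations, checking device stats after each run */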
75 static int
76 runtest(const void *args)
77 {
78 	int (*test_fn)(int16_t dev_id, uint16_t vchan);
79 	const struct runtest_param *param = args;
80 	struct rte_dma_stats stats;
81 	const char *printable;
82 	int iterations;
83 	int16_t dev_id;
84 	int i;
85 
86 	printable = param->name;
87 	iterations = param->iterations;
88 	test_fn = param->test_fn;
89 	dev_id = test_dev_id;
90 
91 	rte_dma_stats_reset(dev_id, vchan);
92 	printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
93 			check_err_stats ? " " : "(errors expected)");
94 	for (i = 0; i < iterations; i++) {
95 		if (test_fn(dev_id, vchan) < 0)
96 			return -1;
97 
98 		rte_dma_stats_get(dev_id, 0, &stats);
99 		printf("Ops submitted: %"PRIu64"\t", stats.submitted);
100 		printf("Ops completed: %"PRIu64"\t", stats.completed);
101 		printf("Errors: %"PRIu64"\r", stats.errors);
102 
103 		if (stats.completed != stats.submitted)
104 			ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
105 		if (check_err_stats && stats.errors != 0)
106 			ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
107 	}
108 	printf("\n");
109 	return 0;
110 }
111 
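/* wait for outstanding hardware operations to finish, polling the vchan status where the driver supports it */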
112 static void
113 await_hw(int16_t dev_id, uint16_t vchan)
114 {
115 	enum rte_dma_vchan_status st;
116 
117 	if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
118 		/* for drivers that don't support this op, just sleep for 1 millisecond */
119 		rte_delay_us_sleep(1000);
120 		return;
121 	}
122 
123 	/* for those that do, *max* end time is one second from now, but all should be faster */
124 	const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
125 	while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
126 		rte_pause();
127 		rte_dma_vchan_status(dev_id, vchan, &st);
128 	}
129 }
130 
131 /* run a series of copy tests just using some different options for enqueues and completions */
132 static int
133 do_multi_copies(int16_t dev_id, uint16_t vchan,
134 		int split_batches,     /* submit 2 x 16 or 1 x 32 burst */
135 		int split_completions, /* gather 2 x 16 or 1 x 32 completions */
136 		int use_completed_status) /* use completed or completed_status function */
137 {
138 	struct rte_mbuf *srcs[32], *dsts[32];
139 	enum rte_dma_status_code sc[32];
140 	unsigned int i, j;
141 	bool dma_err = false;
142 
143 	/* Enqueue burst of copies and hit doorbell */
144 	for (i = 0; i < RTE_DIM(srcs); i++) {
145 		uint64_t *src_data;
146 
147 		if (split_batches && i == RTE_DIM(srcs) / 2)
148 			rte_dma_submit(dev_id, vchan);
149 
150 		srcs[i] = rte_pktmbuf_alloc(pool);
151 		dsts[i] = rte_pktmbuf_alloc(pool);
152 		if (srcs[i] == NULL || dsts[i] == NULL)
153 			ERR_RETURN("Error allocating buffers\n");
154 
155 		src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
156 		for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
157 			src_data[j] = rte_rand();
158 
159 		if (rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(srcs[i]),
160 				 rte_mbuf_data_iova(dsts[i]), COPY_LEN, 0) != id_count++)
161 			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
162 	}
163 	rte_dma_submit(dev_id, vchan);
164 
165 	await_hw(dev_id, vchan);
166 
167 	if (split_completions) {
168 		/* gather completions in two halves */
169 		uint16_t half_len = RTE_DIM(srcs) / 2;
170 		int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
171 		if (ret != half_len || dma_err)
172 			ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
173 					ret, half_len, dma_err);
174 
175 		ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
176 		if (ret != half_len || dma_err)
177 			ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
178 					ret, half_len, dma_err);
179 	} else {
180 		/* gather all completions in one go, using either
181 		 * completed or completed_status fns
182 		 */
183 		if (!use_completed_status) {
184 			int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
185 			if (n != RTE_DIM(srcs) || dma_err)
186 				ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n",
187 						n, RTE_DIM(srcs), dma_err);
188 		} else {
189 			int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
190 			if (n != RTE_DIM(srcs))
191 				ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n",
192 						n, RTE_DIM(srcs));
193 
194 			for (j = 0; j < (uint16_t)n; j++)
195 				if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
196 					ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
197 							j, sc[j]);
198 		}
199 	}
200 
201 	/* check for empty */
202 	int ret = use_completed_status ?
203 			rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
204 			rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
205 	if (ret != 0)
206 		ERR_RETURN("Error with completion check - ops unexpectedly returned\n");
207 
208 	for (i = 0; i < RTE_DIM(srcs); i++) {
209 		char *src_data, *dst_data;
210 
211 		src_data = rte_pktmbuf_mtod(srcs[i], char *);
212 		dst_data = rte_pktmbuf_mtod(dsts[i], char *);
213 		for (j = 0; j < COPY_LEN; j++)
214 			if (src_data[j] != dst_data[j])
215 				ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);
216 
217 		rte_pktmbuf_free(srcs[i]);
218 		rte_pktmbuf_free(dsts[i]);
219 	}
220 	return 0;
221 }
222 
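/* enqueue and verify a single copy, then exercise completed/completed_status when no further jobs are done */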
223 static int
224 test_single_copy(int16_t dev_id, uint16_t vchan)
225 {
226 	uint16_t i;
227 	uint16_t id;
228 	enum rte_dma_status_code status;
229 	struct rte_mbuf *src, *dst;
230 	char *src_data, *dst_data;
231 
232 	src = rte_pktmbuf_alloc(pool);
233 	dst = rte_pktmbuf_alloc(pool);
234 	src_data = rte_pktmbuf_mtod(src, char *);
235 	dst_data = rte_pktmbuf_mtod(dst, char *);
236 
237 	for (i = 0; i < COPY_LEN; i++)
238 		src_data[i] = rte_rand() & 0xFF;
239 
240 	id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
241 			COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
242 	if (id != id_count)
243 		ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
244 				id, id_count);
245 
246 	/* give time for copy to finish, then check it was done */
247 	await_hw(dev_id, vchan);
248 
249 	for (i = 0; i < COPY_LEN; i++)
250 		if (dst_data[i] != src_data[i])
251 			ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
252 					dst_data[i], src_data[i]);
253 
254 	/* now check completion works */
255 	id = ~id;
256 	if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
257 		ERR_RETURN("Error with rte_dma_completed\n");
258 
259 	if (id != id_count)
260 		ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
261 				id, id_count);
262 
263 	/* check for completed and id when no job done */
264 	id = ~id;
265 	if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
266 		ERR_RETURN("Error with rte_dma_completed when no job done\n");
267 	if (id != id_count)
268 		ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
269 				id, id_count);
270 
271 	/* check for completed_status and id when no job done */
272 	id = ~id;
273 	if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
274 		ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
275 	if (id != id_count)
276 		ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
277 				id, id_count);
278 
279 	rte_pktmbuf_free(src);
280 	rte_pktmbuf_free(dst);
281 
282 	/* now check completion returns nothing more */
283 	if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
284 		ERR_RETURN("Error with rte_dma_completed in empty check\n");
285 
286 	id_count++;
287 
288 	return 0;
289 }
290 
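/* copy tests: one single copy, a short run of repeated copies, then the multi-copy variants above */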
291 static int
292 test_enqueue_copies(int16_t dev_id, uint16_t vchan)
293 {
294 	unsigned int i;
295 
296 	/* test doing a single copy */
297 	if (test_single_copy(dev_id, vchan) < 0)
298 		return -1;
299 
300 	/* test doing multiple single copies */
301 	do {
302 		uint16_t id;
303 		const uint16_t max_ops = 4;
304 		struct rte_mbuf *src, *dst;
305 		char *src_data, *dst_data;
306 		uint16_t count;
307 
308 		src = rte_pktmbuf_alloc(pool);
309 		dst = rte_pktmbuf_alloc(pool);
310 		src_data = rte_pktmbuf_mtod(src, char *);
311 		dst_data = rte_pktmbuf_mtod(dst, char *);
312 
313 		for (i = 0; i < COPY_LEN; i++)
314 			src_data[i] = rte_rand() & 0xFF;
315 
316 		/* perform the same copy <max_ops> times */
317 		for (i = 0; i < max_ops; i++)
318 			if (rte_dma_copy(dev_id, vchan,
319 					rte_pktmbuf_iova(src),
320 					rte_pktmbuf_iova(dst),
321 					COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
322 				ERR_RETURN("Error with rte_dma_copy\n");
323 
324 		await_hw(dev_id, vchan);
325 
326 		count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
327 		if (count != max_ops)
328 			ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
329 					count, max_ops);
330 
331 		if (id != id_count - 1)
332 			ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
333 					id, id_count - 1);
334 
335 		for (i = 0; i < COPY_LEN; i++)
336 			if (dst_data[i] != src_data[i])
337 				ERR_RETURN("Data mismatch at char %u\n", i);
338 
339 		rte_pktmbuf_free(src);
340 		rte_pktmbuf_free(dst);
341 	} while (0);
342 
343 	/* test doing multiple copies */
344 	return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
345 			/* enqueue 2 batches and then complete both */
346 			|| do_multi_copies(dev_id, vchan, 1, 0, 0)
347 			/* enqueue 1 batch, then complete in two halves */
348 			|| do_multi_copies(dev_id, vchan, 0, 1, 0)
349 			/* test using completed_status in place of regular completed API */
350 			|| do_multi_copies(dev_id, vchan, 0, 0, 1);
351 }
352 
353 static int
354 test_stop_start(int16_t dev_id, uint16_t vchan)
355 {
356 	/* device is already started on input, should be (re)started on output */
357 
358 	uint16_t id = 0;
359 	enum rte_dma_status_code status = RTE_DMA_STATUS_SUCCESSFUL;
360 
361 	/* - test stopping a device works ok,
362 	 * - then do a start-stop without doing a copy
363 	 * - finally restart the device
364 	 * checking for errors at each stage, and validating we can still copy at the end.
365 	 */
366 	if (rte_dma_stop(dev_id) < 0)
367 		ERR_RETURN("Error stopping device\n");
368 
369 	if (rte_dma_start(dev_id) < 0)
370 		ERR_RETURN("Error restarting device\n");
371 	if (rte_dma_stop(dev_id) < 0)
372 		ERR_RETURN("Error stopping device after restart (no jobs executed)\n");
373 
374 	if (rte_dma_start(dev_id) < 0)
375 		ERR_RETURN("Error restarting device after multiple stop-starts\n");
376 
377 	/* before doing a copy, we need to know what the next id will be. It should
378 	 * either be:
379 	 * - the last completed job before start if driver does not reset id on stop
380 	 * - or -1 i.e. next job is 0, if driver does reset the job ids on stop
381 	 */
382 	if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
383 		ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
384 	id += 1; /* id_count is next job id */
385 	if (id != id_count && id != 0)
386 		ERR_RETURN("Unexpected next id from device after stop-start. Got %u, expected %u or 0\n",
387 				id, id_count);
388 
389 	id_count = id;
390 	if (test_single_copy(dev_id, vchan) < 0)
391 		ERR_RETURN("Error performing copy after device restart\n");
392 	return 0;
393 }
394 
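/* scatter-gather copy test covering every src/dst segment-count combination up to the device maximum */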
395 static int
396 test_enqueue_sg_copies(int16_t dev_id, uint16_t vchan)
397 {
398 	unsigned int src_len, dst_len, n_sge, len, i, j, k;
399 	char orig_src[COPY_LEN], orig_dst[COPY_LEN];
400 	struct rte_dma_info info = { 0 };
401 	enum rte_dma_status_code status;
402 	uint16_t id, n_src, n_dst;
403 
404 	if (rte_dma_info_get(dev_id, &info) < 0)
405 		ERR_RETURN("Failed to get dev info");
406 
407 	if (info.max_sges < 2)
408 		ERR_RETURN("Test needs minimum 2 SG pointers");
409 
410 	n_sge = info.max_sges;
411 
412 	for (n_src = 1; n_src <= n_sge; n_src++) {
413 		for (n_dst = 1; n_dst <= n_sge; n_dst++) {
414 			/* Normalize SG buffer lengths */
415 			len = COPY_LEN;
416 			len -= (len % (n_src * n_dst));
417 			dst_len = len / n_dst;
418 			src_len = len / n_src;
419 
420 			struct rte_dma_sge sg_src[n_sge], sg_dst[n_sge];
421 			struct rte_mbuf *src[n_sge], *dst[n_sge];
422 			char *src_data[n_sge], *dst_data[n_sge];
423 
424 			for (i = 0 ; i < len; i++)
425 				orig_src[i] = rte_rand() & 0xFF;
426 
427 			memset(orig_dst, 0, len);
428 
429 			for (i = 0; i < n_src; i++) {
430 				src[i] = rte_pktmbuf_alloc(pool);
431 				RTE_ASSERT(src[i] != NULL);
432 				sg_src[i].addr = rte_pktmbuf_iova(src[i]);
433 				sg_src[i].length = src_len;
434 				src_data[i] = rte_pktmbuf_mtod(src[i], char *);
435 			}
436 
437 			for (k = 0; k < n_dst; k++) {
438 				dst[k] = rte_pktmbuf_alloc(pool);
439 				RTE_ASSERT(dst[k] != NULL);
440 				sg_dst[k].addr = rte_pktmbuf_iova(dst[k]);
441 				sg_dst[k].length = dst_len;
442 				dst_data[k] = rte_pktmbuf_mtod(dst[k], char *);
443 			}
444 
445 			for (i = 0; i < n_src; i++) {
446 				for (j = 0; j < src_len; j++)
447 					src_data[i][j] = orig_src[i * src_len + j];
448 			}
449 
450 			for (k = 0; k < n_dst; k++)
451 				memset(dst_data[k], 0, dst_len);
452 
453 			printf("\tsrc segs: %2d [seg len: %4d] - dst segs: %2d [seg len : %4d]\n",
454 				n_src, src_len, n_dst, dst_len);
455 
456 			id = rte_dma_copy_sg(dev_id, vchan, sg_src, sg_dst, n_src, n_dst,
457 					     RTE_DMA_OP_FLAG_SUBMIT);
458 
459 			if (id != id_count)
460 				ERR_RETURN("Error with rte_dma_copy_sg, got %u, expected %u\n",
461 					id, id_count);
462 
463 			/* Give time for copy to finish, then check it was done */
464 			await_hw(dev_id, vchan);
465 
466 			for (k = 0; k < n_dst; k++)
467 				memcpy((&orig_dst[0] + k * dst_len), dst_data[k], dst_len);
468 
469 			if (memcmp(orig_src, orig_dst, COPY_LEN))
470 				ERR_RETURN("Data mismatch");
471 
472 			/* Verify completion */
473 			id = ~id;
474 			if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
475 				ERR_RETURN("Error with rte_dma_completed\n");
476 
477 			/* Verify expected index(id_count) */
478 			if (id != id_count)
479 				ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
480 						id, id_count);
481 
482 			/* Check for completed and id when no job done */
483 			id = ~id;
484 			if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
485 				ERR_RETURN("Error with rte_dma_completed when no job done\n");
486 
487 			if (id != id_count)
488 				ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
489 					   id, id_count);
490 
491 			/* Check for completed_status and id when no job done */
492 			id = ~id;
493 			if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
494 				ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
495 			if (id != id_count)
496 				ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
497 						id, id_count);
498 
499 			for (i = 0; i < n_src; i++)
500 				rte_pktmbuf_free(src[i]);
501 			for (i = 0; i < n_dst; i++)
502 				rte_pktmbuf_free(dst[i]);
503 
504 			/* Verify that completion returns nothing more */
505 			if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
506 				ERR_RETURN("Error with rte_dma_completed in empty check\n");
507 
508 			id_count++;
509 		}
510 	}
511 	return 0;
512 }
513 
514 /* Failure handling test cases - global macros and variables for those tests */
515 #define COMP_BURST_SZ	16
516 #define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)
517 
518 static int
519 test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence,
520 		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
521 {
522 	/* Test single full batch statuses with failures */
523 	enum rte_dma_status_code status[COMP_BURST_SZ];
524 	struct rte_dma_stats baseline, stats;
525 	uint16_t invalid_addr_id = 0;
526 	uint16_t idx;
527 	uint16_t count, status_count;
528 	unsigned int i;
529 	bool error = false;
530 	int err_count = 0;
531 
532 	rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */
533 	for (i = 0; i < COMP_BURST_SZ; i++) {
534 		int id = rte_dma_copy(dev_id, vchan,
535 				      (i == fail_idx ? 0 : rte_mbuf_data_iova(srcs[i])),
536 				      rte_mbuf_data_iova(dsts[i]), COPY_LEN, OPT_FENCE(i));
537 		if (id < 0)
538 			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
539 		if (i == fail_idx)
540 			invalid_addr_id = id;
541 	}
542 	rte_dma_submit(dev_id, vchan);
543 	rte_dma_stats_get(dev_id, vchan, &stats);
544 	if (stats.submitted != baseline.submitted + COMP_BURST_SZ)
545 		ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n",
546 				stats.submitted, baseline.submitted + COMP_BURST_SZ);
547 
548 	await_hw(dev_id, vchan);
549 
550 	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
551 	if (count != fail_idx)
552 		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
553 				count, fail_idx);
554 	if (!error)
555 		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
556 				fail_idx);
557 	if (idx != invalid_addr_id - 1)
558 		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
559 				fail_idx, idx, invalid_addr_id - 1);
560 
561 	/* all checks ok, now verify calling completed() again always returns 0 */
562 	for (i = 0; i < 10; i++)
563 		if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0
564 				|| error == false || idx != (invalid_addr_id - 1))
565 			ERR_RETURN("Error with follow-up completed calls for fail idx %u\n",
566 					fail_idx);
567 
568 	status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ,
569 			&idx, status);
570 	/* some HW may stop on error and be restarted after getting error status for a single value.
571 	 * To handle this case, if we get just one error back, wait for more completions and get
572 	 * status for the rest of the burst
573 	 */
574 	if (status_count == 1) {
575 		await_hw(dev_id, vchan);
576 		status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1,
577 					&idx, &status[1]);
578 	}
579 	/* check that at this point we have all status values */
580 	if (status_count != COMP_BURST_SZ - count)
581 		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
582 				fail_idx, status_count, COMP_BURST_SZ - count);
583 	/* now verify just one failure followed by multiple successful or skipped entries */
584 	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
585 		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
586 				fail_idx);
587 	for (i = 1; i < status_count; i++)
588 		/* after a failure in a burst, depending on ordering/fencing,
589 		 * operations may be successful or skipped because of previous error.
590 		 */
591 		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL
592 				&& status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED)
593 			ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n",
594 					fail_idx, count + i, COMP_BURST_SZ);
595 
596 	/* check the completed + errors stats are as expected */
597 	rte_dma_stats_get(dev_id, vchan, &stats);
598 	if (stats.completed != baseline.completed + COMP_BURST_SZ)
599 		ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n",
600 				stats.completed, baseline.completed + COMP_BURST_SZ);
601 	for (i = 0; i < status_count; i++)
602 		err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL);
603 	if (stats.errors != baseline.errors + err_count)
604 		ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n",
605 				stats.errors, baseline.errors + err_count);
606 
607 	return 0;
608 }
609 
610 static int
611 test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence,
612 		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
613 {
614 	/* Test gathering batch statuses one at a time */
615 	enum rte_dma_status_code status[COMP_BURST_SZ];
616 	uint16_t invalid_addr_id = 0;
617 	uint16_t idx;
618 	uint16_t count = 0, status_count = 0;
619 	unsigned int j;
620 	bool error = false;
621 
622 	for (j = 0; j < COMP_BURST_SZ; j++) {
623 		int id = rte_dma_copy(dev_id, vchan,
624 				      (j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
625 				      rte_mbuf_data_iova(dsts[j]), COPY_LEN, OPT_FENCE(j));
626 		if (id < 0)
627 			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
628 		if (j == fail_idx)
629 			invalid_addr_id = id;
630 	}
631 	rte_dma_submit(dev_id, vchan);
632 	await_hw(dev_id, vchan);
633 
634 	/* use regular "completed" until we hit error */
635 	while (!error) {
636 		uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error);
637 		count += n;
638 		if (n > 1 || count >= COMP_BURST_SZ)
639 			ERR_RETURN("Error - too many completions received\n");
640 		if (n == 0 && !error)
641 			ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n",
642 					count);
643 	}
644 	if (idx != invalid_addr_id - 1)
645 		ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n",
646 				idx, invalid_addr_id - 1);
647 
648 	/* use completed_status until we hit end of burst */
649 	while (count + status_count < COMP_BURST_SZ) {
650 		uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx,
651 				&status[status_count]);
652 		await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */
653 		status_count += n;
654 		if (n != 1)
655 			ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n",
656 					n);
657 	}
658 
659 	/* check for single failure */
660 	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
661 		ERR_RETURN("Error, unexpected successful DMA transaction\n");
662 	for (j = 1; j < status_count; j++)
663 		if (status[j] != RTE_DMA_STATUS_SUCCESSFUL
664 				&& status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED)
665 			ERR_RETURN("Error, unexpected DMA error reported\n");
666 
667 	return 0;
668 }
669 
670 static int
671 test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan,
672 		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
673 {
674 	/* When an error occurs, collect just a single error using "completed_status()"
675 	 * before going back to completed() calls
676 	 */
677 	enum rte_dma_status_code status;
678 	uint16_t invalid_addr_id = 0;
679 	uint16_t idx;
680 	uint16_t count, status_count, count2;
681 	unsigned int j;
682 	bool error = false;
683 
684 	for (j = 0; j < COMP_BURST_SZ; j++) {
685 		int id = rte_dma_copy(dev_id, vchan,
686 				      (j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
687 				      rte_mbuf_data_iova(dsts[j]), COPY_LEN, 0);
688 		if (id < 0)
689 			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
690 		if (j == fail_idx)
691 			invalid_addr_id = id;
692 	}
693 	rte_dma_submit(dev_id, vchan);
694 	await_hw(dev_id, vchan);
695 
696 	/* get up to the error point */
697 	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
698 	if (count != fail_idx)
699 		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
700 				count, fail_idx);
701 	if (!error)
702 		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
703 				fail_idx);
704 	if (idx != invalid_addr_id - 1)
705 		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
706 				fail_idx, idx, invalid_addr_id - 1);
707 
708 	/* get the error code */
709 	status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status);
710 	if (status_count != 1)
711 		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
712 				fail_idx, status_count, COMP_BURST_SZ - count);
713 	if (status == RTE_DMA_STATUS_SUCCESSFUL)
714 		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
715 				fail_idx);
716 
717 	/* delay in case time needed after err handled to complete other jobs */
718 	await_hw(dev_id, vchan);
719 
720 	/* get the rest of the completions without status */
721 	count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
722 	if (error == true)
723 		ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n",
724 				fail_idx);
725 	if (count + status_count + count2 != COMP_BURST_SZ)
726 		ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n",
727 				count + status_count + count2, COMP_BURST_SZ);
728 
729 	return 0;
730 }
731 
732 static int
733 test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
734 		const unsigned int *fail, size_t num_fail)
735 {
736 	/* test having multiple errors in one go */
737 	enum rte_dma_status_code status[COMP_BURST_SZ];
738 	unsigned int i, j;
739 	uint16_t count, err_count = 0;
740 	bool error = false;
741 
742 	/* enqueue and gather completions in one go */
743 	for (j = 0; j < COMP_BURST_SZ; j++) {
744 		uintptr_t src = rte_mbuf_data_iova(srcs[j]);
745 		/* set up for failure if the current index is anywhere in the fails array */
746 		for (i = 0; i < num_fail; i++)
747 			if (j == fail[i])
748 				src = 0;
749 
750 		int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
751 				      COPY_LEN, 0);
752 		if (id < 0)
753 			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
754 	}
755 	rte_dma_submit(dev_id, vchan);
756 	await_hw(dev_id, vchan);
757 
758 	count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status);
759 	while (count < COMP_BURST_SZ) {
760 		await_hw(dev_id, vchan);
761 
762 		uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count,
763 				NULL, &status[count]);
764 		if (ret == 0)
765 			ERR_RETURN("Error getting all completions for jobs. Got %u of %u\n",
766 					count, COMP_BURST_SZ);
767 		count += ret;
768 	}
769 	for (i = 0; i < count; i++)
770 		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
771 			err_count++;
772 
773 	if (err_count != num_fail)
774 		ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n",
775 			err_count, num_fail);
776 
777 	/* enqueue and gather completions in bursts, but getting errors one at a time */
778 	for (j = 0; j < COMP_BURST_SZ; j++) {
779 		uintptr_t src = rte_mbuf_data_iova(srcs[j]);
780 		/* set up for failure if the current index is anywhere in the fails array */
781 		for (i = 0; i < num_fail; i++)
782 			if (j == fail[i])
783 				src = 0;
784 
785 		int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
786 				      COPY_LEN, 0);
787 		if (id < 0)
788 			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
789 	}
790 	rte_dma_submit(dev_id, vchan);
791 	await_hw(dev_id, vchan);
792 
793 	count = 0;
794 	err_count = 0;
795 	while (count + err_count < COMP_BURST_SZ) {
796 		count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error);
797 		if (error) {
798 			uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1,
799 					NULL, status);
800 			if (ret != 1)
801 				ERR_RETURN("Error getting error-status for completions\n");
802 			err_count += ret;
803 			await_hw(dev_id, vchan);
804 		}
805 	}
806 	if (err_count != num_fail)
807 		ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n",
808 				err_count, num_fail);
809 
810 	return 0;
811 }
812 
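/* allocate buffers and run each failure-handling case for a set of failing indexes, fenced or unfenced */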
813 static int
814 test_completion_status(int16_t dev_id, uint16_t vchan, bool fence)
815 {
816 	const unsigned int fail[] = {0, 7, 14, 15};
817 	struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
818 	unsigned int i;
819 
820 	for (i = 0; i < COMP_BURST_SZ; i++) {
821 		srcs[i] = rte_pktmbuf_alloc(pool);
822 		dsts[i] = rte_pktmbuf_alloc(pool);
823 	}
824 
825 	for (i = 0; i < RTE_DIM(fail); i++) {
826 		if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0)
827 			return -1;
828 
829 		if (test_individual_status_query_with_failure(dev_id, vchan, fence,
830 				srcs, dsts, fail[i]) < 0)
831 			return -1;
832 
833 		/* the test runs the same fenced or unfenced, but no harm in running it twice */
834 		if (test_single_item_status_query_with_failure(dev_id, vchan,
835 				srcs, dsts, fail[i]) < 0)
836 			return -1;
837 	}
838 
839 	if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0)
840 		return -1;
841 
842 	for (i = 0; i < COMP_BURST_SZ; i++) {
843 		rte_pktmbuf_free(srcs[i]);
844 		rte_pktmbuf_free(dsts[i]);
845 	}
846 	return 0;
847 }
848 
849 static int
850 test_completion_handling(int16_t dev_id, uint16_t vchan)
851 {
852 	return test_completion_status(dev_id, vchan, false)              /* without fences */
853 			|| test_completion_status(dev_id, vchan, true);  /* with fences */
854 }
855 
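/* fill test: run fills of various lengths, checking the pattern is written and no bytes beyond it are touched */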
856 static int
857 test_enqueue_fill(int16_t dev_id, uint16_t vchan)
858 {
859 	const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
860 	struct rte_mbuf *dst;
861 	char *dst_data;
862 	uint64_t pattern = 0xfedcba9876543210;
863 	unsigned int i, j;
864 
865 	dst = rte_pktmbuf_alloc(pool);
866 	if (dst == NULL)
867 		ERR_RETURN("Failed to allocate mbuf\n");
868 	dst_data = rte_pktmbuf_mtod(dst, char *);
869 
870 	for (i = 0; i < RTE_DIM(lengths); i++) {
871 		/* reset dst_data */
872 		memset(dst_data, 0, rte_pktmbuf_data_len(dst));
873 
874 		/* perform the fill operation */
875 		int id = rte_dma_fill(dev_id, vchan, pattern,
876 				rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
877 		if (id < 0)
878 			ERR_RETURN("Error with rte_dma_fill\n");
879 		await_hw(dev_id, vchan);
880 
881 		if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
882 			ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
883 		/* check the data from the fill operation is correct */
884 		for (j = 0; j < lengths[i]; j++) {
885 			char pat_byte = ((char *)&pattern)[j % 8];
886 			if (dst_data[j] != pat_byte)
887 				ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
888 						lengths[i], dst_data[j], pat_byte);
889 		}
890 		/* check that the data after the fill operation was not written to */
891 		for (; j < rte_pktmbuf_data_len(dst); j++)
892 			if (dst_data[j] != 0)
893 				ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n",
894 						lengths[i], dst_data[j], 0);
895 	}
896 
897 	rte_pktmbuf_free(dst);
898 	return 0;
899 }
900 
901 static int
902 test_burst_capacity(int16_t dev_id, uint16_t vchan)
903 {
904 #define CAP_TEST_BURST_SIZE	64
905 	const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
906 	struct rte_mbuf *src, *dst;
907 	int i, j, iter;
908 	int cap, ret;
909 	bool dma_err;
910 
911 	src = rte_pktmbuf_alloc(pool);
912 	dst = rte_pktmbuf_alloc(pool);
913 
914 	/* to test capacity, we enqueue elements and check capacity is reduced
915 	 * by one each time - rebaselining the expected value after each burst
916 	 * as the capacity is only for a burst. We enqueue multiple bursts to
917 	 * fill up half the ring, before emptying it again. We do this multiple
918 	 * times to ensure that we get to test scenarios where we get ring
919 	 * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
920 	 */
921 	for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
922 		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
923 			cap = rte_dma_burst_capacity(dev_id, vchan);
924 
925 			for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
926 				ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
927 						rte_pktmbuf_iova(dst), COPY_LEN, 0);
928 				if (ret < 0)
929 					ERR_RETURN("Error with rte_dmadev_copy\n");
930 
931 				if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
932 					ERR_RETURN("Error, ring capacity did not change as expected\n");
933 			}
934 			if (rte_dma_submit(dev_id, vchan) < 0)
935 				ERR_RETURN("Error, failed to submit burst\n");
936 
937 			if (cap < rte_dma_burst_capacity(dev_id, vchan))
938 				ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
939 		}
940 		await_hw(dev_id, vchan);
941 
942 		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
943 			ret = rte_dma_completed(dev_id, vchan,
944 					CAP_TEST_BURST_SIZE, NULL, &dma_err);
945 			if (ret != CAP_TEST_BURST_SIZE || dma_err) {
946 				enum rte_dma_status_code status;
947 
948 				rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
949 				ERR_RETURN("Error with rte_dmadev_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n",
950 						ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
951 			}
952 		}
953 		cap = rte_dma_burst_capacity(dev_id, vchan);
954 		if (cap != ring_space)
955 			ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n",
956 					cap, ring_space);
957 	}
958 
959 	rte_pktmbuf_free(src);
960 	rte_pktmbuf_free(dst);
961 
962 	return 0;
963 }
964 
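/* copy mbufs to the remote device address with the auto-free flag, then use the mempool count to verify the sources were freed by hardware */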
965 static int
966 test_m2d_auto_free(int16_t dev_id, uint16_t vchan)
967 {
968 #define NR_MBUF 256
969 	struct rte_mempool_cache *cache;
970 	struct rte_mbuf *src[NR_MBUF];
971 	uint32_t buf_cnt1, buf_cnt2;
972 	struct rte_mempool_ops *ops;
973 	uint16_t nb_done = 0;
974 	bool dma_err = false;
975 	int retry = 100;
976 	int i, ret = 0;
977 	rte_iova_t dst;
978 
979 	dst = (rte_iova_t)env_test_param[TEST_PARAM_REMOTE_ADDR];
980 
981 	/* Capture buffer count before allocating source buffer. */
982 	cache = rte_mempool_default_cache(pool, rte_lcore_id());
983 	ops = rte_mempool_get_ops(pool->ops_index);
984 	buf_cnt1 = ops->get_count(pool) + cache->len;
985 
986 	if (rte_pktmbuf_alloc_bulk(pool, src, NR_MBUF) != 0)
987 		ERR_RETURN("alloc src mbufs failed.\n");
988 
989 	if ((buf_cnt1 - NR_MBUF) != (ops->get_count(pool) + cache->len)) {
990 		printf("Buffer count check failed.\n");
991 		ret = -1;
992 		goto done;
993 	}
994 
995 	for (i = 0; i < NR_MBUF; i++) {
996 		ret = rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(src[i]), dst,
997 				   COPY_LEN, RTE_DMA_OP_FLAG_AUTO_FREE);
998 
999 		if (ret < 0) {
1000 			printf("rte_dma_copy returned error.\n");
1001 			goto done;
1002 		}
1003 	}
1004 
1005 	rte_dma_submit(dev_id, vchan);
1006 	do {
1007 		nb_done += rte_dma_completed(dev_id, vchan, (NR_MBUF - nb_done), NULL, &dma_err);
1008 		if (dma_err)
1009 			break;
1010 		/* Sleep for 1 millisecond */
1011 		rte_delay_us_sleep(1000);
1012 	} while (retry-- && (nb_done < NR_MBUF));
1013 
1014 	buf_cnt2 = ops->get_count(pool) + cache->len;
1015 	if ((buf_cnt1 != buf_cnt2) || dma_err) {
1016 		printf("Free mem to dev buffer test failed.\n");
1017 		ret = -1;
1018 	}
1019 
1020 done:
1021 	/* If the test passes, the source buffers will have been freed by hardware. */
1022 	if (ret < 0)
1023 		rte_pktmbuf_free_bulk(&src[nb_done], (NR_MBUF - nb_done));
1024 
1025 	return ret;
1026 }
1027 
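/* reconfigure the vchan for mem-to-dev transfers with auto-free of source mbufs, restarting the device afterwards */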
1028 static int
1029 prepare_m2d_auto_free(int16_t dev_id, uint16_t vchan)
1030 {
1031 	const struct rte_dma_vchan_conf qconf = {
1032 		.direction = RTE_DMA_DIR_MEM_TO_DEV,
1033 		.nb_desc = TEST_RINGSIZE,
1034 		.auto_free.m2d.pool = pool,
1035 		.dst_port.port_type = RTE_DMA_PORT_PCIE,
1036 		.dst_port.pcie.coreid = 0,
1037 	};
1038 
1039 	/* Stop the device to reconfigure vchan. */
1040 	if (rte_dma_stop(dev_id) < 0)
1041 		ERR_RETURN("Error stopping device %u\n", dev_id);
1042 
1043 	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
1044 		ERR_RETURN("Error with queue configuration\n");
1045 
1046 	if (rte_dma_start(dev_id) != 0)
1047 		ERR_RETURN("Error with rte_dma_start()\n");
1048 
1049 	return 0;
1050 }
1051 
1052 static int
1053 test_dmadev_sg_copy_setup(void)
1054 {
1055 	int ret = TEST_SUCCESS;
1056 
1057 	if ((info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) == 0)
1058 		return TEST_SKIPPED;
1059 
1060 	return ret;
1061 }
1062 
1063 static int
1064 test_dmadev_burst_setup(void)
1065 {
1066 	if (rte_dma_burst_capacity(test_dev_id, vchan) < 64) {
1067 		RTE_LOG(ERR, USER1,
1068 			"DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
1069 			test_dev_id);
1070 		return TEST_SKIPPED;
1071 	}
1072 
1073 	return TEST_SUCCESS;
1074 }
1075 
1076 static int
1077 test_dmadev_err_handling_setup(void)
1078 {
1079 	int ret = TEST_SKIPPED;
1080 
1081 	/* to test error handling we can provide null pointers for source or dest in copies. This
1082 	 * requires VA mode in DPDK, since NULL(0) is a valid physical address.
1083 	 * We also need hardware that can report errors back.
1084 	 */
1085 	if (rte_eal_iova_mode() != RTE_IOVA_VA)
1086 		RTE_LOG(ERR, USER1,
1087 			"DMA Dev %u: DPDK not in VA mode, skipping error handling tests\n",
1088 			test_dev_id);
1089 	else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0)
1090 		RTE_LOG(ERR, USER1,
1091 			"DMA Dev %u: device does not report errors, skipping error handling tests\n",
1092 			test_dev_id);
1093 	else
1094 		ret = TEST_SUCCESS;
1095 
1096 	return ret;
1097 }
1098 
1099 static int
1100 test_dmadev_fill_setup(void)
1101 {
1102 	int ret = TEST_SUCCESS;
1103 
1104 	if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0) {
1105 		RTE_LOG(ERR, USER1,
1106 			"DMA Dev %u: No device fill support, skipping fill tests\n", test_dev_id);
1107 		ret = TEST_SKIPPED;
1108 	}
1109 
1110 	return ret;
1111 }
1112 
1113 static int
1114 test_dmadev_autofree_setup(void)
1115 {
1116 	int ret = TEST_SKIPPED;
1117 
1118 	if ((info.dev_capa & RTE_DMA_CAPA_M2D_AUTO_FREE) &&
1119 	    dma_add_test[TEST_M2D_AUTO_FREE].enabled == true) {
1120 		if (prepare_m2d_auto_free(test_dev_id, vchan) != 0)
1121 			return ret;
1122 
1123 		ret = TEST_SUCCESS;
1124 	}
1125 
1126 	return ret;
1127 }
1128 
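/* configure and start the device with one mem-to-mem vchan, verify clean stats, and create the mbuf pool used by the tests */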
1129 static int
1130 test_dmadev_setup(void)
1131 {
1132 	int16_t dev_id = test_dev_id;
1133 	struct rte_dma_stats stats;
1134 	const struct rte_dma_conf conf = { .nb_vchans = 1};
1135 	const struct rte_dma_vchan_conf qconf = {
1136 			.direction = RTE_DMA_DIR_MEM_TO_MEM,
1137 			.nb_desc = TEST_RINGSIZE,
1138 	};
1139 	int ret;
1140 
1141 	ret = rte_dma_info_get(dev_id, &info);
1142 	if (ret != 0)
1143 		ERR_RETURN("Error with rte_dma_info_get()\n");
1144 
1145 	if (info.max_vchans < 1)
1146 		ERR_RETURN("Error, no channels available on device id %u\n", dev_id);
1147 
1148 	if (rte_dma_configure(dev_id, &conf) != 0)
1149 		ERR_RETURN("Error with rte_dma_configure()\n");
1150 
1151 	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
1152 		ERR_RETURN("Error with queue configuration\n");
1153 
1154 	ret = rte_dma_info_get(dev_id, &info);
1155 	if (ret != 0 || info.nb_vchans != 1)
1156 		ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);
1157 
1158 	if (rte_dma_start(dev_id) != 0)
1159 		ERR_RETURN("Error with rte_dma_start()\n");
1160 
1161 	if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
1162 		ERR_RETURN("Error with rte_dma_stats_get()\n");
1163 
1164 	if (rte_dma_burst_capacity(dev_id, vchan) < 32)
1165 		ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests");
1166 
1167 	if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
1168 		ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
1169 				"submitted = %"PRIu64", errors = %"PRIu64"\n",
1170 				stats.completed, stats.submitted, stats.errors);
1171 	id_count = 0;
1172 
1173 	/* create a mempool for running tests */
1174 	pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
1175 			TEST_RINGSIZE * 2, /* n == num elements */
1176 			32,  /* cache size */
1177 			0,   /* priv size */
1178 			COPY_LEN + RTE_PKTMBUF_HEADROOM, /* data room size */
1179 			info.numa_node);
1180 	if (pool == NULL)
1181 		ERR_RETURN("Error with mempool creation\n");
1182 
1183 	check_err_stats = false;
1184 	vchan = 0;
1185 
1186 	return 0;
1187 }
1188 
1189 static void
1190 test_dmadev_teardown(void)
1191 {
1192 	rte_mempool_free(pool);
1193 	rte_dma_stop(test_dev_id);
1194 	rte_dma_stats_reset(test_dev_id, vchan);
1195 	test_dev_id = -EINVAL;
1196 }
1197 
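/* build and run the unit test suite for a single dmadev instance */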
1198 static int
1199 test_dmadev_instance(int16_t dev_id)
1200 {
1201 	struct rte_dma_info dev_info;
1202 	enum {
1203 		  TEST_COPY = 0,
1204 		  TEST_COPY_SG,
1205 		  TEST_START,
1206 		  TEST_BURST,
1207 		  TEST_ERR,
1208 		  TEST_FILL,
1209 		  TEST_M2D,
1210 		  TEST_END
1211 	};
1212 
1213 	static struct runtest_param param[] = {
1214 		{"copy", test_enqueue_copies, 640},
1215 		{"sg_copy", test_enqueue_sg_copies, 1},
1216 		{"stop_start", test_stop_start, 1},
1217 		{"burst_capacity", test_burst_capacity, 1},
1218 		{"error_handling", test_completion_handling, 1},
1219 		{"fill", test_enqueue_fill, 1},
1220 		{"m2d_auto_free", test_m2d_auto_free, 128},
1221 	};
1222 
1223 	static struct unit_test_suite ts = {
1224 		.suite_name = "DMA dev instance testsuite",
1225 		.setup = test_dmadev_setup,
1226 		.teardown = test_dmadev_teardown,
1227 		.unit_test_cases = {
1228 			TEST_CASE_NAMED_WITH_DATA("copy",
1229 				NULL, NULL,
1230 				runtest, &param[TEST_COPY]),
1231 			TEST_CASE_NAMED_WITH_DATA("sg_copy",
1232 				test_dmadev_sg_copy_setup, NULL,
1233 				runtest, &param[TEST_COPY_SG]),
1234 			TEST_CASE_NAMED_WITH_DATA("stop_start",
1235 				NULL, NULL,
1236 				runtest, &param[TEST_START]),
1237 			TEST_CASE_NAMED_WITH_DATA("burst_capacity",
1238 				test_dmadev_burst_setup, NULL,
1239 				runtest, &param[TEST_BURST]),
1240 			TEST_CASE_NAMED_WITH_DATA("error_handling",
1241 				test_dmadev_err_handling_setup, NULL,
1242 				runtest, &param[TEST_ERR]),
1243 			TEST_CASE_NAMED_WITH_DATA("fill",
1244 				test_dmadev_fill_setup, NULL,
1245 				runtest, &param[TEST_FILL]),
1246 			TEST_CASE_NAMED_WITH_DATA("m2d_autofree",
1247 				test_dmadev_autofree_setup, NULL,
1248 				runtest, &param[TEST_M2D]),
1249 			TEST_CASES_END()
1250 		}
1251 	};
1252 
1253 	int ret;
1254 
1255 	if (rte_dma_info_get(dev_id, &dev_info) < 0)
1256 		return TEST_SKIPPED;
1257 
1258 	test_dev_id = dev_id;
1259 	printf("\n### Test dmadev instance %u [%s]\n",
1260 		   test_dev_id, dev_info.dev_name);
1261 
1262 	ret = unit_test_suite_runner(&ts);
1263 	test_dev_id = -EINVAL;
1264 
1265 	return ret;
1266 }
1267 
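/* parse the DPDK_ADD_DMA_TEST and DPDK_ADD_DMA_TEST_PARAM environment variables to enable optional tests and set their parameters */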
1268 static void
1269 parse_dma_env_var(void)
1270 {
1271 	char *dma_env_param_str = getenv("DPDK_ADD_DMA_TEST_PARAM");
1272 	char *dma_env_test_str = getenv("DPDK_ADD_DMA_TEST");
1273 	char *params[32] = {0};
1274 	char *tests[32] = {0};
1275 	char *var[2] = {0};
1276 	int n_var = 0;
1277 	int i, j;
1278 
1279 	/* Additional test from commandline. */
1280 	if (dma_env_test_str && strlen(dma_env_test_str) > 0) {
1281 		n_var = rte_strsplit(dma_env_test_str, strlen(dma_env_test_str), tests,
1282 				RTE_DIM(tests), ',');
1283 		for (i = 0; i < n_var; i++) {
1284 			for (j = 0; j < TEST_MAX; j++) {
1285 				if (!strcmp(tests[i], dma_add_test[j].name))
1286 					dma_add_test[j].enabled = true;
1287 			}
1288 		}
1289 	}
1290 
1291 	/* Commandline variables for test */
1292 	if (dma_env_param_str && strlen(dma_env_param_str) > 0) {
1293 		n_var = rte_strsplit(dma_env_param_str, strlen(dma_env_param_str), params,
1294 				       RTE_DIM(params), ',');
1295 		for (i = 0; i < n_var; i++) {
1296 			rte_strsplit(params[i], strlen(params[i]), var,	RTE_DIM(var), '=');
1297 			for (j = 0; j < TEST_PARAM_MAX; j++) {
1298 				if (!strcmp(var[0], dma_test_param[j]))
1299 					env_test_param[j] = strtoul(var[1], NULL, 16);
1300 			}
1301 		}
1302 	}
1303 }
1304 
1305 static int
1306 test_dma(void)
1307 {
1308 	const char *pmd = "dma_skeleton";
1309 	int i;
1310 
1311 	parse_dma_env_var();
1312 
1313 	/* attempt to create skeleton instance - ignore errors due to one being already present */
1314 	rte_vdev_init(pmd, NULL);
1315 
1316 	if (rte_dma_count_avail() == 0)
1317 		return TEST_SKIPPED;
1318 
1319 	RTE_DMA_FOREACH_DEV(i) {
1320 		if (test_dma_api(i) < 0)
1321 			ERR_RETURN("Error performing API tests\n");
1322 
1323 		if (test_dmadev_instance(i) < 0)
1324 			ERR_RETURN("Error, test failure for device %d\n", i);
1325 	}
1326 
1327 	return 0;
1328 }
1329 
1330 REGISTER_DRIVER_TEST(dmadev_autotest, test_dma);
1331