/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>
#include <rte_service.h>
#include <rte_event_crypto_adapter.h>
#include "test.h"

#define PKT_TRACE                  0
#define NUM                        1
#define DEFAULT_NUM_XFORMS        (2)
#define NUM_MBUFS                 (8191)
#define MBUF_CACHE_SIZE           (256)
#define MAXIMUM_IV_LENGTH         (16)
#define DEFAULT_NUM_OPS_INFLIGHT  (128)
#define MAX_NB_SESSIONS            4
#define TEST_APP_PORT_ID           0
#define TEST_APP_EV_QUEUE_ID       0
#define TEST_APP_EV_PRIORITY       0
#define TEST_APP_EV_FLOWID         0xAABB
#define TEST_CRYPTO_EV_QUEUE_ID    1
#define TEST_ADAPTER_ID            0
#define TEST_CDEV_ID               0
#define TEST_CDEV_QP_ID            0
#define PACKET_LENGTH              64
#define NB_TEST_PORTS              1
#define NB_TEST_QUEUES             2
#define NUM_CORES                  1
#define CRYPTODEV_NAME_NULL_PMD    crypto_null

#define MBUF_SIZE              (sizeof(struct rte_mbuf) + \
				RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
#define IV_OFFSET              (sizeof(struct rte_crypto_op) + \
				sizeof(struct rte_crypto_sym_op) + \
				DEFAULT_NUM_XFORMS * \
				sizeof(struct rte_crypto_sym_xform))

/* Handle log statements in same manner as test macros */
#define LOG_DBG(...)    RTE_LOG(DEBUG, EAL, __VA_ARGS__)

static const uint8_t text_64B[] = {
	0x05, 0x15, 0x77, 0x32, 0xc9, 0x66, 0x91, 0x50,
	0x93, 0x9f, 0xbb, 0x4e, 0x2e, 0x5a, 0x02, 0xd0,
	0x2d, 0x9d, 0x31, 0x5d, 0xc8, 0x9e, 0x86, 0x36,
	0x54, 0x5c, 0x50, 0xe8, 0x75, 0x54, 0x74, 0x5e,
	0xd5, 0xa2, 0x84, 0x21, 0x2d, 0xc5, 0xf8, 0x1c,
	0x55, 0x1a, 0xba, 0x91, 0xce, 0xb5, 0xa3, 0x1e,
	0x31, 0xbf, 0xe9, 0xa1, 0x97, 0x5c, 0x2b, 0xd6,
	0x57, 0xa5, 0x9f, 0xab, 0xbd, 0xb0, 0x9b, 0x9c
};

struct event_crypto_adapter_test_params {
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *op_mpool;
	struct rte_mempool *session_mpool;
	struct rte_mempool *session_priv_mpool;
	struct rte_cryptodev_config *config;
	uint8_t crypto_event_port_id;
};

struct rte_event response_info = {
	.queue_id = TEST_APP_EV_QUEUE_ID,
	.sched_type = RTE_SCHED_TYPE_ATOMIC,
	.flow_id = TEST_APP_EV_FLOWID,
	.priority = TEST_APP_EV_PRIORITY
};

struct rte_event_crypto_request request_info = {
	.cdev_id = TEST_CDEV_ID,
	.queue_pair_id = TEST_CDEV_QP_ID
};

static struct event_crypto_adapter_test_params params;
static uint8_t crypto_adapter_setup_done;
static uint32_t slcore_id;
static int evdev;

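/* Allocate an mbuf from the given pool and fill it with the supplied data,
 * truncating the copy length to a multiple of blocksize when blocksize is
 * non-zero.
 */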
static struct rte_mbuf *
alloc_fill_mbuf(struct rte_mempool *mpool, const uint8_t *data,
		size_t len, uint8_t blocksize)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
	size_t t_len = len - (blocksize ? (len % blocksize) : 0);

	if (m) {
		char *dst = rte_pktmbuf_append(m, t_len);

		if (!dst) {
			rte_pktmbuf_free(m);
			return NULL;
		}

		rte_memcpy(dst, (const void *)data, t_len);
	}
	return m;
}

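/* FORWARD mode helper: enqueue the event carrying a crypto op to the event
 * device, poll the application port until the adapter forwards the completed
 * op back as an event, then release the op and its mbuf.
 */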
static int
send_recv_ev(struct rte_event *ev)
{
	struct rte_crypto_op *op;
	struct rte_event recv_ev;
	int ret;

	ret = rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID, ev, NUM);
	TEST_ASSERT_EQUAL(ret, NUM,
			  "Failed to send event to crypto adapter\n");

	while (rte_event_dequeue_burst(evdev,
			TEST_APP_PORT_ID, &recv_ev, NUM, 0) == 0)
		rte_pause();

	op = recv_ev.event_ptr;
#if PKT_TRACE
	struct rte_mbuf *m = op->sym->m_src;
	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
#endif
	rte_pktmbuf_free(op->sym->m_src);
	rte_crypto_op_free(op);

	return TEST_SUCCESS;
}

static int
test_crypto_adapter_stats(void)
{
	struct rte_event_crypto_adapter_stats stats;

	rte_event_crypto_adapter_stats_get(TEST_ADAPTER_ID, &stats);
	printf(" +------------------------------------------------------+\n");
	printf(" + Crypto adapter stats for instance %u:\n", TEST_ADAPTER_ID);
	printf(" + Event port poll count          %" PRIu64 "\n",
		stats.event_poll_count);
	printf(" + Event dequeue count            %" PRIu64 "\n",
		stats.event_deq_count);
	printf(" + Cryptodev enqueue count        %" PRIu64 "\n",
		stats.crypto_enq_count);
	printf(" + Cryptodev enqueue failed count %" PRIu64 "\n",
		stats.crypto_enq_fail);
	printf(" + Cryptodev dequeue count        %" PRIu64 "\n",
		stats.crypto_deq_count);
	printf(" + Event enqueue count            %" PRIu64 "\n",
		stats.event_enq_count);
	printf(" + Event enqueue retry count      %" PRIu64 "\n",
		stats.event_enq_retry_count);
	printf(" + Event enqueue fail count       %" PRIu64 "\n",
		stats.event_enq_fail_count);
	printf(" +------------------------------------------------------+\n");

	rte_event_crypto_adapter_stats_reset(TEST_ADAPTER_ID);
	return TEST_SUCCESS;
}

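/* Build a symmetric cipher op (NULL cipher, matching the crypto_null PMD) on
 * a 64B mbuf, attach either a session or a session-less xform together with
 * the adapter request/response metadata, and exercise the adapter in
 * OP_FORWARD mode via send_recv_ev().
 */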
static int
test_op_forward_mode(uint8_t session_less)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_cryptodev_sym_session *sess;
	union rte_event_crypto_metadata m_data;
	struct rte_crypto_sym_op *sym_op;
	struct rte_crypto_op *op;
	struct rte_mbuf *m;
	struct rte_event ev;
	uint32_t cap;
	int ret;
	uint8_t cipher_key[17];

	memset(&m_data, 0, sizeof(m_data));

	m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
	TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
#if PKT_TRACE
	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
#endif
	/* Setup Cipher Parameters */
	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.next = NULL;

	/* NULL cipher: the test runs on the crypto_null PMD */
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;

	cipher_xform.cipher.key.data = cipher_key;
	cipher_xform.cipher.key.length = 16;
	cipher_xform.cipher.iv.offset = IV_OFFSET;
	cipher_xform.cipher.iv.length = 16;

	op = rte_crypto_op_alloc(params.op_mpool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	TEST_ASSERT_NOT_NULL(op,
		"Failed to allocate symmetric crypto operation struct\n");

	sym_op = op->sym;

	if (!session_less) {
		sess = rte_cryptodev_sym_session_create(
				params.session_mpool);
		TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");

		/* Create Crypto session */
		ret = rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
				&cipher_xform, params.session_priv_mpool);
		TEST_ASSERT_SUCCESS(ret, "Failed to init session\n");

		ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
							&cap);
		TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
			/* Fill in private user data information */
			rte_memcpy(&m_data.response_info, &response_info,
				sizeof(response_info));
			rte_memcpy(&m_data.request_info, &request_info,
				sizeof(request_info));
			rte_cryptodev_sym_session_set_user_data(sess,
						&m_data, sizeof(m_data));
		}

		rte_crypto_op_attach_sym_session(op, sess);
	} else {
		struct rte_crypto_sym_xform *first_xform;

		rte_crypto_op_sym_xforms_alloc(op, NUM);
		op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
		first_xform = &cipher_xform;
		sym_op->xform = first_xform;
		uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
				(sizeof(struct rte_crypto_sym_xform) * 2);
		op->private_data_offset = len;
		/* Fill in private data information */
		rte_memcpy(&m_data.response_info, &response_info,
			   sizeof(response_info));
		rte_memcpy(&m_data.request_info, &request_info,
			   sizeof(request_info));
		rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
	}

	sym_op->m_src = m;
	sym_op->cipher.data.offset = 0;
	sym_op->cipher.data.length = PACKET_LENGTH;

	/* Fill in event info and update event_ptr with rte_crypto_op */
	memset(&ev, 0, sizeof(ev));
	ev.queue_id = TEST_CRYPTO_EV_QUEUE_ID;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.flow_id = TEST_APP_EV_FLOWID;
	ev.event_ptr = op;

	ret = send_recv_ev(&ev);
	TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to "
				"crypto adapter\n");

	test_crypto_adapter_stats();

	return TEST_SUCCESS;
}

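/* When the adapter is implemented in software (no internal port capability),
 * map its service to a dedicated service lcore so it can make progress.
 */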
static int
map_adapter_service_core(void)
{
	uint32_t adapter_service_id;
	int ret;

	if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
						&adapter_service_id) == 0) {
		uint32_t core_list[NUM_CORES];

		ret = rte_service_lcore_list(core_list, NUM_CORES);
		TEST_ASSERT(ret >= 0, "Failed to get service core list!");

		if (core_list[0] != slcore_id) {
			TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
						"Failed to add service core");
			TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
						"Failed to start service core");
		}

		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
					adapter_service_id, slcore_id, 1),
					"Failed to map adapter service");
	}

	return TEST_SUCCESS;
}

static int
test_sessionless_with_op_forward_mode(void)
{
	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
		map_adapter_service_core();
	else {
		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
			return TEST_SKIPPED;
	}

	TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
				"Failed to start event crypto adapter");

	ret = test_op_forward_mode(1);
	TEST_ASSERT_SUCCESS(ret, "Sessionless - FORWARD mode test failed\n");
	return TEST_SUCCESS;
}

static int
test_session_with_op_forward_mode(void)
{
	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
		map_adapter_service_core();
	else {
		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
			return TEST_SKIPPED;
	}

	TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
				"Failed to start event crypto adapter");

	ret = test_op_forward_mode(0);
	TEST_ASSERT_SUCCESS(ret, "Session based - FORWARD mode test failed\n");
	return TEST_SUCCESS;
}

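/* NEW mode helper: enqueue the crypto op directly to the cryptodev queue
 * pair, poll the application event port until the adapter injects the
 * completion event, then release the op and its mbuf.
 */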
static int
send_op_recv_ev(struct rte_crypto_op *op)
{
	struct rte_crypto_op *recv_op;
	struct rte_event ev;
	int ret;

	ret = rte_cryptodev_enqueue_burst(TEST_CDEV_ID, TEST_CDEV_QP_ID,
					  &op, NUM);
	TEST_ASSERT_EQUAL(ret, NUM, "Failed to enqueue to cryptodev\n");
	memset(&ev, 0, sizeof(ev));

	while (rte_event_dequeue_burst(evdev,
		TEST_APP_PORT_ID, &ev, NUM, 0) == 0)
		rte_pause();

	recv_op = ev.event_ptr;
#if PKT_TRACE
	struct rte_mbuf *m = recv_op->sym->m_src;
	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
#endif
	rte_pktmbuf_free(recv_op->sym->m_src);
	rte_crypto_op_free(recv_op);

	return TEST_SUCCESS;
}

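/* Build a symmetric cipher op (NULL cipher) carrying only the response event
 * metadata, enqueue it straight to the cryptodev, and verify the adapter
 * delivers the completion event in OP_NEW mode via send_op_recv_ev().
 */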
static int
test_op_new_mode(uint8_t session_less)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_cryptodev_sym_session *sess;
	union rte_event_crypto_metadata m_data;
	struct rte_crypto_sym_op *sym_op;
	struct rte_crypto_op *op;
	struct rte_mbuf *m;
	uint32_t cap;
	int ret;
	uint8_t cipher_key[17];

	memset(&m_data, 0, sizeof(m_data));

	m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
	TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
#if PKT_TRACE
	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
#endif
	/* Setup Cipher Parameters */
	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.next = NULL;

	/* NULL cipher: the test runs on the crypto_null PMD */
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;

	cipher_xform.cipher.key.data = cipher_key;
	cipher_xform.cipher.key.length = 16;
	cipher_xform.cipher.iv.offset = IV_OFFSET;
	cipher_xform.cipher.iv.length = 16;

	op = rte_crypto_op_alloc(params.op_mpool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	TEST_ASSERT_NOT_NULL(op, "Failed to allocate crypto_op!\n");

	sym_op = op->sym;

	if (!session_less) {
		sess = rte_cryptodev_sym_session_create(
				params.session_mpool);
		TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");

		ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
							&cap);
		TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
			/* Fill in private user data information */
			rte_memcpy(&m_data.response_info, &response_info,
				   sizeof(response_info));
			rte_cryptodev_sym_session_set_user_data(sess,
						&m_data, sizeof(m_data));
		}
		ret = rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
				&cipher_xform, params.session_priv_mpool);
		TEST_ASSERT_SUCCESS(ret, "Failed to init session\n");

		rte_crypto_op_attach_sym_session(op, sess);
	} else {
		struct rte_crypto_sym_xform *first_xform;

		rte_crypto_op_sym_xforms_alloc(op, NUM);
		op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
		first_xform = &cipher_xform;
		sym_op->xform = first_xform;
		uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
				(sizeof(struct rte_crypto_sym_xform) * 2);
		op->private_data_offset = len;
		/* Fill in private data information */
		rte_memcpy(&m_data.response_info, &response_info,
			   sizeof(response_info));
		rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
	}

	sym_op->m_src = m;
	sym_op->cipher.data.offset = 0;
	sym_op->cipher.data.length = PACKET_LENGTH;

	ret = send_op_recv_ev(op);
	TEST_ASSERT_SUCCESS(ret, "Failed to enqueue op to cryptodev\n");

	test_crypto_adapter_stats();

	return TEST_SUCCESS;
}

static int
test_sessionless_with_op_new_mode(void)
{
	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
		map_adapter_service_core();
	else {
		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
			return TEST_SKIPPED;
	}

	/* start the event crypto adapter */
	TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
				"Failed to start event crypto adapter");

	ret = test_op_new_mode(1);
	TEST_ASSERT_SUCCESS(ret, "Sessionless - NEW mode test failed\n");
	return TEST_SUCCESS;
}

static int
test_session_with_op_new_mode(void)
{
	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
		map_adapter_service_core();
	else {
		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
			return TEST_SKIPPED;
	}

	TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
				"Failed to start event crypto adapter");

	ret = test_op_new_mode(0);
	TEST_ASSERT_SUCCESS(ret, "Session based - NEW mode test failed\n");
	return TEST_SUCCESS;
}

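/* Create the mbuf, crypto op and session mempools, instantiate a crypto_null
 * vdev if no crypto device is present, and configure one queue pair on it.
 */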
static int
configure_cryptodev(void)
{
	struct rte_cryptodev_qp_conf qp_conf;
	struct rte_cryptodev_config conf;
	struct rte_cryptodev_info info;
	unsigned int session_size;
	uint8_t nb_devs;
	int ret;

	params.mbuf_pool = rte_pktmbuf_pool_create(
			"CRYPTO_ADAPTER_MBUFPOOL",
			NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
			rte_socket_id());
	if (params.mbuf_pool == NULL) {
		RTE_LOG(ERR, USER1, "Can't create CRYPTO_MBUFPOOL\n");
		return TEST_FAILED;
	}

	params.op_mpool = rte_crypto_op_pool_create(
			"EVENT_CRYPTO_SYM_OP_POOL",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			NUM_MBUFS, MBUF_CACHE_SIZE,
			DEFAULT_NUM_XFORMS *
			sizeof(struct rte_crypto_sym_xform) +
			MAXIMUM_IV_LENGTH,
			rte_socket_id());
	if (params.op_mpool == NULL) {
		RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
		return TEST_FAILED;
	}

	/* Create a NULL crypto device */
	nb_devs = rte_cryptodev_device_count_by_driver(
			rte_cryptodev_driver_id_get(
			RTE_STR(CRYPTODEV_NAME_NULL_PMD)));
	if (!nb_devs) {
		ret = rte_vdev_init(
			RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL);

		TEST_ASSERT(ret == 0, "Failed to create pmd:%s instance\n",
			    RTE_STR(CRYPTODEV_NAME_NULL_PMD));
	}

	nb_devs = rte_cryptodev_count();
	if (!nb_devs) {
		RTE_LOG(ERR, USER1, "No crypto devices found!\n");
		return TEST_FAILED;
	}

	/*
	 * The session private data must also hold the event crypto adapter
	 * metadata, so account for it in the private mempool element size.
	 */
	session_size = rte_cryptodev_sym_get_private_session_size(TEST_CDEV_ID);
	session_size += sizeof(union rte_event_crypto_metadata);

	params.session_mpool = rte_cryptodev_sym_session_pool_create(
			"CRYPTO_ADAPTER_SESSION_MP",
			MAX_NB_SESSIONS, 0, 0, 0, SOCKET_ID_ANY);
	TEST_ASSERT_NOT_NULL(params.session_mpool,
			"session mempool allocation failed\n");

	params.session_priv_mpool = rte_mempool_create(
				"CRYPTO_AD_SESS_MP_PRIV",
				MAX_NB_SESSIONS,
				session_size,
				0, 0, NULL, NULL, NULL,
				NULL, SOCKET_ID_ANY,
				0);
	TEST_ASSERT_NOT_NULL(params.session_priv_mpool,
			"session private data mempool allocation failed\n");

	rte_cryptodev_info_get(TEST_CDEV_ID, &info);
	conf.nb_queue_pairs = info.max_nb_queue_pairs;
	conf.socket_id = SOCKET_ID_ANY;
	conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;

	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(TEST_CDEV_ID, &conf),
			"Failed to configure cryptodev %u with %u qps\n",
			TEST_CDEV_ID, conf.nb_queue_pairs);

	qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT;
	qp_conf.mp_session = params.session_mpool;
	qp_conf.mp_session_private = params.session_priv_mpool;

	TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
			TEST_CDEV_ID, TEST_CDEV_QP_ID, &qp_conf,
			rte_cryptodev_socket_id(TEST_CDEV_ID)),
			"Failed to setup queue pair %u on cryptodev %u\n",
			TEST_CDEV_QP_ID, TEST_CDEV_ID);

	return TEST_SUCCESS;
}

static inline void
evdev_set_conf_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = NB_TEST_PORTS;
	dev_conf->nb_event_queues = NB_TEST_QUEUES;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

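/* Create an event_sw vdev if no event device is available, configure it with
 * one port and two queues, and link the application port to the app queue.
 */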
static int
configure_eventdev(void)
{
	struct rte_event_queue_conf queue_conf;
	struct rte_event_dev_config devconf;
	struct rte_event_dev_info info;
	uint32_t queue_count;
	uint32_t port_count;
	int ret;
	uint8_t qid;

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... "
			"testing with event_sw device\n");
		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
					"Error creating eventdev");
		evdev = rte_event_dev_get_dev_id("event_sw0");
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");

	evdev_set_conf_values(&devconf, &info);

	ret = rte_event_dev_configure(evdev, &devconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");

	/* Set up event queue */
	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
					&queue_count);
	TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
	TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");

	qid = TEST_APP_EV_QUEUE_ID;
	ret = rte_event_queue_setup(evdev, qid, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);

	queue_conf.nb_atomic_flows = info.max_event_queue_flows;
	queue_conf.nb_atomic_order_sequences = 32;
	queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	qid = TEST_CRYPTO_EV_QUEUE_ID;
	ret = rte_event_queue_setup(evdev, qid, &queue_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);

	/* Set up event port */
	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
					&port_count);
	TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
	TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");

	ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
			    TEST_APP_PORT_ID);

	qid = TEST_APP_EV_QUEUE_ID;
	ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
	TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
		    TEST_APP_PORT_ID);

	return TEST_SUCCESS;
}

static void
test_crypto_adapter_free(void)
{
	rte_event_crypto_adapter_free(TEST_ADAPTER_ID);
}

static int
test_crypto_adapter_create(void)
{
	struct rte_event_port_conf conf = {
		.dequeue_depth = 8,
		.enqueue_depth = 8,
		.new_event_threshold = 1200,
	};
	int ret;

	/* Create adapter with default port creation callback */
	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
					      evdev,
					      &conf, 0);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");

	return TEST_SUCCESS;
}

static int
test_crypto_adapter_qp_add_del(void)
{
	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
	} else
		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
					TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);

	TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");

	ret = rte_event_crypto_adapter_queue_pair_del(TEST_ADAPTER_ID,
					TEST_CDEV_ID, TEST_CDEV_QP_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to delete queue pair\n");

	return TEST_SUCCESS;
}

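/* Create the crypto adapter in the requested mode, bind the cryptodev queue
 * pair to it (with response event info when the PMD supports internal event
 * binding) and fetch the adapter's event port for later queue linking.
 */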
static int
configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf conf = {
		.dequeue_depth = 8,
		.enqueue_depth = 8,
		.new_event_threshold = 1200,
	};

	uint32_t cap;
	int ret;

	/* Create adapter with default port creation callback */
	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
					      evdev,
					      &conf, mode);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");

	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
	} else
		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);

	TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");

	ret = rte_event_crypto_adapter_event_port_get(TEST_ADAPTER_ID,
				&params.crypto_event_port_id);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");

	return TEST_SUCCESS;
}

static void
test_crypto_adapter_stop(void)
{
	uint32_t evdev_service_id, adapter_service_id;

	/* retrieve service ids & stop services */
	if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
						&adapter_service_id) == 0) {
		rte_service_runstate_set(adapter_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
		rte_event_crypto_adapter_stop(TEST_ADAPTER_ID);
	}

	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		rte_service_runstate_set(evdev_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
		rte_event_dev_stop(evdev);
	}
}

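/* One-time adapter setup for a test mode: configure the adapter, link the
 * crypto event queue to its port, start the eventdev service on a service
 * lcore and finally start the event device itself.
 */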
static int
test_crypto_adapter_conf(enum rte_event_crypto_adapter_mode mode)
{
	uint32_t evdev_service_id;
	uint8_t qid;
	int ret;

	if (!crypto_adapter_setup_done) {
		ret = configure_event_crypto_adapter(mode);
		if (!ret) {
			qid = TEST_CRYPTO_EV_QUEUE_ID;
			ret = rte_event_port_link(evdev,
				params.crypto_event_port_id, &qid, NULL, 1);
			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
					"port=%u\n", qid,
					params.crypto_event_port_id);
		}
		crypto_adapter_setup_done = 1;
	}

	/* retrieve service ids */
	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		/* add a service core and start it */
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
					"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
					"Failed to start service core");

		/* map services to it */
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
				slcore_id, 1), "Failed to map evdev service");

		/* set services to running */
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
					1), "Failed to start evdev service");
	}

	/* start the eventdev */
	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
				"Failed to start event device");

	return TEST_SUCCESS;
}

static int
test_crypto_adapter_conf_op_forward_mode(void)
{
	enum rte_event_crypto_adapter_mode mode;

	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD;
	TEST_ASSERT_SUCCESS(test_crypto_adapter_conf(mode),
				"Failed to config crypto adapter");

	return TEST_SUCCESS;
}

static int
test_crypto_adapter_conf_op_new_mode(void)
{
	enum rte_event_crypto_adapter_mode mode;

	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_NEW;
	TEST_ASSERT_SUCCESS(test_crypto_adapter_conf(mode),
				"Failed to config crypto adapter");

	return TEST_SUCCESS;
}

static int
testsuite_setup(void)
{
	int ret;

	slcore_id = rte_get_next_lcore(-1, 1, 0);
	TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
			"are required to run this autotest\n");

	/* Setup and start event device. */
	ret = configure_eventdev();
	TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");

	/* Setup and start crypto device. */
	ret = configure_cryptodev();
	TEST_ASSERT_SUCCESS(ret, "cryptodev initialization failed\n");

	return TEST_SUCCESS;
}

static void
crypto_teardown(void)
{
	/* Free mbuf mempool */
	if (params.mbuf_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_MBUFPOOL count %u\n",
		rte_mempool_avail_count(params.mbuf_pool));
		rte_mempool_free(params.mbuf_pool);
		params.mbuf_pool = NULL;
	}

	/* Free session mempool */
	if (params.session_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_SESSION_MP count %u\n",
		rte_mempool_avail_count(params.session_mpool));
		rte_mempool_free(params.session_mpool);
		params.session_mpool = NULL;
	}

	/* Free session private data mempool */
	if (params.session_priv_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "CRYPTO_AD_SESS_MP_PRIV count %u\n",
		rte_mempool_avail_count(params.session_priv_mpool));
		rte_mempool_free(params.session_priv_mpool);
		params.session_priv_mpool = NULL;
	}

	/* Free ops mempool */
	if (params.op_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "EVENT_CRYPTO_SYM_OP_POOL count %u\n",
		rte_mempool_avail_count(params.op_mpool));
		rte_mempool_free(params.op_mpool);
		params.op_mpool = NULL;
	}
}

static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
}

static void
testsuite_teardown(void)
{
	crypto_teardown();
	eventdev_teardown();
}

static struct unit_test_suite functional_testsuite = {
	.suite_name = "Event crypto adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {

		TEST_CASE_ST(NULL, test_crypto_adapter_free,
				test_crypto_adapter_create),

		TEST_CASE_ST(test_crypto_adapter_create,
				test_crypto_adapter_free,
				test_crypto_adapter_qp_add_del),

		TEST_CASE_ST(test_crypto_adapter_create,
				test_crypto_adapter_free,
				test_crypto_adapter_stats),

		TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
				test_crypto_adapter_stop,
				test_session_with_op_forward_mode),

		TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
				test_crypto_adapter_stop,
				test_sessionless_with_op_forward_mode),

		TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
				test_crypto_adapter_stop,
				test_session_with_op_new_mode),

		TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
				test_crypto_adapter_stop,
				test_sessionless_with_op_new_mode),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_crypto_adapter(void)
{
	return unit_test_suite_runner(&functional_testsuite);
}

REGISTER_TEST_COMMAND(event_crypto_adapter_autotest,
		test_event_crypto_adapter);