/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include "test.h"

#include <stdio.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_libipsec_perf(void)
{
	printf("ipsec_perf not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

#else

#include <rte_ipsec.h>
#include <rte_random.h>

#include "test_cryptodev.h"

#define RING_SIZE 4096
#define BURST_SIZE 64
#define NUM_MBUF 4095
#define DEFAULT_SPI 7

struct ipsec_test_cfg {
	uint32_t replay_win_sz;
	uint32_t esn;
	uint64_t flags;
	enum rte_crypto_sym_xform_type type;
};

struct rte_mempool *mbuf_pool, *cop_pool;

struct stats_counter {
	uint64_t nb_prepare_call;
	uint64_t nb_prepare_pkt;
	uint64_t nb_process_call;
	uint64_t nb_process_pkt;
	uint64_t prepare_ticks_elapsed;
	uint64_t process_ticks_elapsed;
};

struct ipsec_sa {
	struct rte_ipsec_session ss[2];
	struct rte_ipsec_sa_prm sa_prm;
	struct rte_security_ipsec_xform ipsec_xform;
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	struct rte_crypto_sym_xform *crypto_xforms;
	struct rte_crypto_op *cop[BURST_SIZE];
	enum rte_crypto_sym_xform_type type;
	struct stats_counter cnt;
	uint32_t replay_win_sz;
	uint32_t sa_flags;
};

static const struct ipsec_test_cfg test_cfg[] = {
	{0, 0, 0, RTE_CRYPTO_SYM_XFORM_AEAD},
	{0, 0, 0, RTE_CRYPTO_SYM_XFORM_CIPHER},
	{128, 1, 0, RTE_CRYPTO_SYM_XFORM_AEAD},
	{128, 1, 0, RTE_CRYPTO_SYM_XFORM_CIPHER},
};

static struct rte_ipv4_hdr ipv4_outer = {
	.version_ihl = IPVERSION << 4 |
		sizeof(ipv4_outer) / RTE_IPV4_IHL_MULTIPLIER,
	.time_to_live = IPDEFTTL,
	.next_proto_id = IPPROTO_ESP,
	.src_addr = RTE_IPV4(192, 168, 1, 100),
	.dst_addr = RTE_IPV4(192, 168, 2, 100),
};

static struct rte_ring *ring_inb_prepare;
static struct rte_ring *ring_inb_process;
static struct rte_ring *ring_outb_prepare;
static struct rte_ring *ring_outb_process;

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};

const struct supported_cipher_algo cipher_algo[] = {
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	}
};

const struct supported_auth_algo auth_algo[] = {
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	}
};

const struct supported_aead_algo aead_algo[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	}
};

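/* Allocate one mbuf from the pool and give it a fixed 64-byte payload. */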
static struct rte_mbuf *generate_mbuf_data(struct rte_mempool *mpool)
{
	struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mpool);

	if (mbuf) {
		mbuf->data_len = 64;
		mbuf->pkt_len = 64;
	}

	return mbuf;
}

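/*
 * Populate the rte_ipsec_sa_prm for the SA: tunnel header, replay window
 * and the crypto transform chain (single AEAD, or linked cipher+auth),
 * with operation types chosen according to the SA direction.
 */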
static int
fill_ipsec_param(struct ipsec_sa *sa)
{
	struct rte_ipsec_sa_prm *prm = &sa->sa_prm;

	memset(prm, 0, sizeof(*prm));

	prm->flags = sa->sa_flags;

	/* setup ipsec xform */
	prm->ipsec_xform = sa->ipsec_xform;
	prm->ipsec_xform.salt = (uint32_t)rte_rand();
	prm->ipsec_xform.replay_win_sz = sa->replay_win_sz;

	/* setup tunnel related fields */
	prm->tun.hdr_len = sizeof(ipv4_outer);
	prm->tun.next_proto = IPPROTO_IPIP;
	prm->tun.hdr = &ipv4_outer;

	if (sa->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		sa->aead_xform.type = sa->type;
		sa->aead_xform.aead.algo = aead_algo->algo;
		sa->aead_xform.next = NULL;
		sa->aead_xform.aead.digest_length = aead_algo->digest_len;
		sa->aead_xform.aead.iv.offset = IV_OFFSET;
		sa->aead_xform.aead.iv.length = 12;

		if (sa->ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
			sa->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
		} else {
			sa->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
		}

		sa->crypto_xforms = &sa->aead_xform;
	} else {
		sa->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		sa->cipher_xform.cipher.algo = cipher_algo->algo;
		sa->cipher_xform.cipher.iv.offset = IV_OFFSET;
		sa->cipher_xform.cipher.iv.length = 12;
		sa->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		sa->auth_xform.auth.algo = auth_algo->algo;
		sa->auth_xform.auth.digest_length = auth_algo->digest_len;

		if (sa->ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
			sa->cipher_xform.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
			sa->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
			sa->cipher_xform.next = NULL;
			sa->auth_xform.next = &sa->cipher_xform;
			sa->crypto_xforms = &sa->auth_xform;
		} else {
			sa->cipher_xform.cipher.op =
				RTE_CRYPTO_CIPHER_OP_ENCRYPT;
			sa->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
			sa->auth_xform.next = NULL;
			sa->cipher_xform.next = &sa->auth_xform;
			sa->crypto_xforms = &sa->cipher_xform;
		}
	}

	prm->crypto_xform = sa->crypto_xforms;

	return TEST_SUCCESS;
}

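/*
 * Size, allocate and initialize the rte_ipsec_sa, then prepare the session.
 * A NULL crypto session is used, since this perf test never submits the
 * crypto ops to an actual crypto device.
 */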
static int
create_sa(enum rte_security_session_action_type action_type,
	struct ipsec_sa *sa)
{
	void *dummy_ses = NULL;
	size_t sz;
	int rc;

	memset(&sa->ss[0], 0, sizeof(sa->ss[0]));

	rc = fill_ipsec_param(sa);
	if (rc != 0) {
		printf("failed to fill ipsec param\n");
		return TEST_FAILED;
	}

	sz = rte_ipsec_sa_size(&sa->sa_prm);
	TEST_ASSERT(sz > 0, "rte_ipsec_sa_size() failed\n");

	sa->ss[0].sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	TEST_ASSERT_NOT_NULL(sa->ss[0].sa,
		"failed to allocate memory for rte_ipsec_sa\n");

	sa->ss[0].type = action_type;
	sa->ss[0].crypto.ses = dummy_ses;

	rc = rte_ipsec_sa_init(sa->ss[0].sa, &sa->sa_prm, sz);
	rc = (rc > 0 && (uint32_t)rc <= sz) ? 0 : -EINVAL;

	if (rc == 0)
		rc = rte_ipsec_session_prepare(&sa->ss[0]);
	else
		return TEST_FAILED;

	return TEST_SUCCESS;
}

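/*
 * Time a single rte_ipsec_pkt_crypto_prepare() call on a burst of mbufs.
 * Crypto ops are allocated beforehand and freed afterwards so that only
 * the prepare path itself is measured.
 */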
static int
packet_prepare(struct rte_mbuf **buf, struct ipsec_sa *sa,
	uint16_t num_pkts)
{
	uint64_t time_stamp;
	uint16_t k = 0, i;

	for (i = 0; i < num_pkts; i++) {
		sa->cop[i] = rte_crypto_op_alloc(cop_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC);
		if (sa->cop[i] == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate symmetric crypto op\n");
			return k;
		}
	}

	time_stamp = rte_rdtsc_precise();

	k = rte_ipsec_pkt_crypto_prepare(&sa->ss[0], buf,
		sa->cop, num_pkts);

	time_stamp = rte_rdtsc_precise() - time_stamp;

	if (k != num_pkts) {
		RTE_LOG(ERR, USER1, "rte_ipsec_pkt_crypto_prepare fail\n");
		return k;
	}

	sa->cnt.prepare_ticks_elapsed += time_stamp;
	sa->cnt.nb_prepare_call++;
	sa->cnt.nb_prepare_pkt += k;

	for (i = 0; i < num_pkts; i++)
		rte_crypto_op_free(sa->cop[i]);

	return k;
}

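/* Time a single rte_ipsec_pkt_process() call on a burst of mbufs. */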
static int
packet_process(struct rte_mbuf **buf, struct ipsec_sa *sa,
	uint16_t num_pkts)
{
	uint64_t time_stamp;
	uint16_t k = 0;

	time_stamp = rte_rdtsc_precise();

	k = rte_ipsec_pkt_process(&sa->ss[0], buf, num_pkts);

	time_stamp = rte_rdtsc_precise() - time_stamp;

	if (k != num_pkts) {
		RTE_LOG(ERR, USER1, "rte_ipsec_pkt_process fail\n");
		return k;
	}

	sa->cnt.process_ticks_elapsed += time_stamp;
	sa->cnt.nb_process_call++;
	sa->cnt.nb_process_pkt += k;

	return k;
}

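/*
 * Drain deq_ring through packet_prepare() into enq_ring, then drain
 * enq_ring through packet_process() into ring, so every mbuf makes one
 * full prepare/process pass for the given SA.
 */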
static int
create_traffic(struct ipsec_sa *sa, struct rte_ring *deq_ring,
	struct rte_ring *enq_ring, struct rte_ring *ring)
{
	struct rte_mbuf *mbuf[BURST_SIZE];
	uint16_t num_pkts, n;

	while (rte_ring_empty(deq_ring) == 0) {
		num_pkts = rte_ring_sc_dequeue_burst(deq_ring, (void **)mbuf,
			RTE_DIM(mbuf), NULL);
		if (num_pkts == 0)
			return TEST_FAILED;

		n = packet_prepare(mbuf, sa, num_pkts);
		if (n != num_pkts)
			return TEST_FAILED;

		num_pkts = rte_ring_sp_enqueue_burst(enq_ring, (void **)mbuf,
			num_pkts, NULL);
		if (num_pkts == 0)
			return TEST_FAILED;
	}

	deq_ring = enq_ring;
	enq_ring = ring;

	while (rte_ring_empty(deq_ring) == 0) {
		num_pkts = rte_ring_sc_dequeue_burst(deq_ring, (void **)mbuf,
			RTE_DIM(mbuf), NULL);
		if (num_pkts == 0)
			return TEST_FAILED;

		n = packet_process(mbuf, sa, num_pkts);
		if (n != num_pkts)
			return TEST_FAILED;

		num_pkts = rte_ring_sp_enqueue_burst(enq_ring, (void **)mbuf,
			num_pkts, NULL);
		if (num_pkts == 0)
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

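/*
 * Fill the outbound (egress) and inbound (ingress) SA templates for one
 * test configuration and reset the per-SA statistics counters.
 */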
static void
fill_ipsec_sa_out(const struct ipsec_test_cfg *test_cfg,
	struct ipsec_sa *sa)
{
	sa->ipsec_xform.spi = DEFAULT_SPI;
	sa->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	sa->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	sa->ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	sa->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	sa->ipsec_xform.options.esn = test_cfg->esn;
	sa->type = test_cfg->type;
	sa->replay_win_sz = test_cfg->replay_win_sz;
	sa->sa_flags = test_cfg->flags;
	sa->cnt.nb_prepare_call = 0;
	sa->cnt.nb_prepare_pkt = 0;
	sa->cnt.nb_process_call = 0;
	sa->cnt.nb_process_pkt = 0;
	sa->cnt.process_ticks_elapsed = 0;
	sa->cnt.prepare_ticks_elapsed = 0;
}

static void
fill_ipsec_sa_in(const struct ipsec_test_cfg *test_cfg,
	struct ipsec_sa *sa)
{
	sa->ipsec_xform.spi = DEFAULT_SPI;
	sa->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	sa->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	sa->ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	sa->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	sa->ipsec_xform.options.esn = test_cfg->esn;
	sa->type = test_cfg->type;
	sa->replay_win_sz = test_cfg->replay_win_sz;
	sa->sa_flags = test_cfg->flags;
	sa->cnt.nb_prepare_call = 0;
	sa->cnt.nb_prepare_pkt = 0;
	sa->cnt.nb_process_call = 0;
	sa->cnt.nb_process_pkt = 0;
	sa->cnt.process_ticks_elapsed = 0;
	sa->cnt.prepare_ticks_elapsed = 0;
}

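/* Build and initialize the outbound and inbound SAs for one test config. */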
static int
init_sa_session(const struct ipsec_test_cfg *test_cfg,
	struct ipsec_sa *sa_out, struct ipsec_sa *sa_in)
{
	int rc;

	fill_ipsec_sa_in(test_cfg, sa_in);
	fill_ipsec_sa_out(test_cfg, sa_out);

	rc = create_sa(RTE_SECURITY_ACTION_TYPE_NONE, sa_out);
	if (rc != 0) {
		RTE_LOG(ERR, USER1, "outbound create_sa failed\n");
		return TEST_FAILED;
	}

	rc = create_sa(RTE_SECURITY_ACTION_TYPE_NONE, sa_in);
	if (rc != 0) {
		RTE_LOG(ERR, USER1, "inbound create_sa failed\n");
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

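/*
 * Create the mbuf and crypto-op pools plus the four rings used to cycle
 * packets between the prepare and process stages, then pre-fill the
 * inbound prepare ring with NUM_MBUF dummy packets.
 */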
static int
testsuite_setup(void)
{
	struct rte_mbuf *mbuf;
	int i;

	mbuf_pool = rte_pktmbuf_pool_create("IPSEC_PERF_MBUFPOOL",
			NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL) {
		RTE_LOG(ERR, USER1, "Can't create MBUFPOOL\n");
		return TEST_FAILED;
	}

	cop_pool = rte_crypto_op_pool_create(
			"MBUF_CRYPTO_SYM_OP_POOL",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			NUM_MBUFS, MBUF_CACHE_SIZE,
			DEFAULT_NUM_XFORMS *
			sizeof(struct rte_crypto_sym_xform) +
			MAXIMUM_IV_LENGTH,
			rte_socket_id());
	if (cop_pool == NULL) {
		RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
		return TEST_FAILED;
	}

	ring_inb_prepare = rte_ring_create("ipsec_test_ring_inb_prepare",
			RING_SIZE, SOCKET_ID_ANY, 0);
	if (ring_inb_prepare == NULL)
		return TEST_FAILED;

	ring_inb_process = rte_ring_create("ipsec_test_ring_inb_process",
			RING_SIZE, SOCKET_ID_ANY, 0);
	if (ring_inb_process == NULL)
		return TEST_FAILED;

	ring_outb_prepare = rte_ring_create("ipsec_test_ring_outb_prepare",
			RING_SIZE, SOCKET_ID_ANY, 0);
	if (ring_outb_prepare == NULL)
		return TEST_FAILED;

	ring_outb_process = rte_ring_create("ipsec_test_ring_outb_process",
			RING_SIZE, SOCKET_ID_ANY, 0);
	if (ring_outb_process == NULL)
		return TEST_FAILED;

	for (i = 0; i < NUM_MBUF; i++) {
		mbuf = generate_mbuf_data(mbuf_pool);
		if (mbuf && rte_ring_sp_enqueue_bulk(ring_inb_prepare,
				(void **)&mbuf, 1, NULL))
			continue;
		else
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

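/*
 * Keep cycling the mbufs through the outbound and inbound SAs for roughly
 * 10 seconds, accumulating prepare/process statistics on both SAs.
 */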
static int
measure_performance(struct ipsec_sa *sa_out, struct ipsec_sa *sa_in)
{
	uint64_t time_diff = 0;
	uint64_t begin = 0;
	uint64_t hz = rte_get_timer_hz();

	begin = rte_get_timer_cycles();

	do {
		if (create_traffic(sa_out, ring_inb_prepare, ring_inb_process,
				ring_outb_prepare) < 0)
			return TEST_FAILED;

		if (create_traffic(sa_in, ring_outb_prepare, ring_outb_process,
				ring_inb_prepare) < 0)
			return TEST_FAILED;

		time_diff = rte_get_timer_cycles() - begin;
	} while (time_diff < (hz * 10));

	return TEST_SUCCESS;
}

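/* Report the per-packet average cycle cost for each SA and direction. */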
static void
print_metrics(const struct ipsec_test_cfg *test_cfg,
	struct ipsec_sa *sa_out, struct ipsec_sa *sa_in)
{
	printf("\nMetrics of libipsec prepare/process api:\n");

	printf("replay window size = %u\n", test_cfg->replay_win_sz);
	if (test_cfg->esn)
		printf("replay esn is enabled\n");
	else
		printf("replay esn is disabled\n");
	if (test_cfg->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		printf("AEAD algo is AES_GCM\n");
	else
		printf("CIPHER/AUTH algo is AES_CBC/SHA1\n");

	printf("avg cycles for a pkt prepare in outbound is = %.2Lf\n",
			(long double)sa_out->cnt.prepare_ticks_elapsed
			/ sa_out->cnt.nb_prepare_pkt);
	printf("avg cycles for a pkt process in outbound is = %.2Lf\n",
			(long double)sa_out->cnt.process_ticks_elapsed
			/ sa_out->cnt.nb_process_pkt);
	printf("avg cycles for a pkt prepare in inbound is = %.2Lf\n",
			(long double)sa_in->cnt.prepare_ticks_elapsed
			/ sa_in->cnt.nb_prepare_pkt);
	printf("avg cycles for a pkt process in inbound is = %.2Lf\n",
			(long double)sa_in->cnt.process_ticks_elapsed
			/ sa_in->cnt.nb_process_pkt);
}

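/* Release the mempools and rings created in testsuite_setup(). */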
static void
testsuite_teardown(void)
{
	if (mbuf_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "MBUFPOOL count %u\n",
			rte_mempool_avail_count(mbuf_pool));
		rte_mempool_free(mbuf_pool);
		mbuf_pool = NULL;
	}

	if (cop_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
			rte_mempool_avail_count(cop_pool));
		rte_mempool_free(cop_pool);
		cop_pool = NULL;
	}

	rte_ring_free(ring_inb_prepare);
	rte_ring_free(ring_inb_process);
	rte_ring_free(ring_outb_prepare);
	rte_ring_free(ring_outb_process);

	ring_inb_prepare = NULL;
	ring_inb_process = NULL;
	ring_outb_prepare = NULL;
	ring_outb_process = NULL;
}

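/*
 * Perf test entry point: for every configuration in test_cfg, build the
 * SAs, run the 10-second measurement loop and print the averaged results.
 */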
static int
test_libipsec_perf(void)
{
	struct ipsec_sa sa_out = { .sa_prm = { 0 } };
	struct ipsec_sa sa_in = { .sa_prm = { 0 } };
	uint32_t i;
	int ret;

	ret = rte_cryptodev_count();
	if (ret < 1) {
		RTE_LOG(WARNING, USER1, "No crypto devices found, skipping test\n");
		return TEST_SKIPPED;
	}

	if (testsuite_setup() < 0) {
		testsuite_teardown();
		return TEST_FAILED;
	}

	for (i = 0; i < RTE_DIM(test_cfg); i++) {
		ret = init_sa_session(&test_cfg[i], &sa_out, &sa_in);
		if (ret != 0) {
			testsuite_teardown();
			return TEST_FAILED;
		}

		if (measure_performance(&sa_out, &sa_in) < 0) {
			testsuite_teardown();
			return TEST_FAILED;
		}

		print_metrics(&test_cfg[i], &sa_out, &sa_in);
	}

	testsuite_teardown();

	return TEST_SUCCESS;
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_PERF_TEST(ipsec_perf_autotest, test_libipsec_perf);