/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022, Marvell
 */

#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_ip.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_security.h>

#include <app/test/test_cryptodev.h>
#include <app/test/test_cryptodev_security_ipsec.h>
#include <app/test/test_cryptodev_security_ipsec_test_vectors.h>
#include <app/test/test_security_proto.h>

#define NB_DESC 4096
#define DEF_NB_SESSIONS (16 * 10 * 1024) /* 16 * 10K tunnels */

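/* Per-worker configuration: crypto transforms for the session under test and
 * the cryptodev/queue pair assigned to this lcore.
 */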
struct lcore_conf {
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	uint8_t dev_id;
	uint8_t qp_id;
	struct test_ctx *ctx;
};

struct test_ctx {
	struct lcore_conf lconf[RTE_MAX_LCORE];
	void *sec_ctx;
	struct rte_mempool *sess_mp;
	struct ipsec_test_data *td;
	int nb_sess;
	unsigned long td_idx;
	uint8_t nb_lcores;
	uint8_t nb_cryptodevs;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS];
	bool is_inbound;
};

static struct test_ctx ctx;

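/* Detect a security-capable crypto PMD, then configure and start every
 * matching device with up to one queue pair per worker lcore.
 */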
static int
cryptodev_init(struct test_ctx *ctx, uint8_t nb_lcores)
{
	const char dev_names[][RTE_CRYPTODEV_NAME_MAX_LEN] = {
		"crypto_cn10k",
		"crypto_cn9k",
		"crypto_dpaa_sec",
		"crypto_dpaa2_sec",
	};
	struct rte_cryptodev_qp_conf qp_conf;
	struct rte_cryptodev_info dev_info;
	struct rte_cryptodev_config config;
	unsigned int j, nb_qp, qps_reqd;
	uint8_t socket_id;
	uint32_t dev_cnt;
	int ret, core_id;
	void *sec_ctx;
	uint64_t i;

	i = 0;
	do {
		dev_cnt = rte_cryptodev_devices_get(dev_names[i],
						    ctx->enabled_cdevs,
						    RTE_CRYPTO_MAX_DEVS);
		i++;
	} while (dev_cnt == 0 && i < RTE_DIM(dev_names));

	if (dev_cnt == 0)
		return -1;

	/* Check first device for capabilities */
	rte_cryptodev_info_get(0, &dev_info);
	if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_SECURITY)) {
		RTE_LOG(ERR, USER1,
			"Security not supported by the cryptodev\n");
		return -1;
	}

	sec_ctx = rte_cryptodev_get_sec_ctx(0);
	ctx->sec_ctx = sec_ctx;

	socket_id = rte_socket_id();
	qps_reqd = nb_lcores;
	core_id = 0;
	i = 0;

	do {
		rte_cryptodev_info_get(i, &dev_info);
		qps_reqd = RTE_MIN(dev_info.max_nb_queue_pairs, qps_reqd);

		for (j = 0; j < qps_reqd; j++) {
			ctx->lconf[core_id].dev_id = i;
			ctx->lconf[core_id].qp_id = j;
			ctx->lconf[core_id].ctx = ctx;
			core_id++;
			if (core_id == RTE_MAX_LCORE)
				break;
		}

		nb_qp = j;

		memset(&config, 0, sizeof(config));
		config.nb_queue_pairs = nb_qp;
		config.socket_id = socket_id;

		ret = rte_cryptodev_configure(i, &config);
		if (ret < 0) {
			RTE_LOG(ERR, USER1,
				"Could not configure cryptodev - %" PRIu64 "\n",
				i);
			return -1;
		}

		memset(&qp_conf, 0, sizeof(qp_conf));
		qp_conf.nb_descriptors = NB_DESC;

		for (j = 0; j < nb_qp; j++) {
			ret = rte_cryptodev_queue_pair_setup(i, j, &qp_conf,
							     socket_id);
			if (ret < 0) {
				RTE_LOG(ERR, USER1,
					"Could not configure queue pair:"
					" %" PRIu64 " - %d\n", i, j);
				return -1;
			}
		}

		ret = rte_cryptodev_start(i);
		if (ret < 0) {
			RTE_LOG(ERR, USER1, "Could not start cryptodev\n");
			return -1;
		}

		i++;
		qps_reqd -= j;

	} while (i < dev_cnt && core_id < RTE_MAX_LCORE);

	ctx->nb_cryptodevs = i;

	return 0;
}

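/* Stop and close all cryptodevs that were started during init. */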
static int
cryptodev_fini(struct test_ctx *ctx)
{
	int i, ret = 0;

	for (i = 0; i < ctx->nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(ctx->enabled_cdevs[i]);
		ret = rte_cryptodev_close(ctx->enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
				"Crypto device close error %d\n", ret);
	}

	return ret;
}

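/* Create the security session mempool, sized for the requested number of
 * sessions plus per-lcore mempool cache headroom.
 */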
static int
mempool_init(struct test_ctx *ctx, uint8_t nb_lcores)
{
	struct rte_mempool *sess_mpool;
	unsigned int sec_sess_sz;
	int nb_sess_total;

	nb_sess_total = ctx->nb_sess + RTE_MEMPOOL_CACHE_MAX_SIZE * nb_lcores;

	sec_sess_sz = rte_security_session_get_size(ctx->sec_ctx);

	sess_mpool = rte_cryptodev_sym_session_pool_create("test_sess_mp",
			nb_sess_total, sec_sess_sz, RTE_MEMPOOL_CACHE_MAX_SIZE,
			0, SOCKET_ID_ANY);
	if (sess_mpool == NULL) {
		RTE_LOG(ERR, USER1, "Could not create mempool\n");
		return -1;
	}

	ctx->sess_mp = sess_mpool;

	return 0;
}

static int
mempool_fini(struct test_ctx *ctx)
{
	rte_mempool_free(ctx->sess_mp);

	return 0;
}

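/* Build the security session configuration from the test vector: populate the
 * IPsec xform (tunnel endpoints, salt) and the crypto xforms, and verify both
 * against the device's reported security capabilities.
 */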
static int
sec_conf_init(struct lcore_conf *conf,
	      struct rte_security_session_conf *sess_conf,
	      struct rte_security_ipsec_xform *ipsec_xform,
	      const struct ipsec_test_data *td)
{
	uint16_t v6_src[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
			      0x0000, 0x001a};
	uint16_t v6_dst[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
			      0xe82c, 0x4887};
	const struct rte_ipv4_hdr *ipv4 =
			(const struct rte_ipv4_hdr *)td->output_text.data;
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;
	enum rte_security_ipsec_sa_direction dir;
	uint32_t src, dst;
	int salt_len;

	/* Copy IPsec xform */
	memcpy(ipsec_xform, &td->ipsec_xform, sizeof(*ipsec_xform));

	dir = ipsec_xform->direction;

	memcpy(&src, &ipv4->src_addr, sizeof(ipv4->src_addr));
	memcpy(&dst, &ipv4->dst_addr, sizeof(ipv4->dst_addr));

	if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memcpy(&ipsec_xform->tunnel.ipv4.src_ip, &src,
			       sizeof(src));
			memcpy(&ipsec_xform->tunnel.ipv4.dst_ip, &dst,
			       sizeof(dst));

		} else {
			memcpy(&ipsec_xform->tunnel.ipv6.src_addr, &v6_src,
			       sizeof(v6_src));
			memcpy(&ipsec_xform->tunnel.ipv6.dst_addr, &v6_dst,
			       sizeof(v6_dst));
		}
	}

	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sec_cap_idx.ipsec.proto = ipsec_xform->proto;
	sec_cap_idx.ipsec.mode = ipsec_xform->mode;
	sec_cap_idx.ipsec.direction = ipsec_xform->direction;

	sec_cap = rte_security_capability_get(conf->ctx->sec_ctx, &sec_cap_idx);
	if (sec_cap == NULL) {
		RTE_LOG(ERR, USER1, "Could not get capabilities\n");
		return -1;
	}

	/* Copy cipher session parameters */
	if (td[0].aead) {
		memcpy(&conf->aead_xform, &td[0].xform.aead,
		       sizeof(conf->aead_xform));
		conf->aead_xform.aead.key.data = td[0].key.data;
		conf->aead_xform.aead.iv.offset = IV_OFFSET;

		/* Verify crypto capabilities */
		if (test_sec_crypto_caps_aead_verify(sec_cap, &conf->aead_xform) != 0) {
			RTE_LOG(ERR, USER1,
				"Crypto capabilities not supported\n");
			return -1;
		}
	} else if (td[0].auth_only) {
		memcpy(&conf->auth_xform, &td[0].xform.chain.auth,
		       sizeof(conf->auth_xform));
		conf->auth_xform.auth.key.data = td[0].auth_key.data;

		if (test_sec_crypto_caps_auth_verify(sec_cap, &conf->auth_xform) != 0) {
			RTE_LOG(INFO, USER1,
				"Auth crypto capabilities not supported\n");
			return -1;
		}
	} else {
		memcpy(&conf->cipher_xform, &td[0].xform.chain.cipher,
		       sizeof(conf->cipher_xform));
		memcpy(&conf->auth_xform, &td[0].xform.chain.auth,
		       sizeof(conf->auth_xform));
		conf->cipher_xform.cipher.key.data = td[0].key.data;
		conf->cipher_xform.cipher.iv.offset = IV_OFFSET;
		conf->auth_xform.auth.key.data = td[0].auth_key.data;

		/* Verify crypto capabilities */

		if (test_sec_crypto_caps_cipher_verify(sec_cap, &conf->cipher_xform) != 0) {
			RTE_LOG(ERR, USER1,
				"Cipher crypto capabilities not supported\n");
			return -1;
		}

		if (test_sec_crypto_caps_auth_verify(sec_cap, &conf->auth_xform) != 0) {
			RTE_LOG(ERR, USER1,
				"Auth crypto capabilities not supported\n");
			return -1;
		}
	}

	if (test_ipsec_sec_caps_verify(ipsec_xform, sec_cap, 0) != 0)
		return -1;

	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;

	if (td[0].aead || td[0].aes_gmac) {
		salt_len = RTE_MIN(sizeof(ipsec_xform->salt), td[0].salt.len);
		memcpy(&ipsec_xform->salt, td[0].salt.data, salt_len);
	}

	if (td[0].aead) {
		sess_conf->ipsec = *ipsec_xform;
		sess_conf->crypto_xform = &conf->aead_xform;
	} else if (td[0].auth_only) {
		sess_conf->ipsec = *ipsec_xform;
		sess_conf->crypto_xform = &conf->auth_xform;
	} else {
		sess_conf->ipsec = *ipsec_xform;
		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			sess_conf->crypto_xform = &conf->cipher_xform;
			conf->cipher_xform.next = &conf->auth_xform;
		} else {
			sess_conf->crypto_xform = &conf->auth_xform;
			conf->auth_xform.next = &conf->cipher_xform;
		}
	}

	return 0;
}

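/* Worker entry point: create and then destroy this lcore's share of sessions,
 * timing both phases with the TSC and printing per-lcore setup/destroy rates.
 */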
static int
test_security_session_perf(void *arg)
{
	uint64_t tsc_start, tsc_mid, tsc_end, tsc_setup_dur, tsc_destroy_dur;
	struct rte_security_ipsec_xform ipsec_xform;
	struct rte_security_session_conf sess_conf;
	int i, ret, nb_sessions, nb_sess_total;
	struct rte_security_session **sess;
	void *sec_ctx;
	double setup_rate, destroy_rate;
	uint64_t setup_ms, destroy_ms;
	struct lcore_conf *conf = arg;
	struct rte_mempool *sess_mp;
	uint8_t nb_lcores;

	nb_lcores = conf->ctx->nb_lcores;
	nb_sess_total = conf->ctx->nb_sess;
	sec_ctx = conf->ctx->sec_ctx;
	sess_mp = conf->ctx->sess_mp;

	nb_sessions = nb_sess_total / nb_lcores;

	/* First worker also takes the remainder of the division */
	if (conf->qp_id == 0)
		nb_sessions += (nb_sess_total - nb_sessions * nb_lcores);

	ret = sec_conf_init(conf, &sess_conf, &ipsec_xform,
			    &ctx.td[ctx.td_idx]);
	if (ret) {
		RTE_LOG(ERR, USER1, "Could not initialize session conf\n");
		return EXIT_FAILURE;
	}

	sess = rte_zmalloc(NULL, sizeof(void *) * nb_sessions, 0);
	if (sess == NULL) {
		RTE_LOG(ERR, USER1, "Could not allocate session array\n");
		return EXIT_FAILURE;
	}

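	/* Time the creation of all sessions assigned to this lcore */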
	tsc_start = rte_rdtsc_precise();

	for (i = 0; i < nb_sessions; i++) {
		sess[i] = rte_security_session_create(sec_ctx,
						      &sess_conf,
						      sess_mp);
		if (unlikely(sess[i] == NULL)) {
			RTE_LOG(ERR, USER1, "Could not create session\n");
			return EXIT_FAILURE;
		}
	}

	tsc_mid = rte_rdtsc_precise();

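	/* Time the destruction of the same sessions */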
	for (i = 0; i < nb_sessions; i++) {
		ret = rte_security_session_destroy(sec_ctx, sess[i]);
		if (unlikely(ret < 0)) {
			RTE_LOG(ERR, USER1, "Could not destroy session\n");
			return EXIT_FAILURE;
		}
	}

	tsc_end = rte_rdtsc_precise();

	tsc_setup_dur = tsc_mid - tsc_start;
	tsc_destroy_dur = tsc_end - tsc_mid;

	setup_ms = tsc_setup_dur * 1000 / rte_get_tsc_hz();
	destroy_ms = tsc_destroy_dur * 1000 / rte_get_tsc_hz();

	setup_rate = (double)nb_sessions * rte_get_tsc_hz() / tsc_setup_dur;
	destroy_rate = (double)nb_sessions * rte_get_tsc_hz() / tsc_destroy_dur;

	printf("%20u%20u%20"PRIu64"%20"PRIu64"%20.2f%20.2f\n",
	       rte_lcore_id(),
	       nb_sessions,
	       setup_ms,
	       destroy_ms,
	       setup_rate,
	       destroy_rate);

	return EXIT_SUCCESS;
}

static void
usage(char *progname)
{
	printf("\nusage: %s\n", progname);
	printf("  --help      : display this message and exit\n"
	       "  --inbound   : test for inbound direction\n"
	       "                default outbound direction is tested\n"
	       "  --nb-sess=N : to set the number of sessions\n"
	       "                to be created, default is %d\n", DEF_NB_SESSIONS);
}

static void
args_parse(int argc, char **argv)
{
	char **argvopt;
	int n, opt;
	int opt_idx;

	static const struct option lgopts[] = {
		/* Control */
		{ "help", 0, 0, 0 },
		{ "inbound", 0, 0, 0 },
		{ "nb-sess", 1, 0, 0 },
		{ NULL, 0, 0, 0 }
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "",
				  lgopts, &opt_idx)) != EOF) {
		switch (opt) {
		case 0:
			if (strcmp(lgopts[opt_idx].name, "help") == 0) {
				usage(argv[0]);
				exit(EXIT_SUCCESS);
			}

			if (strcmp(lgopts[opt_idx].name, "nb-sess") == 0) {
				n = atoi(optarg);
				if (n >= 0)
					ctx.nb_sess = n;
				else
					rte_exit(EXIT_FAILURE,
						 "nb-sess should be >= 0\n");
				printf("nb-sess %d / ", ctx.nb_sess);
			} else if (strcmp(lgopts[opt_idx].name, "inbound") ==
				   0) {
				ctx.is_inbound = true;
				printf("inbound / ");
			}

			break;

		default:
			usage(argv[0]);
			rte_exit(EXIT_FAILURE, "Invalid option: %s\n",
				 argv[opt_idx - 1]);
			break;
		}
	}

	printf("\n\n");
}

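/* Initialize EAL, cryptodevs and the session pool, prepare one test vector per
 * algorithm combination, and launch the per-lcore measurement for each
 * combination in turn.
 */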
int
main(int argc, char **argv)
{
	struct ipsec_test_data td_outb[RTE_DIM(sec_alg_list)];
	struct ipsec_test_data td_inb[RTE_DIM(sec_alg_list)];
	struct ipsec_test_flags flags;
	uint32_t lcore_id;
	uint8_t nb_lcores;
	unsigned long i;
	int ret;

	memset(&ctx, 0, sizeof(struct test_ctx));
	memset(&flags, 0, sizeof(flags));

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	nb_lcores = rte_lcore_count() - 1;
	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of worker cores needs to be at least 1\n");
		return -EINVAL;
	}

	ctx.nb_sess = DEF_NB_SESSIONS + RTE_MEMPOOL_CACHE_MAX_SIZE * nb_lcores;

	if (argc > 1)
		args_parse(argc, argv);

	ctx.nb_lcores = nb_lcores;

	ret = cryptodev_init(&ctx, nb_lcores);
	if (ret)
		goto exit;

	ret = mempool_init(&ctx, nb_lcores);
	if (ret)
		goto cryptodev_fini;

	test_sec_alg_list_populate();

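	/* Prepare an outbound test vector for each algorithm combination and,
	 * when testing the inbound direction, derive the inbound vector from it.
	 */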
	for (i = 0; i < RTE_DIM(sec_alg_list); i++) {
		test_ipsec_td_prepare(sec_alg_list[i].param1,
				      sec_alg_list[i].param2,
				      &flags,
				      &td_outb[i],
				      1);
		if (ctx.is_inbound)
			test_ipsec_td_in_from_out(&td_outb[i], &td_inb[i]);
	}

	ctx.td = td_outb;
	if (ctx.is_inbound)
		ctx.td = td_inb;

	for (ctx.td_idx = 0; ctx.td_idx < RTE_DIM(sec_alg_list); ctx.td_idx++) {

		printf("\n\n    Algorithm combination:");
		test_sec_alg_display(sec_alg_list[ctx.td_idx].param1,
				     sec_alg_list[ctx.td_idx].param2);
		printf("    ----------------------");

		printf("\n%20s%20s%20s%20s%20s%20s\n\n",
		       "lcore id", "nb_sessions",
		       "Setup time(ms)", "Destroy time(ms)",
		       "Setup rate(sess/s)",
		       "Destroy rate(sess/sec)");

		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {
			rte_eal_remote_launch(test_security_session_perf,
					      &ctx.lconf[i],
					      lcore_id);
			i++;
		}

		RTE_LCORE_FOREACH_WORKER(lcore_id) {
			ret |= rte_eal_wait_lcore(lcore_id);
		}

	}

	cryptodev_fini(&ctx);
	mempool_fini(&ctx);

	return EXIT_SUCCESS;
cryptodev_fini:
	cryptodev_fini(&ctx);
exit:
	return EXIT_FAILURE;
}