1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2017-2019 NXP 5 * 6 */ 7 8 #include <fcntl.h> 9 #include <unistd.h> 10 #include <sched.h> 11 #include <net/if.h> 12 13 #include <rte_byteorder.h> 14 #include <rte_common.h> 15 #include <rte_cryptodev_pmd.h> 16 #include <rte_crypto.h> 17 #include <rte_cryptodev.h> 18 #ifdef RTE_LIBRTE_SECURITY 19 #include <rte_security_driver.h> 20 #endif 21 #include <rte_cycles.h> 22 #include <rte_dev.h> 23 #include <rte_kvargs.h> 24 #include <rte_malloc.h> 25 #include <rte_mbuf.h> 26 #include <rte_memcpy.h> 27 #include <rte_string_fns.h> 28 #include <rte_spinlock.h> 29 30 #include <fsl_usd.h> 31 #include <fsl_qman.h> 32 #include <dpaa_of.h> 33 34 /* RTA header files */ 35 #include <desc/common.h> 36 #include <desc/algo.h> 37 #include <desc/ipsec.h> 38 #include <desc/pdcp.h> 39 40 #include <rte_dpaa_bus.h> 41 #include <dpaa_sec.h> 42 #include <dpaa_sec_event.h> 43 #include <dpaa_sec_log.h> 44 #include <dpaax_iova_table.h> 45 46 enum rta_sec_era rta_sec_era; 47 48 int dpaa_logtype_sec; 49 50 static uint8_t cryptodev_driver_id; 51 52 static __thread struct rte_crypto_op **dpaa_sec_ops; 53 static __thread int dpaa_sec_op_nb; 54 55 static int 56 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess); 57 58 static inline void 59 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx) 60 { 61 if (!ctx->fd_status) { 62 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 63 } else { 64 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 65 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 66 } 67 } 68 69 static inline struct dpaa_sec_op_ctx * 70 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count) 71 { 72 struct dpaa_sec_op_ctx *ctx; 73 int i, retval; 74 75 retval = rte_mempool_get( 76 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool, 77 (void **)(&ctx)); 78 if (!ctx || retval) { 79 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!"); 80 return NULL; 81 } 82 /* 83 * Clear SG memory. There are 16 SG entries of 16 Bytes each. 84 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times 85 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for 86 * each packet, and memset() would be costlier than dcbz_64(). 
87 */ 88 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4) 89 dcbz_64(&ctx->job.sg[i]); 90 91 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool; 92 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx); 93 94 return ctx; 95 } 96 97 static void 98 ern_sec_fq_handler(struct qman_portal *qm __rte_unused, 99 struct qman_fq *fq, 100 const struct qm_mr_entry *msg) 101 { 102 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n", 103 fq->fqid, msg->ern.rc, msg->ern.seqnum); 104 } 105 106 /* initialize the queue with dest chan as caam chan so that 107 * all the packets in this queue can be dispatched into caam 108 */ 109 static int 110 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc, 111 uint32_t fqid_out) 112 { 113 struct qm_mcc_initfq fq_opts; 114 uint32_t flags; 115 int ret = -1; 116 117 /* Clear FQ options */ 118 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq)); 119 120 flags = QMAN_INITFQ_FLAG_SCHED; 121 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA | 122 QM_INITFQ_WE_CONTEXTB; 123 124 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc); 125 fq_opts.fqd.context_b = fqid_out; 126 fq_opts.fqd.dest.channel = qm_channel_caam; 127 fq_opts.fqd.dest.wq = 0; 128 129 fq_in->cb.ern = ern_sec_fq_handler; 130 131 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out); 132 133 ret = qman_init_fq(fq_in, flags, &fq_opts); 134 if (unlikely(ret != 0)) 135 DPAA_SEC_ERR("qman_init_fq failed %d", ret); 136 137 return ret; 138 } 139 140 /* frames are enqueued on in_fq and caam puts the crypto result into out_fq */ 141 static enum qman_cb_dqrr_result 142 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused, 143 struct qman_fq *fq __always_unused, 144 const struct qm_dqrr_entry *dqrr) 145 { 146 const struct qm_fd *fd; 147 struct dpaa_sec_job *job; 148 struct dpaa_sec_op_ctx *ctx; 149 150 if (dpaa_sec_op_nb >= DPAA_SEC_BURST) 151 return qman_cb_dqrr_defer; 152 153 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID)) 154 return qman_cb_dqrr_consume; 155 156 fd = &dqrr->fd; 157 /* sg is embedded in an op ctx, 158 * sg[0] is for output 159 * sg[1] is for input 160 */ 161 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 162 163 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 164 ctx->fd_status = fd->status; 165 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 166 struct qm_sg_entry *sg_out; 167 uint32_t len; 168 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ? 
169 ctx->op->sym->m_src : ctx->op->sym->m_dst; 170 171 sg_out = &job->sg[0]; 172 hw_sg_to_cpu(sg_out); 173 len = sg_out->length; 174 mbuf->pkt_len = len; 175 while (mbuf->next != NULL) { 176 len -= mbuf->data_len; 177 mbuf = mbuf->next; 178 } 179 mbuf->data_len = len; 180 } 181 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op; 182 dpaa_sec_op_ending(ctx); 183 184 return qman_cb_dqrr_consume; 185 } 186 187 /* caam result is put into this queue */ 188 static int 189 dpaa_sec_init_tx(struct qman_fq *fq) 190 { 191 int ret; 192 struct qm_mcc_initfq opts; 193 uint32_t flags; 194 195 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED | 196 QMAN_FQ_FLAG_DYNAMIC_FQID; 197 198 ret = qman_create_fq(0, flags, fq); 199 if (unlikely(ret)) { 200 DPAA_SEC_ERR("qman_create_fq failed"); 201 return ret; 202 } 203 204 memset(&opts, 0, sizeof(opts)); 205 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 206 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 207 208 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */ 209 210 fq->cb.dqrr = dqrr_out_fq_cb_rx; 211 fq->cb.ern = ern_sec_fq_handler; 212 213 ret = qman_init_fq(fq, 0, &opts); 214 if (unlikely(ret)) { 215 DPAA_SEC_ERR("unable to init caam source fq!"); 216 return ret; 217 } 218 219 return ret; 220 } 221 222 static inline int is_aead(dpaa_sec_session *ses) 223 { 224 return ((ses->cipher_alg == 0) && 225 (ses->auth_alg == 0) && 226 (ses->aead_alg != 0)); 227 } 228 229 static inline int is_encode(dpaa_sec_session *ses) 230 { 231 return ses->dir == DIR_ENC; 232 } 233 234 static inline int is_decode(dpaa_sec_session *ses) 235 { 236 return ses->dir == DIR_DEC; 237 } 238 239 #ifdef RTE_LIBRTE_SECURITY 240 static int 241 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses) 242 { 243 struct alginfo authdata = {0}, cipherdata = {0}; 244 struct sec_cdb *cdb = &ses->cdb; 245 struct alginfo *p_authdata = NULL; 246 int32_t shared_desc_len = 0; 247 int err; 248 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 249 int swap = false; 250 #else 251 int swap = true; 252 #endif 253 254 cipherdata.key = (size_t)ses->cipher_key.data; 255 cipherdata.keylen = ses->cipher_key.length; 256 cipherdata.key_enc_flags = 0; 257 cipherdata.key_type = RTA_DATA_IMM; 258 cipherdata.algtype = ses->cipher_key.alg; 259 cipherdata.algmode = ses->cipher_key.algmode; 260 261 cdb->sh_desc[0] = cipherdata.keylen; 262 cdb->sh_desc[1] = 0; 263 cdb->sh_desc[2] = 0; 264 265 if (ses->auth_alg) { 266 authdata.key = (size_t)ses->auth_key.data; 267 authdata.keylen = ses->auth_key.length; 268 authdata.key_enc_flags = 0; 269 authdata.key_type = RTA_DATA_IMM; 270 authdata.algtype = ses->auth_key.alg; 271 authdata.algmode = ses->auth_key.algmode; 272 273 p_authdata = &authdata; 274 275 cdb->sh_desc[1] = authdata.keylen; 276 } 277 278 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 279 MIN_JOB_DESC_SIZE, 280 (unsigned int *)cdb->sh_desc, 281 &cdb->sh_desc[2], 2); 282 if (err < 0) { 283 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 284 return err; 285 } 286 287 if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) { 288 cipherdata.key = 289 (size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key); 290 cipherdata.key_type = RTA_DATA_PTR; 291 } 292 if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) { 293 authdata.key = 294 (size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key); 295 authdata.key_type = RTA_DATA_PTR; 296 } 297 298 cdb->sh_desc[0] = 0; 299 cdb->sh_desc[1] = 0; 300 cdb->sh_desc[2] = 0; 301 302 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 303 if (ses->dir == DIR_ENC) 304 shared_desc_len = 
cnstr_shdsc_pdcp_c_plane_encap( 305 cdb->sh_desc, 1, swap, 306 ses->pdcp.hfn, 307 ses->pdcp.sn_size, 308 ses->pdcp.bearer, 309 ses->pdcp.pkt_dir, 310 ses->pdcp.hfn_threshold, 311 &cipherdata, &authdata, 312 0); 313 else if (ses->dir == DIR_DEC) 314 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap( 315 cdb->sh_desc, 1, swap, 316 ses->pdcp.hfn, 317 ses->pdcp.sn_size, 318 ses->pdcp.bearer, 319 ses->pdcp.pkt_dir, 320 ses->pdcp.hfn_threshold, 321 &cipherdata, &authdata, 322 0); 323 } else { 324 if (ses->dir == DIR_ENC) 325 shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap( 326 cdb->sh_desc, 1, swap, 327 ses->pdcp.sn_size, 328 ses->pdcp.hfn, 329 ses->pdcp.bearer, 330 ses->pdcp.pkt_dir, 331 ses->pdcp.hfn_threshold, 332 &cipherdata, p_authdata, 0); 333 else if (ses->dir == DIR_DEC) 334 shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap( 335 cdb->sh_desc, 1, swap, 336 ses->pdcp.sn_size, 337 ses->pdcp.hfn, 338 ses->pdcp.bearer, 339 ses->pdcp.pkt_dir, 340 ses->pdcp.hfn_threshold, 341 &cipherdata, p_authdata, 0); 342 } 343 return shared_desc_len; 344 } 345 346 /* prepare ipsec proto command block of the session */ 347 static int 348 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses) 349 { 350 struct alginfo cipherdata = {0}, authdata = {0}; 351 struct sec_cdb *cdb = &ses->cdb; 352 int32_t shared_desc_len = 0; 353 int err; 354 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 355 int swap = false; 356 #else 357 int swap = true; 358 #endif 359 360 cipherdata.key = (size_t)ses->cipher_key.data; 361 cipherdata.keylen = ses->cipher_key.length; 362 cipherdata.key_enc_flags = 0; 363 cipherdata.key_type = RTA_DATA_IMM; 364 cipherdata.algtype = ses->cipher_key.alg; 365 cipherdata.algmode = ses->cipher_key.algmode; 366 367 if (ses->auth_key.length) { 368 authdata.key = (size_t)ses->auth_key.data; 369 authdata.keylen = ses->auth_key.length; 370 authdata.key_enc_flags = 0; 371 authdata.key_type = RTA_DATA_IMM; 372 authdata.algtype = ses->auth_key.alg; 373 authdata.algmode = ses->auth_key.algmode; 374 } 375 376 cdb->sh_desc[0] = cipherdata.keylen; 377 cdb->sh_desc[1] = authdata.keylen; 378 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 379 MIN_JOB_DESC_SIZE, 380 (unsigned int *)cdb->sh_desc, 381 &cdb->sh_desc[2], 2); 382 383 if (err < 0) { 384 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 385 return err; 386 } 387 if (cdb->sh_desc[2] & 1) 388 cipherdata.key_type = RTA_DATA_IMM; 389 else { 390 cipherdata.key = (size_t)rte_dpaa_mem_vtop( 391 (void *)(size_t)cipherdata.key); 392 cipherdata.key_type = RTA_DATA_PTR; 393 } 394 if (cdb->sh_desc[2] & (1<<1)) 395 authdata.key_type = RTA_DATA_IMM; 396 else { 397 authdata.key = (size_t)rte_dpaa_mem_vtop( 398 (void *)(size_t)authdata.key); 399 authdata.key_type = RTA_DATA_PTR; 400 } 401 402 cdb->sh_desc[0] = 0; 403 cdb->sh_desc[1] = 0; 404 cdb->sh_desc[2] = 0; 405 if (ses->dir == DIR_ENC) { 406 shared_desc_len = cnstr_shdsc_ipsec_new_encap( 407 cdb->sh_desc, 408 true, swap, SHR_SERIAL, 409 &ses->encap_pdb, 410 (uint8_t *)&ses->ip4_hdr, 411 &cipherdata, &authdata); 412 } else if (ses->dir == DIR_DEC) { 413 shared_desc_len = cnstr_shdsc_ipsec_new_decap( 414 cdb->sh_desc, 415 true, swap, SHR_SERIAL, 416 &ses->decap_pdb, 417 &cipherdata, &authdata); 418 } 419 return shared_desc_len; 420 } 421 #endif 422 /* prepare command block of the session */ 423 static int 424 dpaa_sec_prep_cdb(dpaa_sec_session *ses) 425 { 426 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0}; 427 int32_t shared_desc_len = 0; 428 struct sec_cdb *cdb = &ses->cdb; 429 int err; 430 #if RTE_BYTE_ORDER == 
RTE_BIG_ENDIAN 431 int swap = false; 432 #else 433 int swap = true; 434 #endif 435 436 memset(cdb, 0, sizeof(struct sec_cdb)); 437 438 switch (ses->ctxt) { 439 #ifdef RTE_LIBRTE_SECURITY 440 case DPAA_SEC_IPSEC: 441 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses); 442 break; 443 case DPAA_SEC_PDCP: 444 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses); 445 break; 446 #endif 447 case DPAA_SEC_CIPHER: 448 alginfo_c.key = (size_t)ses->cipher_key.data; 449 alginfo_c.keylen = ses->cipher_key.length; 450 alginfo_c.key_enc_flags = 0; 451 alginfo_c.key_type = RTA_DATA_IMM; 452 alginfo_c.algtype = ses->cipher_key.alg; 453 alginfo_c.algmode = ses->cipher_key.algmode; 454 455 switch (ses->cipher_alg) { 456 case RTE_CRYPTO_CIPHER_AES_CBC: 457 case RTE_CRYPTO_CIPHER_3DES_CBC: 458 case RTE_CRYPTO_CIPHER_AES_CTR: 459 case RTE_CRYPTO_CIPHER_3DES_CTR: 460 shared_desc_len = cnstr_shdsc_blkcipher( 461 cdb->sh_desc, true, 462 swap, SHR_NEVER, &alginfo_c, 463 ses->iv.length, 464 ses->dir); 465 break; 466 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 467 shared_desc_len = cnstr_shdsc_snow_f8( 468 cdb->sh_desc, true, swap, 469 &alginfo_c, 470 ses->dir); 471 break; 472 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 473 shared_desc_len = cnstr_shdsc_zuce( 474 cdb->sh_desc, true, swap, 475 &alginfo_c, 476 ses->dir); 477 break; 478 default: 479 DPAA_SEC_ERR("unsupported cipher alg %d", 480 ses->cipher_alg); 481 return -ENOTSUP; 482 } 483 break; 484 case DPAA_SEC_AUTH: 485 alginfo_a.key = (size_t)ses->auth_key.data; 486 alginfo_a.keylen = ses->auth_key.length; 487 alginfo_a.key_enc_flags = 0; 488 alginfo_a.key_type = RTA_DATA_IMM; 489 alginfo_a.algtype = ses->auth_key.alg; 490 alginfo_a.algmode = ses->auth_key.algmode; 491 switch (ses->auth_alg) { 492 case RTE_CRYPTO_AUTH_MD5_HMAC: 493 case RTE_CRYPTO_AUTH_SHA1_HMAC: 494 case RTE_CRYPTO_AUTH_SHA224_HMAC: 495 case RTE_CRYPTO_AUTH_SHA256_HMAC: 496 case RTE_CRYPTO_AUTH_SHA384_HMAC: 497 case RTE_CRYPTO_AUTH_SHA512_HMAC: 498 shared_desc_len = cnstr_shdsc_hmac( 499 cdb->sh_desc, true, 500 swap, SHR_NEVER, &alginfo_a, 501 !ses->dir, 502 ses->digest_length); 503 break; 504 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 505 shared_desc_len = cnstr_shdsc_snow_f9( 506 cdb->sh_desc, true, swap, 507 &alginfo_a, 508 !ses->dir, 509 ses->digest_length); 510 break; 511 case RTE_CRYPTO_AUTH_ZUC_EIA3: 512 shared_desc_len = cnstr_shdsc_zuca( 513 cdb->sh_desc, true, swap, 514 &alginfo_a, 515 !ses->dir, 516 ses->digest_length); 517 break; 518 default: 519 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg); 520 } 521 break; 522 case DPAA_SEC_AEAD: 523 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { 524 DPAA_SEC_ERR("not supported aead alg"); 525 return -ENOTSUP; 526 } 527 alginfo.key = (size_t)ses->aead_key.data; 528 alginfo.keylen = ses->aead_key.length; 529 alginfo.key_enc_flags = 0; 530 alginfo.key_type = RTA_DATA_IMM; 531 alginfo.algtype = ses->aead_key.alg; 532 alginfo.algmode = ses->aead_key.algmode; 533 534 if (ses->dir == DIR_ENC) 535 shared_desc_len = cnstr_shdsc_gcm_encap( 536 cdb->sh_desc, true, swap, SHR_NEVER, 537 &alginfo, 538 ses->iv.length, 539 ses->digest_length); 540 else 541 shared_desc_len = cnstr_shdsc_gcm_decap( 542 cdb->sh_desc, true, swap, SHR_NEVER, 543 &alginfo, 544 ses->iv.length, 545 ses->digest_length); 546 break; 547 case DPAA_SEC_CIPHER_HASH: 548 alginfo_c.key = (size_t)ses->cipher_key.data; 549 alginfo_c.keylen = ses->cipher_key.length; 550 alginfo_c.key_enc_flags = 0; 551 alginfo_c.key_type = RTA_DATA_IMM; 552 alginfo_c.algtype = ses->cipher_key.alg; 553 alginfo_c.algmode = 
ses->cipher_key.algmode; 554 555 alginfo_a.key = (size_t)ses->auth_key.data; 556 alginfo_a.keylen = ses->auth_key.length; 557 alginfo_a.key_enc_flags = 0; 558 alginfo_a.key_type = RTA_DATA_IMM; 559 alginfo_a.algtype = ses->auth_key.alg; 560 alginfo_a.algmode = ses->auth_key.algmode; 561 562 cdb->sh_desc[0] = alginfo_c.keylen; 563 cdb->sh_desc[1] = alginfo_a.keylen; 564 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 565 MIN_JOB_DESC_SIZE, 566 (unsigned int *)cdb->sh_desc, 567 &cdb->sh_desc[2], 2); 568 569 if (err < 0) { 570 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 571 return err; 572 } 573 if (cdb->sh_desc[2] & 1) 574 alginfo_c.key_type = RTA_DATA_IMM; 575 else { 576 alginfo_c.key = (size_t)rte_dpaa_mem_vtop( 577 (void *)(size_t)alginfo_c.key); 578 alginfo_c.key_type = RTA_DATA_PTR; 579 } 580 if (cdb->sh_desc[2] & (1<<1)) 581 alginfo_a.key_type = RTA_DATA_IMM; 582 else { 583 alginfo_a.key = (size_t)rte_dpaa_mem_vtop( 584 (void *)(size_t)alginfo_a.key); 585 alginfo_a.key_type = RTA_DATA_PTR; 586 } 587 cdb->sh_desc[0] = 0; 588 cdb->sh_desc[1] = 0; 589 cdb->sh_desc[2] = 0; 590 /* Auth_only_len is set to 0 here; it is 591 * overwritten in the fd for each packet. 592 */ 593 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc, 594 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a, 595 ses->iv.length, 596 ses->digest_length, ses->dir); 597 break; 598 case DPAA_SEC_HASH_CIPHER: 599 default: 600 DPAA_SEC_ERR("error: Unsupported session"); 601 return -ENOTSUP; 602 } 603 604 if (shared_desc_len < 0) { 605 DPAA_SEC_ERR("error in preparing command block"); 606 return shared_desc_len; 607 } 608 609 cdb->sh_hdr.hi.field.idlen = shared_desc_len; 610 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word); 611 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word); 612 613 return 0; 614 } 615 616 /* qp is lockless; it should be accessed by only one thread */ 617 static int 618 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) 619 { 620 struct qman_fq *fq; 621 unsigned int pkts = 0; 622 int num_rx_bufs, ret; 623 struct qm_dqrr_entry *dq; 624 uint32_t vdqcr_flags = 0; 625 626 fq = &qp->outq; 627 /* 628 * For requests of fewer than four buffers, set QM_VDQCR_EXACT so that 629 * exactly the requested number of buffers is returned. 630 * Otherwise the flag is not set; the dequeue may then return up to two 631 * more buffers than requested, so request two fewer to compensate. 632 */ 633 if (nb_ops < 4) { 634 vdqcr_flags = QM_VDQCR_EXACT; 635 num_rx_bufs = nb_ops; 636 } else { 637 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ? 638 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2); 639 } 640 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); 641 if (ret) 642 return 0; 643 644 do { 645 const struct qm_fd *fd; 646 struct dpaa_sec_job *job; 647 struct dpaa_sec_op_ctx *ctx; 648 struct rte_crypto_op *op; 649 650 dq = qman_dequeue(fq); 651 if (!dq) 652 continue; 653 654 fd = &dq->fd; 655 /* sg is embedded in an op ctx, 656 * sg[0] is for output 657 * sg[1] is for input 658 */ 659 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 660 661 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 662 ctx->fd_status = fd->status; 663 op = ctx->op; 664 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 665 struct qm_sg_entry *sg_out; 666 uint32_t len; 667 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ? 
668 op->sym->m_src : op->sym->m_dst; 669 670 sg_out = &job->sg[0]; 671 hw_sg_to_cpu(sg_out); 672 len = sg_out->length; 673 mbuf->pkt_len = len; 674 while (mbuf->next != NULL) { 675 len -= mbuf->data_len; 676 mbuf = mbuf->next; 677 } 678 mbuf->data_len = len; 679 } 680 if (!ctx->fd_status) { 681 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 682 } else { 683 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 684 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 685 } 686 ops[pkts++] = op; 687 688 /* op status has been reported; now free the ctx memory */ 689 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 690 691 qman_dqrr_consume(fq, dq); 692 } while (fq->flags & QMAN_FQ_STATE_VDQCR); 693 694 return pkts; 695 } 696 697 static inline struct dpaa_sec_job * 698 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 699 { 700 struct rte_crypto_sym_op *sym = op->sym; 701 struct rte_mbuf *mbuf = sym->m_src; 702 struct dpaa_sec_job *cf; 703 struct dpaa_sec_op_ctx *ctx; 704 struct qm_sg_entry *sg, *out_sg, *in_sg; 705 phys_addr_t start_addr; 706 uint8_t *old_digest, extra_segs; 707 int data_len, data_offset; 708 709 data_len = sym->auth.data.length; 710 data_offset = sym->auth.data.offset; 711 712 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 713 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 714 if ((data_len & 7) || (data_offset & 7)) { 715 DPAA_SEC_ERR("AUTH: len/offset must be full bytes"); 716 return NULL; 717 } 718 719 data_len = data_len >> 3; 720 data_offset = data_offset >> 3; 721 } 722 723 if (is_decode(ses)) 724 extra_segs = 3; 725 else 726 extra_segs = 2; 727 728 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 729 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d", 730 MAX_SG_ENTRIES); 731 return NULL; 732 } 733 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs); 734 if (!ctx) 735 return NULL; 736 737 cf = &ctx->job; 738 ctx->op = op; 739 old_digest = ctx->digest; 740 741 /* output */ 742 out_sg = &cf->sg[0]; 743 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr); 744 out_sg->length = ses->digest_length; 745 cpu_to_hw_sg(out_sg); 746 747 /* input */ 748 in_sg = &cf->sg[1]; 749 /* need to extend the input to a compound frame */ 750 in_sg->extension = 1; 751 in_sg->final = 1; 752 in_sg->length = data_len; 753 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 754 755 /* 1st seg */ 756 sg = in_sg + 1; 757 758 if (ses->iv.length) { 759 uint8_t *iv_ptr; 760 761 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 762 ses->iv.offset); 763 764 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 765 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 766 sg->length = 12; 767 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 768 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 769 sg->length = 8; 770 } else { 771 sg->length = ses->iv.length; 772 } 773 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr)); 774 in_sg->length += sg->length; 775 cpu_to_hw_sg(sg); 776 sg++; 777 } 778 779 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 780 sg->offset = data_offset; 781 782 if (data_len <= (mbuf->data_len - data_offset)) { 783 sg->length = data_len; 784 } else { 785 sg->length = mbuf->data_len - data_offset; 786 787 /* remaining i/p segs */ 788 while ((data_len = data_len - sg->length) && 789 (mbuf = mbuf->next)) { 790 cpu_to_hw_sg(sg); 791 sg++; 792 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 793 if (data_len > mbuf->data_len) 794 sg->length = mbuf->data_len; 795 else 796 sg->length = data_len; 797 } 798 } 799 800 if (is_decode(ses)) { 801 /* Digest verification case */ 802 cpu_to_hw_sg(sg); 803 sg++; 
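/* save a local copy of the received digest and append it to the input SG list so that SEC can verify it against the digest it computes */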
804 rte_memcpy(old_digest, sym->auth.digest.data, 805 ses->digest_length); 806 start_addr = rte_dpaa_mem_vtop(old_digest); 807 qm_sg_entry_set64(sg, start_addr); 808 sg->length = ses->digest_length; 809 in_sg->length += ses->digest_length; 810 } 811 sg->final = 1; 812 cpu_to_hw_sg(sg); 813 cpu_to_hw_sg(in_sg); 814 815 return cf; 816 } 817 818 /** 819 * packet looks like: 820 * |<----data_len------->| 821 * |ip_header|ah_header|icv|payload| 822 * ^ 823 * | 824 * mbuf->pkt.data 825 */ 826 static inline struct dpaa_sec_job * 827 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) 828 { 829 struct rte_crypto_sym_op *sym = op->sym; 830 struct rte_mbuf *mbuf = sym->m_src; 831 struct dpaa_sec_job *cf; 832 struct dpaa_sec_op_ctx *ctx; 833 struct qm_sg_entry *sg, *in_sg; 834 rte_iova_t start_addr; 835 uint8_t *old_digest; 836 int data_len, data_offset; 837 838 data_len = sym->auth.data.length; 839 data_offset = sym->auth.data.offset; 840 841 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 842 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 843 if ((data_len & 7) || (data_offset & 7)) { 844 DPAA_SEC_ERR("AUTH: len/offset must be full bytes"); 845 return NULL; 846 } 847 848 data_len = data_len >> 3; 849 data_offset = data_offset >> 3; 850 } 851 852 ctx = dpaa_sec_alloc_ctx(ses, 4); 853 if (!ctx) 854 return NULL; 855 856 cf = &ctx->job; 857 ctx->op = op; 858 old_digest = ctx->digest; 859 860 start_addr = rte_pktmbuf_iova(mbuf); 861 /* output */ 862 sg = &cf->sg[0]; 863 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 864 sg->length = ses->digest_length; 865 cpu_to_hw_sg(sg); 866 867 /* input */ 868 in_sg = &cf->sg[1]; 869 /* need to extend the input to a compound frame */ 870 in_sg->extension = 1; 871 in_sg->final = 1; 872 in_sg->length = data_len; 873 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 874 sg = &cf->sg[2]; 875 876 if (ses->iv.length) { 877 uint8_t *iv_ptr; 878 879 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 880 ses->iv.offset); 881 882 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 883 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 884 sg->length = 12; 885 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 886 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 887 sg->length = 8; 888 } else { 889 sg->length = ses->iv.length; 890 } 891 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr)); 892 in_sg->length += sg->length; 893 cpu_to_hw_sg(sg); 894 sg++; 895 } 896 897 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 898 sg->offset = data_offset; 899 sg->length = data_len; 900 901 if (is_decode(ses)) { 902 /* Digest verification case */ 903 cpu_to_hw_sg(sg); 904 /* hash result or digest, save digest first */ 905 rte_memcpy(old_digest, sym->auth.digest.data, 906 ses->digest_length); 907 /* let's check digest by hw */ 908 start_addr = rte_dpaa_mem_vtop(old_digest); 909 sg++; 910 qm_sg_entry_set64(sg, start_addr); 911 sg->length = ses->digest_length; 912 in_sg->length += ses->digest_length; 913 } 914 sg->final = 1; 915 cpu_to_hw_sg(sg); 916 cpu_to_hw_sg(in_sg); 917 918 return cf; 919 } 920 921 static inline struct dpaa_sec_job * 922 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 923 { 924 struct rte_crypto_sym_op *sym = op->sym; 925 struct dpaa_sec_job *cf; 926 struct dpaa_sec_op_ctx *ctx; 927 struct qm_sg_entry *sg, *out_sg, *in_sg; 928 struct rte_mbuf *mbuf; 929 uint8_t req_segs; 930 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 931 ses->iv.offset); 932 int data_len, data_offset; 933 934 data_len = sym->cipher.data.length; 935 data_offset = 
sym->cipher.data.offset; 936 937 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 938 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 939 if ((data_len & 7) || (data_offset & 7)) { 940 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes"); 941 return NULL; 942 } 943 944 data_len = data_len >> 3; 945 data_offset = data_offset >> 3; 946 } 947 948 if (sym->m_dst) { 949 mbuf = sym->m_dst; 950 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3; 951 } else { 952 mbuf = sym->m_src; 953 req_segs = mbuf->nb_segs * 2 + 3; 954 } 955 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 956 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d", 957 MAX_SG_ENTRIES); 958 return NULL; 959 } 960 961 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 962 if (!ctx) 963 return NULL; 964 965 cf = &ctx->job; 966 ctx->op = op; 967 968 /* output */ 969 out_sg = &cf->sg[0]; 970 out_sg->extension = 1; 971 out_sg->length = data_len; 972 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 973 cpu_to_hw_sg(out_sg); 974 975 /* 1st seg */ 976 sg = &cf->sg[2]; 977 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 978 sg->length = mbuf->data_len - data_offset; 979 sg->offset = data_offset; 980 981 /* Successive segs */ 982 mbuf = mbuf->next; 983 while (mbuf) { 984 cpu_to_hw_sg(sg); 985 sg++; 986 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 987 sg->length = mbuf->data_len; 988 mbuf = mbuf->next; 989 } 990 sg->final = 1; 991 cpu_to_hw_sg(sg); 992 993 /* input */ 994 mbuf = sym->m_src; 995 in_sg = &cf->sg[1]; 996 in_sg->extension = 1; 997 in_sg->final = 1; 998 in_sg->length = data_len + ses->iv.length; 999 1000 sg++; 1001 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1002 cpu_to_hw_sg(in_sg); 1003 1004 /* IV */ 1005 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1006 sg->length = ses->iv.length; 1007 cpu_to_hw_sg(sg); 1008 1009 /* 1st seg */ 1010 sg++; 1011 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1012 sg->length = mbuf->data_len - data_offset; 1013 sg->offset = data_offset; 1014 1015 /* Successive segs */ 1016 mbuf = mbuf->next; 1017 while (mbuf) { 1018 cpu_to_hw_sg(sg); 1019 sg++; 1020 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1021 sg->length = mbuf->data_len; 1022 mbuf = mbuf->next; 1023 } 1024 sg->final = 1; 1025 cpu_to_hw_sg(sg); 1026 1027 return cf; 1028 } 1029 1030 static inline struct dpaa_sec_job * 1031 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses) 1032 { 1033 struct rte_crypto_sym_op *sym = op->sym; 1034 struct dpaa_sec_job *cf; 1035 struct dpaa_sec_op_ctx *ctx; 1036 struct qm_sg_entry *sg; 1037 rte_iova_t src_start_addr, dst_start_addr; 1038 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1039 ses->iv.offset); 1040 int data_len, data_offset; 1041 1042 data_len = sym->cipher.data.length; 1043 data_offset = sym->cipher.data.offset; 1044 1045 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1046 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1047 if ((data_len & 7) || (data_offset & 7)) { 1048 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes"); 1049 return NULL; 1050 } 1051 1052 data_len = data_len >> 3; 1053 data_offset = data_offset >> 3; 1054 } 1055 1056 ctx = dpaa_sec_alloc_ctx(ses, 4); 1057 if (!ctx) 1058 return NULL; 1059 1060 cf = &ctx->job; 1061 ctx->op = op; 1062 1063 src_start_addr = rte_pktmbuf_iova(sym->m_src); 1064 1065 if (sym->m_dst) 1066 dst_start_addr = rte_pktmbuf_iova(sym->m_dst); 1067 else 1068 dst_start_addr = src_start_addr; 1069 1070 /* output */ 1071 sg = &cf->sg[0]; 1072 qm_sg_entry_set64(sg, dst_start_addr + data_offset); 1073 
sg->length = data_len + ses->iv.length; 1074 cpu_to_hw_sg(sg); 1075 1076 /* input */ 1077 sg = &cf->sg[1]; 1078 1079 /* need to extend the input to a compound frame */ 1080 sg->extension = 1; 1081 sg->final = 1; 1082 sg->length = data_len + ses->iv.length; 1083 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1084 cpu_to_hw_sg(sg); 1085 1086 sg = &cf->sg[2]; 1087 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1088 sg->length = ses->iv.length; 1089 cpu_to_hw_sg(sg); 1090 1091 sg++; 1092 qm_sg_entry_set64(sg, src_start_addr + data_offset); 1093 sg->length = data_len; 1094 sg->final = 1; 1095 cpu_to_hw_sg(sg); 1096 1097 return cf; 1098 } 1099 1100 static inline struct dpaa_sec_job * 1101 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1102 { 1103 struct rte_crypto_sym_op *sym = op->sym; 1104 struct dpaa_sec_job *cf; 1105 struct dpaa_sec_op_ctx *ctx; 1106 struct qm_sg_entry *sg, *out_sg, *in_sg; 1107 struct rte_mbuf *mbuf; 1108 uint8_t req_segs; 1109 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1110 ses->iv.offset); 1111 1112 if (sym->m_dst) { 1113 mbuf = sym->m_dst; 1114 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; 1115 } else { 1116 mbuf = sym->m_src; 1117 req_segs = mbuf->nb_segs * 2 + 4; 1118 } 1119 1120 if (ses->auth_only_len) 1121 req_segs++; 1122 1123 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1124 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d", 1125 MAX_SG_ENTRIES); 1126 return NULL; 1127 } 1128 1129 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1130 if (!ctx) 1131 return NULL; 1132 1133 cf = &ctx->job; 1134 ctx->op = op; 1135 1136 rte_prefetch0(cf->sg); 1137 1138 /* output */ 1139 out_sg = &cf->sg[0]; 1140 out_sg->extension = 1; 1141 if (is_encode(ses)) 1142 out_sg->length = sym->aead.data.length + ses->digest_length; 1143 else 1144 out_sg->length = sym->aead.data.length; 1145 1146 /* output sg entries */ 1147 sg = &cf->sg[2]; 1148 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg)); 1149 cpu_to_hw_sg(out_sg); 1150 1151 /* 1st seg */ 1152 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1153 sg->length = mbuf->data_len - sym->aead.data.offset; 1154 sg->offset = sym->aead.data.offset; 1155 1156 /* Successive segs */ 1157 mbuf = mbuf->next; 1158 while (mbuf) { 1159 cpu_to_hw_sg(sg); 1160 sg++; 1161 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1162 sg->length = mbuf->data_len; 1163 mbuf = mbuf->next; 1164 } 1165 sg->length -= ses->digest_length; 1166 1167 if (is_encode(ses)) { 1168 cpu_to_hw_sg(sg); 1169 /* set auth output */ 1170 sg++; 1171 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr); 1172 sg->length = ses->digest_length; 1173 } 1174 sg->final = 1; 1175 cpu_to_hw_sg(sg); 1176 1177 /* input */ 1178 mbuf = sym->m_src; 1179 in_sg = &cf->sg[1]; 1180 in_sg->extension = 1; 1181 in_sg->final = 1; 1182 if (is_encode(ses)) 1183 in_sg->length = ses->iv.length + sym->aead.data.length 1184 + ses->auth_only_len; 1185 else 1186 in_sg->length = ses->iv.length + sym->aead.data.length 1187 + ses->auth_only_len + ses->digest_length; 1188 1189 /* input sg entries */ 1190 sg++; 1191 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1192 cpu_to_hw_sg(in_sg); 1193 1194 /* 1st seg IV */ 1195 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1196 sg->length = ses->iv.length; 1197 cpu_to_hw_sg(sg); 1198 1199 /* 2nd seg auth only */ 1200 if (ses->auth_only_len) { 1201 sg++; 1202 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data)); 1203 sg->length = ses->auth_only_len; 1204 cpu_to_hw_sg(sg); 1205 } 1206 1207 /* 3rd seg */ 1208 sg++; 1209 
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1210 sg->length = mbuf->data_len - sym->aead.data.offset; 1211 sg->offset = sym->aead.data.offset; 1212 1213 /* Successive segs */ 1214 mbuf = mbuf->next; 1215 while (mbuf) { 1216 cpu_to_hw_sg(sg); 1217 sg++; 1218 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1219 sg->length = mbuf->data_len; 1220 mbuf = mbuf->next; 1221 } 1222 1223 if (is_decode(ses)) { 1224 cpu_to_hw_sg(sg); 1225 sg++; 1226 memcpy(ctx->digest, sym->aead.digest.data, 1227 ses->digest_length); 1228 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1229 sg->length = ses->digest_length; 1230 } 1231 sg->final = 1; 1232 cpu_to_hw_sg(sg); 1233 1234 return cf; 1235 } 1236 1237 static inline struct dpaa_sec_job * 1238 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) 1239 { 1240 struct rte_crypto_sym_op *sym = op->sym; 1241 struct dpaa_sec_job *cf; 1242 struct dpaa_sec_op_ctx *ctx; 1243 struct qm_sg_entry *sg; 1244 uint32_t length = 0; 1245 rte_iova_t src_start_addr, dst_start_addr; 1246 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1247 ses->iv.offset); 1248 1249 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off; 1250 1251 if (sym->m_dst) 1252 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off; 1253 else 1254 dst_start_addr = src_start_addr; 1255 1256 ctx = dpaa_sec_alloc_ctx(ses, 7); 1257 if (!ctx) 1258 return NULL; 1259 1260 cf = &ctx->job; 1261 ctx->op = op; 1262 1263 /* input */ 1264 rte_prefetch0(cf->sg); 1265 sg = &cf->sg[2]; 1266 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg)); 1267 if (is_encode(ses)) { 1268 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1269 sg->length = ses->iv.length; 1270 length += sg->length; 1271 cpu_to_hw_sg(sg); 1272 1273 sg++; 1274 if (ses->auth_only_len) { 1275 qm_sg_entry_set64(sg, 1276 rte_dpaa_mem_vtop(sym->aead.aad.data)); 1277 sg->length = ses->auth_only_len; 1278 length += sg->length; 1279 cpu_to_hw_sg(sg); 1280 sg++; 1281 } 1282 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset); 1283 sg->length = sym->aead.data.length; 1284 length += sg->length; 1285 sg->final = 1; 1286 cpu_to_hw_sg(sg); 1287 } else { 1288 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1289 sg->length = ses->iv.length; 1290 length += sg->length; 1291 cpu_to_hw_sg(sg); 1292 1293 sg++; 1294 if (ses->auth_only_len) { 1295 qm_sg_entry_set64(sg, 1296 rte_dpaa_mem_vtop(sym->aead.aad.data)); 1297 sg->length = ses->auth_only_len; 1298 length += sg->length; 1299 cpu_to_hw_sg(sg); 1300 sg++; 1301 } 1302 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset); 1303 sg->length = sym->aead.data.length; 1304 length += sg->length; 1305 cpu_to_hw_sg(sg); 1306 1307 memcpy(ctx->digest, sym->aead.digest.data, 1308 ses->digest_length); 1309 sg++; 1310 1311 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1312 sg->length = ses->digest_length; 1313 length += sg->length; 1314 sg->final = 1; 1315 cpu_to_hw_sg(sg); 1316 } 1317 /* input compound frame */ 1318 cf->sg[1].length = length; 1319 cf->sg[1].extension = 1; 1320 cf->sg[1].final = 1; 1321 cpu_to_hw_sg(&cf->sg[1]); 1322 1323 /* output */ 1324 sg++; 1325 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg)); 1326 qm_sg_entry_set64(sg, 1327 dst_start_addr + sym->aead.data.offset); 1328 sg->length = sym->aead.data.length; 1329 length = sg->length; 1330 if (is_encode(ses)) { 1331 cpu_to_hw_sg(sg); 1332 /* set auth output */ 1333 sg++; 1334 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr); 1335 sg->length = ses->digest_length; 1336 length += 
sg->length; 1337 } 1338 sg->final = 1; 1339 cpu_to_hw_sg(sg); 1340 1341 /* output compound frame */ 1342 cf->sg[0].length = length; 1343 cf->sg[0].extension = 1; 1344 cpu_to_hw_sg(&cf->sg[0]); 1345 1346 return cf; 1347 } 1348 1349 static inline struct dpaa_sec_job * 1350 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1351 { 1352 struct rte_crypto_sym_op *sym = op->sym; 1353 struct dpaa_sec_job *cf; 1354 struct dpaa_sec_op_ctx *ctx; 1355 struct qm_sg_entry *sg, *out_sg, *in_sg; 1356 struct rte_mbuf *mbuf; 1357 uint8_t req_segs; 1358 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1359 ses->iv.offset); 1360 1361 if (sym->m_dst) { 1362 mbuf = sym->m_dst; 1363 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; 1364 } else { 1365 mbuf = sym->m_src; 1366 req_segs = mbuf->nb_segs * 2 + 4; 1367 } 1368 1369 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1370 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d", 1371 MAX_SG_ENTRIES); 1372 return NULL; 1373 } 1374 1375 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1376 if (!ctx) 1377 return NULL; 1378 1379 cf = &ctx->job; 1380 ctx->op = op; 1381 1382 rte_prefetch0(cf->sg); 1383 1384 /* output */ 1385 out_sg = &cf->sg[0]; 1386 out_sg->extension = 1; 1387 if (is_encode(ses)) 1388 out_sg->length = sym->auth.data.length + ses->digest_length; 1389 else 1390 out_sg->length = sym->auth.data.length; 1391 1392 /* output sg entries */ 1393 sg = &cf->sg[2]; 1394 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg)); 1395 cpu_to_hw_sg(out_sg); 1396 1397 /* 1st seg */ 1398 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1399 sg->length = mbuf->data_len - sym->auth.data.offset; 1400 sg->offset = sym->auth.data.offset; 1401 1402 /* Successive segs */ 1403 mbuf = mbuf->next; 1404 while (mbuf) { 1405 cpu_to_hw_sg(sg); 1406 sg++; 1407 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1408 sg->length = mbuf->data_len; 1409 mbuf = mbuf->next; 1410 } 1411 sg->length -= ses->digest_length; 1412 1413 if (is_encode(ses)) { 1414 cpu_to_hw_sg(sg); 1415 /* set auth output */ 1416 sg++; 1417 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 1418 sg->length = ses->digest_length; 1419 } 1420 sg->final = 1; 1421 cpu_to_hw_sg(sg); 1422 1423 /* input */ 1424 mbuf = sym->m_src; 1425 in_sg = &cf->sg[1]; 1426 in_sg->extension = 1; 1427 in_sg->final = 1; 1428 if (is_encode(ses)) 1429 in_sg->length = ses->iv.length + sym->auth.data.length; 1430 else 1431 in_sg->length = ses->iv.length + sym->auth.data.length 1432 + ses->digest_length; 1433 1434 /* input sg entries */ 1435 sg++; 1436 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1437 cpu_to_hw_sg(in_sg); 1438 1439 /* 1st seg IV */ 1440 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1441 sg->length = ses->iv.length; 1442 cpu_to_hw_sg(sg); 1443 1444 /* 2nd seg */ 1445 sg++; 1446 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1447 sg->length = mbuf->data_len - sym->auth.data.offset; 1448 sg->offset = sym->auth.data.offset; 1449 1450 /* Successive segs */ 1451 mbuf = mbuf->next; 1452 while (mbuf) { 1453 cpu_to_hw_sg(sg); 1454 sg++; 1455 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1456 sg->length = mbuf->data_len; 1457 mbuf = mbuf->next; 1458 } 1459 1460 sg->length -= ses->digest_length; 1461 if (is_decode(ses)) { 1462 cpu_to_hw_sg(sg); 1463 sg++; 1464 memcpy(ctx->digest, sym->auth.digest.data, 1465 ses->digest_length); 1466 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1467 sg->length = ses->digest_length; 1468 } 1469 sg->final = 1; 1470 cpu_to_hw_sg(sg); 1471 1472 return cf; 1473 } 1474 1475 
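/* build_cipher_auth(): contiguous-buffer variant of the chained cipher+auth case. The input compound frame carries the IV, the data to be processed and, for decryption, a copy of the received digest; the output frame carries the ciphered data and, for encryption, the generated digest. */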
static inline struct dpaa_sec_job * 1476 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) 1477 { 1478 struct rte_crypto_sym_op *sym = op->sym; 1479 struct dpaa_sec_job *cf; 1480 struct dpaa_sec_op_ctx *ctx; 1481 struct qm_sg_entry *sg; 1482 rte_iova_t src_start_addr, dst_start_addr; 1483 uint32_t length = 0; 1484 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1485 ses->iv.offset); 1486 1487 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off; 1488 if (sym->m_dst) 1489 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off; 1490 else 1491 dst_start_addr = src_start_addr; 1492 1493 ctx = dpaa_sec_alloc_ctx(ses, 7); 1494 if (!ctx) 1495 return NULL; 1496 1497 cf = &ctx->job; 1498 ctx->op = op; 1499 1500 /* input */ 1501 rte_prefetch0(cf->sg); 1502 sg = &cf->sg[2]; 1503 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg)); 1504 if (is_encode(ses)) { 1505 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1506 sg->length = ses->iv.length; 1507 length += sg->length; 1508 cpu_to_hw_sg(sg); 1509 1510 sg++; 1511 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); 1512 sg->length = sym->auth.data.length; 1513 length += sg->length; 1514 sg->final = 1; 1515 cpu_to_hw_sg(sg); 1516 } else { 1517 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1518 sg->length = ses->iv.length; 1519 length += sg->length; 1520 cpu_to_hw_sg(sg); 1521 1522 sg++; 1523 1524 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); 1525 sg->length = sym->auth.data.length; 1526 length += sg->length; 1527 cpu_to_hw_sg(sg); 1528 1529 memcpy(ctx->digest, sym->auth.digest.data, 1530 ses->digest_length); 1531 sg++; 1532 1533 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1534 sg->length = ses->digest_length; 1535 length += sg->length; 1536 sg->final = 1; 1537 cpu_to_hw_sg(sg); 1538 } 1539 /* input compound frame */ 1540 cf->sg[1].length = length; 1541 cf->sg[1].extension = 1; 1542 cf->sg[1].final = 1; 1543 cpu_to_hw_sg(&cf->sg[1]); 1544 1545 /* output */ 1546 sg++; 1547 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg)); 1548 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset); 1549 sg->length = sym->cipher.data.length; 1550 length = sg->length; 1551 if (is_encode(ses)) { 1552 cpu_to_hw_sg(sg); 1553 /* set auth output */ 1554 sg++; 1555 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 1556 sg->length = ses->digest_length; 1557 length += sg->length; 1558 } 1559 sg->final = 1; 1560 cpu_to_hw_sg(sg); 1561 1562 /* output compound frame */ 1563 cf->sg[0].length = length; 1564 cf->sg[0].extension = 1; 1565 cpu_to_hw_sg(&cf->sg[0]); 1566 1567 return cf; 1568 } 1569 1570 #ifdef RTE_LIBRTE_SECURITY 1571 static inline struct dpaa_sec_job * 1572 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses) 1573 { 1574 struct rte_crypto_sym_op *sym = op->sym; 1575 struct dpaa_sec_job *cf; 1576 struct dpaa_sec_op_ctx *ctx; 1577 struct qm_sg_entry *sg; 1578 phys_addr_t src_start_addr, dst_start_addr; 1579 1580 ctx = dpaa_sec_alloc_ctx(ses, 2); 1581 if (!ctx) 1582 return NULL; 1583 cf = &ctx->job; 1584 ctx->op = op; 1585 1586 src_start_addr = rte_pktmbuf_mtophys(sym->m_src); 1587 1588 if (sym->m_dst) 1589 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst); 1590 else 1591 dst_start_addr = src_start_addr; 1592 1593 /* input */ 1594 sg = &cf->sg[1]; 1595 qm_sg_entry_set64(sg, src_start_addr); 1596 sg->length = sym->m_src->pkt_len; 1597 sg->final = 1; 1598 cpu_to_hw_sg(sg); 1599 1600 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK; 1601 /* output */ 1602 sg = &cf->sg[0]; 
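/* for protocol (IPsec/PDCP) offload the output entry covers the whole remaining buffer; SEC writes the encapsulated/decapsulated packet here and the final length is read back from the output SG entry on dequeue */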
1603 qm_sg_entry_set64(sg, dst_start_addr); 1604 sg->length = sym->m_src->buf_len - sym->m_src->data_off; 1605 cpu_to_hw_sg(sg); 1606 1607 return cf; 1608 } 1609 1610 static inline struct dpaa_sec_job * 1611 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1612 { 1613 struct rte_crypto_sym_op *sym = op->sym; 1614 struct dpaa_sec_job *cf; 1615 struct dpaa_sec_op_ctx *ctx; 1616 struct qm_sg_entry *sg, *out_sg, *in_sg; 1617 struct rte_mbuf *mbuf; 1618 uint8_t req_segs; 1619 uint32_t in_len = 0, out_len = 0; 1620 1621 if (sym->m_dst) 1622 mbuf = sym->m_dst; 1623 else 1624 mbuf = sym->m_src; 1625 1626 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2; 1627 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1628 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d", 1629 MAX_SG_ENTRIES); 1630 return NULL; 1631 } 1632 1633 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1634 if (!ctx) 1635 return NULL; 1636 cf = &ctx->job; 1637 ctx->op = op; 1638 /* output */ 1639 out_sg = &cf->sg[0]; 1640 out_sg->extension = 1; 1641 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1642 1643 /* 1st seg */ 1644 sg = &cf->sg[2]; 1645 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1646 sg->offset = 0; 1647 1648 /* Successive segs */ 1649 while (mbuf->next) { 1650 sg->length = mbuf->data_len; 1651 out_len += sg->length; 1652 mbuf = mbuf->next; 1653 cpu_to_hw_sg(sg); 1654 sg++; 1655 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1656 sg->offset = 0; 1657 } 1658 sg->length = mbuf->buf_len - mbuf->data_off; 1659 out_len += sg->length; 1660 sg->final = 1; 1661 cpu_to_hw_sg(sg); 1662 1663 out_sg->length = out_len; 1664 cpu_to_hw_sg(out_sg); 1665 1666 /* input */ 1667 mbuf = sym->m_src; 1668 in_sg = &cf->sg[1]; 1669 in_sg->extension = 1; 1670 in_sg->final = 1; 1671 in_len = mbuf->data_len; 1672 1673 sg++; 1674 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1675 1676 /* 1st seg */ 1677 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1678 sg->length = mbuf->data_len; 1679 sg->offset = 0; 1680 1681 /* Successive segs */ 1682 mbuf = mbuf->next; 1683 while (mbuf) { 1684 cpu_to_hw_sg(sg); 1685 sg++; 1686 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1687 sg->length = mbuf->data_len; 1688 sg->offset = 0; 1689 in_len += sg->length; 1690 mbuf = mbuf->next; 1691 } 1692 sg->final = 1; 1693 cpu_to_hw_sg(sg); 1694 1695 in_sg->length = in_len; 1696 cpu_to_hw_sg(in_sg); 1697 1698 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK; 1699 1700 return cf; 1701 } 1702 #endif 1703 1704 static uint16_t 1705 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1706 uint16_t nb_ops) 1707 { 1708 /* Function to transmit the frames to given device and queuepair */ 1709 uint32_t loop; 1710 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; 1711 uint16_t num_tx = 0; 1712 struct qm_fd fds[DPAA_SEC_BURST], *fd; 1713 uint32_t frames_to_send; 1714 struct rte_crypto_op *op; 1715 struct dpaa_sec_job *cf; 1716 dpaa_sec_session *ses; 1717 uint16_t auth_hdr_len, auth_tail_len; 1718 uint32_t index, flags[DPAA_SEC_BURST] = {0}; 1719 struct qman_fq *inq[DPAA_SEC_BURST]; 1720 1721 while (nb_ops) { 1722 frames_to_send = (nb_ops > DPAA_SEC_BURST) ? 
1723 DPAA_SEC_BURST : nb_ops; 1724 for (loop = 0; loop < frames_to_send; loop++) { 1725 op = *(ops++); 1726 if (op->sym->m_src->seqn != 0) { 1727 index = op->sym->m_src->seqn - 1; 1728 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { 1729 /* QM_EQCR_DCA_IDXMASK = 0x0f */ 1730 flags[loop] = ((index & 0x0f) << 8); 1731 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; 1732 DPAA_PER_LCORE_DQRR_SIZE--; 1733 DPAA_PER_LCORE_DQRR_HELD &= 1734 ~(1 << index); 1735 } 1736 } 1737 1738 switch (op->sess_type) { 1739 case RTE_CRYPTO_OP_WITH_SESSION: 1740 ses = (dpaa_sec_session *) 1741 get_sym_session_private_data( 1742 op->sym->session, 1743 cryptodev_driver_id); 1744 break; 1745 #ifdef RTE_LIBRTE_SECURITY 1746 case RTE_CRYPTO_OP_SECURITY_SESSION: 1747 ses = (dpaa_sec_session *) 1748 get_sec_session_private_data( 1749 op->sym->sec_session); 1750 break; 1751 #endif 1752 default: 1753 DPAA_SEC_DP_ERR( 1754 "sessionless crypto op not supported"); 1755 frames_to_send = loop; 1756 nb_ops = loop; 1757 goto send_pkts; 1758 } 1759 1760 if (!ses) { 1761 DPAA_SEC_DP_ERR("session not available"); 1762 frames_to_send = loop; 1763 nb_ops = loop; 1764 goto send_pkts; 1765 } 1766 1767 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) { 1768 if (dpaa_sec_attach_sess_q(qp, ses)) { 1769 frames_to_send = loop; 1770 nb_ops = loop; 1771 goto send_pkts; 1772 } 1773 } else if (unlikely(ses->qp[rte_lcore_id() % 1774 MAX_DPAA_CORES] != qp)) { 1775 DPAA_SEC_DP_ERR("Old:sess->qp = %p" 1776 " New qp = %p\n", 1777 ses->qp[rte_lcore_id() % 1778 MAX_DPAA_CORES], qp); 1779 frames_to_send = loop; 1780 nb_ops = loop; 1781 goto send_pkts; 1782 } 1783 1784 auth_hdr_len = op->sym->auth.data.length - 1785 op->sym->cipher.data.length; 1786 auth_tail_len = 0; 1787 1788 if (rte_pktmbuf_is_contiguous(op->sym->m_src) && 1789 ((op->sym->m_dst == NULL) || 1790 rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1791 switch (ses->ctxt) { 1792 #ifdef RTE_LIBRTE_SECURITY 1793 case DPAA_SEC_PDCP: 1794 case DPAA_SEC_IPSEC: 1795 cf = build_proto(op, ses); 1796 break; 1797 #endif 1798 case DPAA_SEC_AUTH: 1799 cf = build_auth_only(op, ses); 1800 break; 1801 case DPAA_SEC_CIPHER: 1802 cf = build_cipher_only(op, ses); 1803 break; 1804 case DPAA_SEC_AEAD: 1805 cf = build_cipher_auth_gcm(op, ses); 1806 auth_hdr_len = ses->auth_only_len; 1807 break; 1808 case DPAA_SEC_CIPHER_HASH: 1809 auth_hdr_len = 1810 op->sym->cipher.data.offset 1811 - op->sym->auth.data.offset; 1812 auth_tail_len = 1813 op->sym->auth.data.length 1814 - op->sym->cipher.data.length 1815 - auth_hdr_len; 1816 cf = build_cipher_auth(op, ses); 1817 break; 1818 default: 1819 DPAA_SEC_DP_ERR("not supported ops"); 1820 frames_to_send = loop; 1821 nb_ops = loop; 1822 goto send_pkts; 1823 } 1824 } else { 1825 switch (ses->ctxt) { 1826 #ifdef RTE_LIBRTE_SECURITY 1827 case DPAA_SEC_PDCP: 1828 case DPAA_SEC_IPSEC: 1829 cf = build_proto_sg(op, ses); 1830 break; 1831 #endif 1832 case DPAA_SEC_AUTH: 1833 cf = build_auth_only_sg(op, ses); 1834 break; 1835 case DPAA_SEC_CIPHER: 1836 cf = build_cipher_only_sg(op, ses); 1837 break; 1838 case DPAA_SEC_AEAD: 1839 cf = build_cipher_auth_gcm_sg(op, ses); 1840 auth_hdr_len = ses->auth_only_len; 1841 break; 1842 case DPAA_SEC_CIPHER_HASH: 1843 auth_hdr_len = 1844 op->sym->cipher.data.offset 1845 - op->sym->auth.data.offset; 1846 auth_tail_len = 1847 op->sym->auth.data.length 1848 - op->sym->cipher.data.length 1849 - auth_hdr_len; 1850 cf = build_cipher_auth_sg(op, ses); 1851 break; 1852 default: 1853 DPAA_SEC_DP_ERR("not supported ops"); 1854 frames_to_send = loop; 1855 nb_ops 
= loop; 1856 goto send_pkts; 1857 } 1858 } 1859 if (unlikely(!cf)) { 1860 frames_to_send = loop; 1861 nb_ops = loop; 1862 goto send_pkts; 1863 } 1864 1865 fd = &fds[loop]; 1866 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES]; 1867 fd->opaque_addr = 0; 1868 fd->cmd = 0; 1869 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg)); 1870 fd->_format1 = qm_fd_compound; 1871 fd->length29 = 2 * sizeof(struct qm_sg_entry); 1872 1873 /* Auth_only_len is set as 0 in descriptor and it is 1874 * overwritten here in the fd.cmd which will update 1875 * the DPOVRD reg. 1876 */ 1877 if (auth_hdr_len || auth_tail_len) { 1878 fd->cmd = 0x80000000; 1879 fd->cmd |= 1880 ((auth_tail_len << 16) | auth_hdr_len); 1881 } 1882 1883 #ifdef RTE_LIBRTE_SECURITY 1884 /* In case of PDCP, per packet HFN is stored in 1885 * mbuf priv after sym_op. 1886 */ 1887 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) { 1888 fd->cmd = 0x80000000 | 1889 *((uint32_t *)((uint8_t *)op + 1890 ses->pdcp.hfn_ovd_offset)); 1891 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n", 1892 *((uint32_t *)((uint8_t *)op + 1893 ses->pdcp.hfn_ovd_offset)), 1894 ses->pdcp.hfn_ovd); 1895 } 1896 #endif 1897 } 1898 send_pkts: 1899 loop = 0; 1900 while (loop < frames_to_send) { 1901 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop], 1902 &flags[loop], frames_to_send - loop); 1903 } 1904 nb_ops -= frames_to_send; 1905 num_tx += frames_to_send; 1906 } 1907 1908 dpaa_qp->tx_pkts += num_tx; 1909 dpaa_qp->tx_errs += nb_ops - num_tx; 1910 1911 return num_tx; 1912 } 1913 1914 static uint16_t 1915 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1916 uint16_t nb_ops) 1917 { 1918 uint16_t num_rx; 1919 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; 1920 1921 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops); 1922 1923 dpaa_qp->rx_pkts += num_rx; 1924 dpaa_qp->rx_errs += nb_ops - num_rx; 1925 1926 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 1927 1928 return num_rx; 1929 } 1930 1931 /** Release queue pair */ 1932 static int 1933 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev, 1934 uint16_t qp_id) 1935 { 1936 struct dpaa_sec_dev_private *internals; 1937 struct dpaa_sec_qp *qp = NULL; 1938 1939 PMD_INIT_FUNC_TRACE(); 1940 1941 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id); 1942 1943 internals = dev->data->dev_private; 1944 if (qp_id >= internals->max_nb_queue_pairs) { 1945 DPAA_SEC_ERR("Max supported qpid %d", 1946 internals->max_nb_queue_pairs); 1947 return -EINVAL; 1948 } 1949 1950 qp = &internals->qps[qp_id]; 1951 rte_mempool_free(qp->ctx_pool); 1952 qp->internals = NULL; 1953 dev->data->queue_pairs[qp_id] = NULL; 1954 1955 return 0; 1956 } 1957 1958 /** Setup a queue pair */ 1959 static int 1960 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1961 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1962 __rte_unused int socket_id) 1963 { 1964 struct dpaa_sec_dev_private *internals; 1965 struct dpaa_sec_qp *qp = NULL; 1966 char str[20]; 1967 1968 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); 1969 1970 internals = dev->data->dev_private; 1971 if (qp_id >= internals->max_nb_queue_pairs) { 1972 DPAA_SEC_ERR("Max supported qpid %d", 1973 internals->max_nb_queue_pairs); 1974 return -EINVAL; 1975 } 1976 1977 qp = &internals->qps[qp_id]; 1978 qp->internals = internals; 1979 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d", 1980 dev->data->dev_id, qp_id); 1981 if (!qp->ctx_pool) { 1982 qp->ctx_pool = rte_mempool_create((const char *)str, 1983 CTX_POOL_NUM_BUFS, 1984 
CTX_POOL_BUF_SIZE, 1985 CTX_POOL_CACHE_SIZE, 0, 1986 NULL, NULL, NULL, NULL, 1987 SOCKET_ID_ANY, 0); 1988 if (!qp->ctx_pool) { 1989 DPAA_SEC_ERR("%s create failed\n", str); 1990 return -ENOMEM; 1991 } 1992 } else 1993 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d", 1994 dev->data->dev_id, qp_id); 1995 dev->data->queue_pairs[qp_id] = qp; 1996 1997 return 0; 1998 } 1999 2000 /** Returns the size of session structure */ 2001 static unsigned int 2002 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 2003 { 2004 PMD_INIT_FUNC_TRACE(); 2005 2006 return sizeof(dpaa_sec_session); 2007 } 2008 2009 static int 2010 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused, 2011 struct rte_crypto_sym_xform *xform, 2012 dpaa_sec_session *session) 2013 { 2014 session->ctxt = DPAA_SEC_CIPHER; 2015 session->cipher_alg = xform->cipher.algo; 2016 session->iv.length = xform->cipher.iv.length; 2017 session->iv.offset = xform->cipher.iv.offset; 2018 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 2019 RTE_CACHE_LINE_SIZE); 2020 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 2021 DPAA_SEC_ERR("No Memory for cipher key"); 2022 return -ENOMEM; 2023 } 2024 session->cipher_key.length = xform->cipher.key.length; 2025 2026 memcpy(session->cipher_key.data, xform->cipher.key.data, 2027 xform->cipher.key.length); 2028 switch (xform->cipher.algo) { 2029 case RTE_CRYPTO_CIPHER_AES_CBC: 2030 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2031 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2032 break; 2033 case RTE_CRYPTO_CIPHER_3DES_CBC: 2034 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2035 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2036 break; 2037 case RTE_CRYPTO_CIPHER_AES_CTR: 2038 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2039 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2040 break; 2041 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2042 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8; 2043 break; 2044 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2045 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE; 2046 break; 2047 default: 2048 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2049 xform->cipher.algo); 2050 return -ENOTSUP; 2051 } 2052 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2053 DIR_ENC : DIR_DEC; 2054 2055 return 0; 2056 } 2057 2058 static int 2059 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused, 2060 struct rte_crypto_sym_xform *xform, 2061 dpaa_sec_session *session) 2062 { 2063 session->ctxt = DPAA_SEC_AUTH; 2064 session->auth_alg = xform->auth.algo; 2065 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 2066 RTE_CACHE_LINE_SIZE); 2067 if (session->auth_key.data == NULL && xform->auth.key.length > 0) { 2068 DPAA_SEC_ERR("No Memory for auth key"); 2069 return -ENOMEM; 2070 } 2071 session->auth_key.length = xform->auth.key.length; 2072 session->digest_length = xform->auth.digest_length; 2073 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) { 2074 session->iv.offset = xform->auth.iv.offset; 2075 session->iv.length = xform->auth.iv.length; 2076 } 2077 2078 memcpy(session->auth_key.data, xform->auth.key.data, 2079 xform->auth.key.length); 2080 2081 switch (xform->auth.algo) { 2082 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2083 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2084 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2085 break; 2086 case RTE_CRYPTO_AUTH_MD5_HMAC: 2087 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2088 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2089 break; 2090 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2091 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2092 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2093 break; 2094 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2095 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2096 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2097 break; 2098 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2099 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2100 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2101 break; 2102 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2103 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2104 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2105 break; 2106 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2107 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9; 2108 session->auth_key.algmode = OP_ALG_AAI_F9; 2109 break; 2110 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2111 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA; 2112 session->auth_key.algmode = OP_ALG_AAI_F9; 2113 break; 2114 default: 2115 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u", 2116 xform->auth.algo); 2117 return -ENOTSUP; 2118 } 2119 2120 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 
2121 DIR_ENC : DIR_DEC; 2122 2123 return 0; 2124 } 2125 2126 static int 2127 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused, 2128 struct rte_crypto_sym_xform *xform, 2129 dpaa_sec_session *session) 2130 { 2131 2132 struct rte_crypto_cipher_xform *cipher_xform; 2133 struct rte_crypto_auth_xform *auth_xform; 2134 2135 session->ctxt = DPAA_SEC_CIPHER_HASH; 2136 if (session->auth_cipher_text) { 2137 cipher_xform = &xform->cipher; 2138 auth_xform = &xform->next->auth; 2139 } else { 2140 cipher_xform = &xform->next->cipher; 2141 auth_xform = &xform->auth; 2142 } 2143 2144 /* Set IV parameters */ 2145 session->iv.offset = cipher_xform->iv.offset; 2146 session->iv.length = cipher_xform->iv.length; 2147 2148 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2149 RTE_CACHE_LINE_SIZE); 2150 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2151 DPAA_SEC_ERR("No Memory for cipher key"); 2152 return -ENOMEM; 2153 } 2154 session->cipher_key.length = cipher_xform->key.length; 2155 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2156 RTE_CACHE_LINE_SIZE); 2157 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2158 DPAA_SEC_ERR("No Memory for auth key"); 2159 return -ENOMEM; 2160 } 2161 session->auth_key.length = auth_xform->key.length; 2162 memcpy(session->cipher_key.data, cipher_xform->key.data, 2163 cipher_xform->key.length); 2164 memcpy(session->auth_key.data, auth_xform->key.data, 2165 auth_xform->key.length); 2166 2167 session->digest_length = auth_xform->digest_length; 2168 session->auth_alg = auth_xform->algo; 2169 2170 switch (auth_xform->algo) { 2171 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2172 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2173 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2174 break; 2175 case RTE_CRYPTO_AUTH_MD5_HMAC: 2176 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2177 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2178 break; 2179 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2180 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2181 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2182 break; 2183 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2184 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2185 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2186 break; 2187 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2188 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2189 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2190 break; 2191 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2192 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2193 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2194 break; 2195 default: 2196 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u", 2197 auth_xform->algo); 2198 return -ENOTSUP; 2199 } 2200 2201 session->cipher_alg = cipher_xform->algo; 2202 2203 switch (cipher_xform->algo) { 2204 case RTE_CRYPTO_CIPHER_AES_CBC: 2205 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2206 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2207 break; 2208 case RTE_CRYPTO_CIPHER_3DES_CBC: 2209 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2210 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2211 break; 2212 case RTE_CRYPTO_CIPHER_AES_CTR: 2213 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2214 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2215 break; 2216 default: 2217 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2218 cipher_xform->algo); 2219 return -ENOTSUP; 2220 } 2221 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2222 DIR_ENC : DIR_DEC; 2223 return 0; 2224 } 2225 2226 static int 2227 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, 2228 struct rte_crypto_sym_xform *xform, 2229 dpaa_sec_session *session) 2230 { 2231 session->aead_alg = xform->aead.algo; 2232 session->ctxt = DPAA_SEC_AEAD; 2233 session->iv.length = xform->aead.iv.length; 2234 session->iv.offset = xform->aead.iv.offset; 2235 session->auth_only_len = xform->aead.aad_length; 2236 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, 2237 RTE_CACHE_LINE_SIZE); 2238 if (session->aead_key.data == NULL && xform->aead.key.length > 0) { 2239 DPAA_SEC_ERR("No Memory for aead key\n"); 2240 return -ENOMEM; 2241 } 2242 session->aead_key.length = xform->aead.key.length; 2243 session->digest_length = xform->aead.digest_length; 2244 2245 memcpy(session->aead_key.data, xform->aead.key.data, 2246 xform->aead.key.length); 2247 2248 switch (session->aead_alg) { 2249 case RTE_CRYPTO_AEAD_AES_GCM: 2250 session->aead_key.alg = OP_ALG_ALGSEL_AES; 2251 session->aead_key.algmode = OP_ALG_AAI_GCM; 2252 break; 2253 default: 2254 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg); 2255 return -ENOTSUP; 2256 } 2257 2258 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2259 DIR_ENC : DIR_DEC; 2260 2261 return 0; 2262 } 2263 2264 static struct qman_fq * 2265 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) 2266 { 2267 unsigned int i; 2268 2269 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2270 if (qi->inq_attach[i] == 0) { 2271 qi->inq_attach[i] = 1; 2272 return &qi->inq[i]; 2273 } 2274 } 2275 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions); 2276 2277 return NULL; 2278 } 2279 2280 static int 2281 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) 2282 { 2283 unsigned int i; 2284 2285 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2286 if (&qi->inq[i] == fq) { 2287 if (qman_retire_fq(fq, NULL) != 0) 2288 DPAA_SEC_WARN("Queue is not retired\n"); 2289 qman_oos_fq(fq); 2290 qi->inq_attach[i] = 0; 2291 return 0; 2292 } 2293 } 2294 return -1; 2295 } 2296 2297 static int 2298 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) 2299 { 2300 int ret; 2301 2302 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp; 2303 ret = dpaa_sec_prep_cdb(sess); 2304 if (ret) { 2305 DPAA_SEC_ERR("Unable to prepare sec cdb"); 2306 return ret; 2307 } 2308 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 2309 ret = rte_dpaa_portal_init((void *)0); 2310 if (ret) { 2311 DPAA_SEC_ERR("Failure in affining portal"); 2312 return ret; 2313 } 2314 } 2315 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES], 2316 rte_dpaa_mem_vtop(&sess->cdb), 2317 qman_fq_fqid(&qp->outq)); 2318 if (ret) 2319 DPAA_SEC_ERR("Unable to init sec queue"); 2320 2321 return ret; 2322 } 2323 2324 static inline void 2325 free_session_data(dpaa_sec_session *s) 2326 { 2327 if (is_aead(s)) 2328 rte_free(s->aead_key.data); 2329 else { 2330 rte_free(s->auth_key.data); 2331 rte_free(s->cipher_key.data); 2332 } 2333 memset(s, 0, sizeof(dpaa_sec_session)); 2334 } 2335 2336 static int 2337 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, 2338 struct rte_crypto_sym_xform *xform, void *sess) 2339 { 2340 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2341 dpaa_sec_session *session = sess; 2342 uint32_t i; 2343 int ret; 2344 2345 PMD_INIT_FUNC_TRACE(); 2346 2347 if (unlikely(sess == NULL)) { 2348 DPAA_SEC_ERR("invalid session struct"); 2349 return -EINVAL; 2350 } 2351 memset(session, 0, 
sizeof(dpaa_sec_session)); 2352 2353 /* Default IV length = 0 */ 2354 session->iv.length = 0; 2355 2356 /* Cipher Only */ 2357 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2358 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2359 ret = dpaa_sec_cipher_init(dev, xform, session); 2360 2361 /* Authentication Only */ 2362 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2363 xform->next == NULL) { 2364 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2365 session->ctxt = DPAA_SEC_AUTH; 2366 ret = dpaa_sec_auth_init(dev, xform, session); 2367 2368 /* Cipher then Authenticate */ 2369 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2370 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2371 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 2372 session->auth_cipher_text = 1; 2373 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2374 ret = dpaa_sec_auth_init(dev, xform, session); 2375 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2376 ret = dpaa_sec_cipher_init(dev, xform, session); 2377 else 2378 ret = dpaa_sec_chain_init(dev, xform, session); 2379 } else { 2380 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2381 return -ENOTSUP; 2382 } 2383 /* Authenticate then Cipher */ 2384 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2385 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2386 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 2387 session->auth_cipher_text = 0; 2388 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2389 ret = dpaa_sec_cipher_init(dev, xform, session); 2390 else if (xform->next->cipher.algo 2391 == RTE_CRYPTO_CIPHER_NULL) 2392 ret = dpaa_sec_auth_init(dev, xform, session); 2393 else 2394 ret = dpaa_sec_chain_init(dev, xform, session); 2395 } else { 2396 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2397 return -ENOTSUP; 2398 } 2399 2400 /* AEAD operation for AES-GCM kind of Algorithms */ 2401 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2402 xform->next == NULL) { 2403 ret = dpaa_sec_aead_init(dev, xform, session); 2404 2405 } else { 2406 DPAA_SEC_ERR("Invalid crypto type"); 2407 return -EINVAL; 2408 } 2409 if (ret) { 2410 DPAA_SEC_ERR("unable to init session"); 2411 goto err1; 2412 } 2413 2414 rte_spinlock_lock(&internals->lock); 2415 for (i = 0; i < MAX_DPAA_CORES; i++) { 2416 session->inq[i] = dpaa_sec_attach_rxq(internals); 2417 if (session->inq[i] == NULL) { 2418 DPAA_SEC_ERR("unable to attach sec queue"); 2419 rte_spinlock_unlock(&internals->lock); 2420 ret = -EBUSY; 2421 goto err1; 2422 } 2423 } 2424 rte_spinlock_unlock(&internals->lock); 2425 2426 return 0; 2427 2428 err1: 2429 free_session_data(session); 2430 return ret; 2431 } 2432 2433 static int 2434 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, 2435 struct rte_crypto_sym_xform *xform, 2436 struct rte_cryptodev_sym_session *sess, 2437 struct rte_mempool *mempool) 2438 { 2439 void *sess_private_data; 2440 int ret; 2441 2442 PMD_INIT_FUNC_TRACE(); 2443 2444 if (rte_mempool_get(mempool, &sess_private_data)) { 2445 DPAA_SEC_ERR("Couldn't get object from session mempool"); 2446 return -ENOMEM; 2447 } 2448 2449 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); 2450 if (ret != 0) { 2451 DPAA_SEC_ERR("failed to configure session parameters"); 2452 2453 /* Return session to mempool */ 2454 rte_mempool_put(mempool, sess_private_data); 2455 return ret; 2456 } 2457 2458 set_sym_session_private_data(sess, dev->driver_id, 2459 sess_private_data); 2460 2461 2462 return 0; 2463 } 2464 2465 static inline void 2466 
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s) 2467 { 2468 struct dpaa_sec_dev_private *qi = dev->data->dev_private; 2469 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s); 2470 uint8_t i; 2471 2472 for (i = 0; i < MAX_DPAA_CORES; i++) { 2473 if (s->inq[i]) 2474 dpaa_sec_detach_rxq(qi, s->inq[i]); 2475 s->inq[i] = NULL; 2476 s->qp[i] = NULL; 2477 } 2478 free_session_data(s); 2479 rte_mempool_put(sess_mp, (void *)s); 2480 } 2481 2482 /** Clear the memory of session so it doesn't leave key material behind */ 2483 static void 2484 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, 2485 struct rte_cryptodev_sym_session *sess) 2486 { 2487 PMD_INIT_FUNC_TRACE(); 2488 uint8_t index = dev->driver_id; 2489 void *sess_priv = get_sym_session_private_data(sess, index); 2490 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2491 2492 if (sess_priv) { 2493 free_session_memory(dev, s); 2494 set_sym_session_private_data(sess, index, NULL); 2495 } 2496 } 2497 2498 #ifdef RTE_LIBRTE_SECURITY 2499 static int 2500 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2501 struct rte_security_ipsec_xform *ipsec_xform, 2502 dpaa_sec_session *session) 2503 { 2504 PMD_INIT_FUNC_TRACE(); 2505 2506 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2507 RTE_CACHE_LINE_SIZE); 2508 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2509 DPAA_SEC_ERR("No Memory for aead key"); 2510 return -ENOMEM; 2511 } 2512 memcpy(session->aead_key.data, aead_xform->key.data, 2513 aead_xform->key.length); 2514 2515 session->digest_length = aead_xform->digest_length; 2516 session->aead_key.length = aead_xform->key.length; 2517 2518 switch (aead_xform->algo) { 2519 case RTE_CRYPTO_AEAD_AES_GCM: 2520 switch (session->digest_length) { 2521 case 8: 2522 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8; 2523 break; 2524 case 12: 2525 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12; 2526 break; 2527 case 16: 2528 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16; 2529 break; 2530 default: 2531 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d", 2532 session->digest_length); 2533 return -EINVAL; 2534 } 2535 if (session->dir == DIR_ENC) { 2536 memcpy(session->encap_pdb.gcm.salt, 2537 (uint8_t *)&(ipsec_xform->salt), 4); 2538 } else { 2539 memcpy(session->decap_pdb.gcm.salt, 2540 (uint8_t *)&(ipsec_xform->salt), 4); 2541 } 2542 session->aead_key.algmode = OP_ALG_AAI_GCM; 2543 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2544 break; 2545 default: 2546 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u", 2547 aead_xform->algo); 2548 return -ENOTSUP; 2549 } 2550 return 0; 2551 } 2552 2553 static int 2554 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2555 struct rte_crypto_auth_xform *auth_xform, 2556 struct rte_security_ipsec_xform *ipsec_xform, 2557 dpaa_sec_session *session) 2558 { 2559 if (cipher_xform) { 2560 session->cipher_key.data = rte_zmalloc(NULL, 2561 cipher_xform->key.length, 2562 RTE_CACHE_LINE_SIZE); 2563 if (session->cipher_key.data == NULL && 2564 cipher_xform->key.length > 0) { 2565 DPAA_SEC_ERR("No Memory for cipher key"); 2566 return -ENOMEM; 2567 } 2568 2569 session->cipher_key.length = cipher_xform->key.length; 2570 memcpy(session->cipher_key.data, cipher_xform->key.data, 2571 cipher_xform->key.length); 2572 session->cipher_alg = cipher_xform->algo; 2573 } else { 2574 session->cipher_key.data = NULL; 2575 session->cipher_key.length = 0; 2576 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2577 } 2578 2579 if (auth_xform) { 2580 
		session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
				"+++Using sha256-hmac truncated len is non-standard, "
				"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}

/* Build a lookaside-protocol (IPsec) session from the security configuration */
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
						ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
						ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					       ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2756 IPPROTO_ESP : IPPROTO_AH; 2757 session->ip4_hdr.ip_sum = 0; 2758 session->ip4_hdr.ip_src = 2759 ipsec_xform->tunnel.ipv4.src_ip; 2760 session->ip4_hdr.ip_dst = 2761 ipsec_xform->tunnel.ipv4.dst_ip; 2762 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 2763 (void *)&session->ip4_hdr, 2764 sizeof(struct ip)); 2765 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 2766 } else if (ipsec_xform->tunnel.type == 2767 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2768 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2769 DPAA_IPv6_DEFAULT_VTC_FLOW | 2770 ((ipsec_xform->tunnel.ipv6.dscp << 2771 RTE_IPV6_HDR_TC_SHIFT) & 2772 RTE_IPV6_HDR_TC_MASK) | 2773 ((ipsec_xform->tunnel.ipv6.flabel << 2774 RTE_IPV6_HDR_FL_SHIFT) & 2775 RTE_IPV6_HDR_FL_MASK)); 2776 /* Payload length will be updated by HW */ 2777 session->ip6_hdr.payload_len = 0; 2778 session->ip6_hdr.hop_limits = 2779 ipsec_xform->tunnel.ipv6.hlimit; 2780 session->ip6_hdr.proto = (ipsec_xform->proto == 2781 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2782 IPPROTO_ESP : IPPROTO_AH; 2783 memcpy(&session->ip6_hdr.src_addr, 2784 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2785 memcpy(&session->ip6_hdr.dst_addr, 2786 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2787 session->encap_pdb.ip_hdr_len = 2788 sizeof(struct rte_ipv6_hdr); 2789 } 2790 session->encap_pdb.options = 2791 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2792 PDBOPTS_ESP_OIHI_PDB_INL | 2793 PDBOPTS_ESP_IVSRC | 2794 PDBHMO_ESP_ENCAP_DTTL | 2795 PDBHMO_ESP_SNR; 2796 if (ipsec_xform->options.esn) 2797 session->encap_pdb.options |= PDBOPTS_ESP_ESN; 2798 session->encap_pdb.spi = ipsec_xform->spi; 2799 2800 } else if (ipsec_xform->direction == 2801 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2802 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) 2803 session->decap_pdb.options = sizeof(struct ip) << 16; 2804 else 2805 session->decap_pdb.options = 2806 sizeof(struct rte_ipv6_hdr) << 16; 2807 if (ipsec_xform->options.esn) 2808 session->decap_pdb.options |= PDBOPTS_ESP_ESN; 2809 if (ipsec_xform->replay_win_sz) { 2810 uint32_t win_sz; 2811 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2812 2813 switch (win_sz) { 2814 case 1: 2815 case 2: 2816 case 4: 2817 case 8: 2818 case 16: 2819 case 32: 2820 session->decap_pdb.options |= PDBOPTS_ESP_ARS32; 2821 break; 2822 case 64: 2823 session->decap_pdb.options |= PDBOPTS_ESP_ARS64; 2824 break; 2825 default: 2826 session->decap_pdb.options |= 2827 PDBOPTS_ESP_ARS128; 2828 } 2829 } 2830 } else 2831 goto out; 2832 rte_spinlock_lock(&internals->lock); 2833 for (i = 0; i < MAX_DPAA_CORES; i++) { 2834 session->inq[i] = dpaa_sec_attach_rxq(internals); 2835 if (session->inq[i] == NULL) { 2836 DPAA_SEC_ERR("unable to attach sec queue"); 2837 rte_spinlock_unlock(&internals->lock); 2838 goto out; 2839 } 2840 } 2841 rte_spinlock_unlock(&internals->lock); 2842 2843 return 0; 2844 out: 2845 free_session_data(session); 2846 return -1; 2847 } 2848 2849 static int 2850 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, 2851 struct rte_security_session_conf *conf, 2852 void *sess) 2853 { 2854 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2855 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2856 struct rte_crypto_auth_xform *auth_xform = NULL; 2857 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2858 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2859 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; 2860 uint32_t i; 2861 int ret; 2862 2863 PMD_INIT_FUNC_TRACE(); 2864 2865 memset(session, 0, sizeof(dpaa_sec_session)); 2866 2867 /* find 
xfrm types */ 2868 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2869 cipher_xform = &xform->cipher; 2870 if (xform->next != NULL) 2871 auth_xform = &xform->next->auth; 2872 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2873 auth_xform = &xform->auth; 2874 if (xform->next != NULL) 2875 cipher_xform = &xform->next->cipher; 2876 } else { 2877 DPAA_SEC_ERR("Invalid crypto type"); 2878 return -EINVAL; 2879 } 2880 2881 session->proto_alg = conf->protocol; 2882 session->ctxt = DPAA_SEC_PDCP; 2883 2884 if (cipher_xform) { 2885 switch (cipher_xform->algo) { 2886 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2887 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW; 2888 break; 2889 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2890 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC; 2891 break; 2892 case RTE_CRYPTO_CIPHER_AES_CTR: 2893 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES; 2894 break; 2895 case RTE_CRYPTO_CIPHER_NULL: 2896 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL; 2897 break; 2898 default: 2899 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2900 session->cipher_alg); 2901 return -EINVAL; 2902 } 2903 2904 session->cipher_key.data = rte_zmalloc(NULL, 2905 cipher_xform->key.length, 2906 RTE_CACHE_LINE_SIZE); 2907 if (session->cipher_key.data == NULL && 2908 cipher_xform->key.length > 0) { 2909 DPAA_SEC_ERR("No Memory for cipher key"); 2910 return -ENOMEM; 2911 } 2912 session->cipher_key.length = cipher_xform->key.length; 2913 memcpy(session->cipher_key.data, cipher_xform->key.data, 2914 cipher_xform->key.length); 2915 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2916 DIR_ENC : DIR_DEC; 2917 session->cipher_alg = cipher_xform->algo; 2918 } else { 2919 session->cipher_key.data = NULL; 2920 session->cipher_key.length = 0; 2921 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2922 session->dir = DIR_ENC; 2923 } 2924 2925 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 2926 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 && 2927 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) { 2928 DPAA_SEC_ERR( 2929 "PDCP Seq Num size should be 5/12 bits for cmode"); 2930 ret = -EINVAL; 2931 goto out; 2932 } 2933 } 2934 2935 if (auth_xform) { 2936 switch (auth_xform->algo) { 2937 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2938 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW; 2939 break; 2940 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2941 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC; 2942 break; 2943 case RTE_CRYPTO_AUTH_AES_CMAC: 2944 session->auth_key.alg = PDCP_AUTH_TYPE_AES; 2945 break; 2946 case RTE_CRYPTO_AUTH_NULL: 2947 session->auth_key.alg = PDCP_AUTH_TYPE_NULL; 2948 break; 2949 default: 2950 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2951 session->auth_alg); 2952 rte_free(session->cipher_key.data); 2953 return -EINVAL; 2954 } 2955 session->auth_key.data = rte_zmalloc(NULL, 2956 auth_xform->key.length, 2957 RTE_CACHE_LINE_SIZE); 2958 if (!session->auth_key.data && 2959 auth_xform->key.length > 0) { 2960 DPAA_SEC_ERR("No Memory for auth key"); 2961 rte_free(session->cipher_key.data); 2962 return -ENOMEM; 2963 } 2964 session->auth_key.length = auth_xform->key.length; 2965 memcpy(session->auth_key.data, auth_xform->key.data, 2966 auth_xform->key.length); 2967 session->auth_alg = auth_xform->algo; 2968 } else { 2969 session->auth_key.data = NULL; 2970 session->auth_key.length = 0; 2971 session->auth_alg = 0; 2972 } 2973 session->pdcp.domain = pdcp_xform->domain; 2974 session->pdcp.bearer = pdcp_xform->bearer; 2975 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 2976 session->pdcp.sn_size = 
pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* cipher_xform may be NULL for integrity-only sessions, so only
	 * take the HFN override offset when a cipher transform is present.
	 */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return ret;
}

static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions =
internals->max_nb_sessions; 3103 info->driver_id = cryptodev_driver_id; 3104 } 3105 } 3106 3107 static enum qman_cb_dqrr_result 3108 dpaa_sec_process_parallel_event(void *event, 3109 struct qman_portal *qm __always_unused, 3110 struct qman_fq *outq, 3111 const struct qm_dqrr_entry *dqrr, 3112 void **bufs) 3113 { 3114 const struct qm_fd *fd; 3115 struct dpaa_sec_job *job; 3116 struct dpaa_sec_op_ctx *ctx; 3117 struct rte_event *ev = (struct rte_event *)event; 3118 3119 fd = &dqrr->fd; 3120 3121 /* sg is embedded in an op ctx, 3122 * sg[0] is for output 3123 * sg[1] for input 3124 */ 3125 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3126 3127 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3128 ctx->fd_status = fd->status; 3129 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3130 struct qm_sg_entry *sg_out; 3131 uint32_t len; 3132 3133 sg_out = &job->sg[0]; 3134 hw_sg_to_cpu(sg_out); 3135 len = sg_out->length; 3136 ctx->op->sym->m_src->pkt_len = len; 3137 ctx->op->sym->m_src->data_len = len; 3138 } 3139 if (!ctx->fd_status) { 3140 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3141 } else { 3142 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3143 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3144 } 3145 ev->event_ptr = (void *)ctx->op; 3146 3147 ev->flow_id = outq->ev.flow_id; 3148 ev->sub_event_type = outq->ev.sub_event_type; 3149 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3150 ev->op = RTE_EVENT_OP_NEW; 3151 ev->sched_type = outq->ev.sched_type; 3152 ev->queue_id = outq->ev.queue_id; 3153 ev->priority = outq->ev.priority; 3154 *bufs = (void *)ctx->op; 3155 3156 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3157 3158 return qman_cb_dqrr_consume; 3159 } 3160 3161 static enum qman_cb_dqrr_result 3162 dpaa_sec_process_atomic_event(void *event, 3163 struct qman_portal *qm __rte_unused, 3164 struct qman_fq *outq, 3165 const struct qm_dqrr_entry *dqrr, 3166 void **bufs) 3167 { 3168 u8 index; 3169 const struct qm_fd *fd; 3170 struct dpaa_sec_job *job; 3171 struct dpaa_sec_op_ctx *ctx; 3172 struct rte_event *ev = (struct rte_event *)event; 3173 3174 fd = &dqrr->fd; 3175 3176 /* sg is embedded in an op ctx, 3177 * sg[0] is for output 3178 * sg[1] for input 3179 */ 3180 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3181 3182 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3183 ctx->fd_status = fd->status; 3184 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3185 struct qm_sg_entry *sg_out; 3186 uint32_t len; 3187 3188 sg_out = &job->sg[0]; 3189 hw_sg_to_cpu(sg_out); 3190 len = sg_out->length; 3191 ctx->op->sym->m_src->pkt_len = len; 3192 ctx->op->sym->m_src->data_len = len; 3193 } 3194 if (!ctx->fd_status) { 3195 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3196 } else { 3197 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3198 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3199 } 3200 ev->event_ptr = (void *)ctx->op; 3201 ev->flow_id = outq->ev.flow_id; 3202 ev->sub_event_type = outq->ev.sub_event_type; 3203 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3204 ev->op = RTE_EVENT_OP_NEW; 3205 ev->sched_type = outq->ev.sched_type; 3206 ev->queue_id = outq->ev.queue_id; 3207 ev->priority = outq->ev.priority; 3208 3209 /* Save active dqrr entries */ 3210 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1); 3211 DPAA_PER_LCORE_DQRR_SIZE++; 3212 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 3213 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src; 3214 ev->impl_opaque = index + 1; 3215 ctx->op->sym->m_src->seqn = (uint32_t)index + 1; 3216 *bufs = 
(void *)ctx->op; 3217 3218 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3219 3220 return qman_cb_dqrr_defer; 3221 } 3222 3223 int 3224 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, 3225 int qp_id, 3226 uint16_t ch_id, 3227 const struct rte_event *event) 3228 { 3229 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3230 struct qm_mcc_initfq opts = {0}; 3231 3232 int ret; 3233 3234 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3235 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3236 opts.fqd.dest.channel = ch_id; 3237 3238 switch (event->sched_type) { 3239 case RTE_SCHED_TYPE_ATOMIC: 3240 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 3241 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 3242 * configuration with HOLD_ACTIVE setting 3243 */ 3244 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 3245 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; 3246 break; 3247 case RTE_SCHED_TYPE_ORDERED: 3248 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n"); 3249 return -ENOTSUP; 3250 default: 3251 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 3252 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event; 3253 break; 3254 } 3255 3256 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts); 3257 if (unlikely(ret)) { 3258 DPAA_SEC_ERR("unable to init caam source fq!"); 3259 return ret; 3260 } 3261 3262 memcpy(&qp->outq.ev, event, sizeof(struct rte_event)); 3263 3264 return 0; 3265 } 3266 3267 int 3268 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev, 3269 int qp_id) 3270 { 3271 struct qm_mcc_initfq opts = {0}; 3272 int ret; 3273 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3274 3275 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3276 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3277 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx; 3278 qp->outq.cb.ern = ern_sec_fq_handler; 3279 qman_retire_fq(&qp->outq, NULL); 3280 qman_oos_fq(&qp->outq); 3281 ret = qman_init_fq(&qp->outq, 0, &opts); 3282 if (ret) 3283 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret); 3284 qp->outq.cb.dqrr = NULL; 3285 3286 return ret; 3287 } 3288 3289 static struct rte_cryptodev_ops crypto_ops = { 3290 .dev_configure = dpaa_sec_dev_configure, 3291 .dev_start = dpaa_sec_dev_start, 3292 .dev_stop = dpaa_sec_dev_stop, 3293 .dev_close = dpaa_sec_dev_close, 3294 .dev_infos_get = dpaa_sec_dev_infos_get, 3295 .queue_pair_setup = dpaa_sec_queue_pair_setup, 3296 .queue_pair_release = dpaa_sec_queue_pair_release, 3297 .sym_session_get_size = dpaa_sec_sym_session_get_size, 3298 .sym_session_configure = dpaa_sec_sym_session_configure, 3299 .sym_session_clear = dpaa_sec_sym_session_clear 3300 }; 3301 3302 #ifdef RTE_LIBRTE_SECURITY 3303 static const struct rte_security_capability * 3304 dpaa_sec_capabilities_get(void *device __rte_unused) 3305 { 3306 return dpaa_sec_security_cap; 3307 } 3308 3309 static const struct rte_security_ops dpaa_sec_security_ops = { 3310 .session_create = dpaa_sec_security_session_create, 3311 .session_update = NULL, 3312 .session_stats_get = NULL, 3313 .session_destroy = dpaa_sec_security_session_destroy, 3314 .set_pkt_metadata = NULL, 3315 .capabilities_get = dpaa_sec_capabilities_get 3316 }; 3317 #endif 3318 static int 3319 dpaa_sec_uninit(struct rte_cryptodev *dev) 3320 { 3321 struct dpaa_sec_dev_private *internals; 3322 3323 if (dev == NULL) 3324 return -ENODEV; 3325 3326 internals = dev->data->dev_private; 3327 rte_free(dev->security_ctx); 3328 3329 rte_free(internals); 3330 3331 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", 
3332 dev->data->name, rte_socket_id()); 3333 3334 return 0; 3335 } 3336 3337 static int 3338 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) 3339 { 3340 struct dpaa_sec_dev_private *internals; 3341 #ifdef RTE_LIBRTE_SECURITY 3342 struct rte_security_ctx *security_instance; 3343 #endif 3344 struct dpaa_sec_qp *qp; 3345 uint32_t i, flags; 3346 int ret; 3347 3348 PMD_INIT_FUNC_TRACE(); 3349 3350 cryptodev->driver_id = cryptodev_driver_id; 3351 cryptodev->dev_ops = &crypto_ops; 3352 3353 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst; 3354 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst; 3355 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3356 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3357 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3358 RTE_CRYPTODEV_FF_SECURITY | 3359 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3360 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3361 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3362 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3363 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3364 3365 internals = cryptodev->data->dev_private; 3366 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; 3367 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS; 3368 3369 /* 3370 * For secondary processes, we don't initialise any further as primary 3371 * has already done this work. Only check we don't need a different 3372 * RX function 3373 */ 3374 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3375 DPAA_SEC_WARN("Device already init by primary process"); 3376 return 0; 3377 } 3378 #ifdef RTE_LIBRTE_SECURITY 3379 /* Initialize security_ctx only for primary process*/ 3380 security_instance = rte_malloc("rte_security_instances_ops", 3381 sizeof(struct rte_security_ctx), 0); 3382 if (security_instance == NULL) 3383 return -ENOMEM; 3384 security_instance->device = (void *)cryptodev; 3385 security_instance->ops = &dpaa_sec_security_ops; 3386 security_instance->sess_cnt = 0; 3387 cryptodev->security_ctx = security_instance; 3388 #endif 3389 rte_spinlock_init(&internals->lock); 3390 for (i = 0; i < internals->max_nb_queue_pairs; i++) { 3391 /* init qman fq for queue pair */ 3392 qp = &internals->qps[i]; 3393 ret = dpaa_sec_init_tx(&qp->outq); 3394 if (ret) { 3395 DPAA_SEC_ERR("config tx of queue pair %d", i); 3396 goto init_error; 3397 } 3398 } 3399 3400 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | 3401 QMAN_FQ_FLAG_TO_DCPORTAL; 3402 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 3403 /* create rx qman fq for sessions*/ 3404 ret = qman_create_fq(0, flags, &internals->inq[i]); 3405 if (unlikely(ret != 0)) { 3406 DPAA_SEC_ERR("sec qman_create_fq failed"); 3407 goto init_error; 3408 } 3409 } 3410 3411 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name); 3412 return 0; 3413 3414 init_error: 3415 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); 3416 3417 rte_free(cryptodev->security_ctx); 3418 return -EFAULT; 3419 } 3420 3421 static int 3422 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, 3423 struct rte_dpaa_device *dpaa_dev) 3424 { 3425 struct rte_cryptodev *cryptodev; 3426 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3427 3428 int retval; 3429 3430 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name); 3431 3432 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3433 if (cryptodev == NULL) 3434 return -ENOMEM; 3435 3436 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3437 cryptodev->data->dev_private = rte_zmalloc_socket( 3438 "cryptodev private structure", 3439 sizeof(struct 
dpaa_sec_dev_private), 3440 RTE_CACHE_LINE_SIZE, 3441 rte_socket_id()); 3442 3443 if (cryptodev->data->dev_private == NULL) 3444 rte_panic("Cannot allocate memzone for private " 3445 "device data"); 3446 } 3447 3448 dpaa_dev->crypto_dev = cryptodev; 3449 cryptodev->device = &dpaa_dev->device; 3450 3451 /* init user callbacks */ 3452 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3453 3454 /* if sec device version is not configured */ 3455 if (!rta_get_sec_era()) { 3456 const struct device_node *caam_node; 3457 3458 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 3459 const uint32_t *prop = of_get_property(caam_node, 3460 "fsl,sec-era", 3461 NULL); 3462 if (prop) { 3463 rta_set_sec_era( 3464 INTL_SEC_ERA(rte_cpu_to_be_32(*prop))); 3465 break; 3466 } 3467 } 3468 } 3469 3470 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 3471 retval = rte_dpaa_portal_init((void *)1); 3472 if (retval) { 3473 DPAA_SEC_ERR("Unable to initialize portal"); 3474 goto out; 3475 } 3476 } 3477 3478 /* Invoke PMD device initialization function */ 3479 retval = dpaa_sec_dev_init(cryptodev); 3480 if (retval == 0) 3481 return 0; 3482 3483 retval = -ENXIO; 3484 out: 3485 /* In case of error, cleanup is done */ 3486 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3487 rte_free(cryptodev->data->dev_private); 3488 3489 rte_cryptodev_pmd_release_device(cryptodev); 3490 3491 return retval; 3492 } 3493 3494 static int 3495 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev) 3496 { 3497 struct rte_cryptodev *cryptodev; 3498 int ret; 3499 3500 cryptodev = dpaa_dev->crypto_dev; 3501 if (cryptodev == NULL) 3502 return -ENODEV; 3503 3504 ret = dpaa_sec_uninit(cryptodev); 3505 if (ret) 3506 return ret; 3507 3508 return rte_cryptodev_pmd_destroy(cryptodev); 3509 } 3510 3511 static struct rte_dpaa_driver rte_dpaa_sec_driver = { 3512 .drv_type = FSL_DPAA_CRYPTO, 3513 .driver = { 3514 .name = "DPAA SEC PMD" 3515 }, 3516 .probe = cryptodev_dpaa_sec_probe, 3517 .remove = cryptodev_dpaa_sec_remove, 3518 }; 3519 3520 static struct cryptodev_driver dpaa_sec_crypto_drv; 3521 3522 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); 3523 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, 3524 cryptodev_driver_id); 3525 3526 RTE_INIT(dpaa_sec_init_log) 3527 { 3528 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa"); 3529 if (dpaa_logtype_sec >= 0) 3530 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE); 3531 } 3532