/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		struct qman_fq *fq,
		const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as caam chan so that
 * all the packets in this queue can be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Something is put into in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT
	 * flag; without it the hardware can return up to two more buffers
	 * than requested, so we request two fewer in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
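		/* Keep a copy of the received digest in the op ctx so that
		 * the SEC engine can compare it against the hash it computes
		 * (same approach as in build_auth_only() below).
		 */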
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

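	/* For SNOW3G/ZUC the cipher length and offset are expressed in bits;
	 * they must be byte aligned and are converted to bytes below.
	 */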
	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
			  dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

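/* Note: all build_*() helpers above and below emit the same compound-frame
 * layout that dqrr_out_fq_cb_rx()/dpaa_sec_deq() expect on the way back:
 * cf->sg[0] describes the output, cf->sg[1] the input, and the FD built in
 * dpaa_sec_enqueue_burst() points at cf->sg with the qm_fd_compound format.
 */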
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

#ifdef RTE_LIBRTE_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				 DPAA_SEC_BURST : nb_ops;
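		/* Build one compound FD per op; the whole batch is enqueued
		 * at the send_pkts label below.
		 */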
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
							~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIBRTE_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
						" New qp = %p\n",
						ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
			    ((op->sym->m_dst == NULL) ||
			     rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in the descriptor and is
			 * overwritten here in fd.cmd, which updates the
			 * DPOVRD register.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIBRTE_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
			  __rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
		 dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
				CTX_POOL_NUM_BUFS,
				CTX_POOL_BUF_SIZE,
				CTX_POOL_CACHE_SIZE, 0,
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
			      dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		rte_free(session->cipher_key.data);
		return -1;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		rte_free(session->auth_key.data);
		return -1;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2125 DIR_ENC : DIR_DEC; 2126 2127 return 0; 2128 } 2129 2130 static int 2131 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused, 2132 struct rte_crypto_sym_xform *xform, 2133 dpaa_sec_session *session) 2134 { 2135 2136 struct rte_crypto_cipher_xform *cipher_xform; 2137 struct rte_crypto_auth_xform *auth_xform; 2138 2139 session->ctxt = DPAA_SEC_CIPHER_HASH; 2140 if (session->auth_cipher_text) { 2141 cipher_xform = &xform->cipher; 2142 auth_xform = &xform->next->auth; 2143 } else { 2144 cipher_xform = &xform->next->cipher; 2145 auth_xform = &xform->auth; 2146 } 2147 2148 /* Set IV parameters */ 2149 session->iv.offset = cipher_xform->iv.offset; 2150 session->iv.length = cipher_xform->iv.length; 2151 2152 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2153 RTE_CACHE_LINE_SIZE); 2154 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2155 DPAA_SEC_ERR("No Memory for cipher key"); 2156 return -1; 2157 } 2158 session->cipher_key.length = cipher_xform->key.length; 2159 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2160 RTE_CACHE_LINE_SIZE); 2161 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2162 DPAA_SEC_ERR("No Memory for auth key"); 2163 rte_free(session->cipher_key.data); 2164 return -ENOMEM; 2165 } 2166 session->auth_key.length = auth_xform->key.length; 2167 memcpy(session->cipher_key.data, cipher_xform->key.data, 2168 cipher_xform->key.length); 2169 memcpy(session->auth_key.data, auth_xform->key.data, 2170 auth_xform->key.length); 2171 2172 session->digest_length = auth_xform->digest_length; 2173 session->auth_alg = auth_xform->algo; 2174 2175 switch (auth_xform->algo) { 2176 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2177 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2178 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2179 break; 2180 case RTE_CRYPTO_AUTH_MD5_HMAC: 2181 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2182 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2183 break; 2184 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2185 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2186 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2187 break; 2188 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2189 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2190 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2191 break; 2192 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2193 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2194 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2195 break; 2196 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2197 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2198 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2199 break; 2200 default: 2201 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u", 2202 auth_xform->algo); 2203 goto error_out; 2204 } 2205 2206 session->cipher_alg = cipher_xform->algo; 2207 2208 switch (cipher_xform->algo) { 2209 case RTE_CRYPTO_CIPHER_AES_CBC: 2210 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2211 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2212 break; 2213 case RTE_CRYPTO_CIPHER_3DES_CBC: 2214 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2215 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2216 break; 2217 case RTE_CRYPTO_CIPHER_AES_CTR: 2218 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2219 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2220 break; 2221 default: 2222 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2223 cipher_xform->algo); 2224 goto error_out; 2225 } 2226 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2227 DIR_ENC : DIR_DEC; 2228 return 0; 2229 2230 error_out: 2231 rte_free(session->cipher_key.data); 2232 rte_free(session->auth_key.data); 2233 return -1; 2234 } 2235 2236 static int 2237 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, 2238 struct rte_crypto_sym_xform *xform, 2239 dpaa_sec_session *session) 2240 { 2241 session->aead_alg = xform->aead.algo; 2242 session->ctxt = DPAA_SEC_AEAD; 2243 session->iv.length = xform->aead.iv.length; 2244 session->iv.offset = xform->aead.iv.offset; 2245 session->auth_only_len = xform->aead.aad_length; 2246 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, 2247 RTE_CACHE_LINE_SIZE); 2248 if (session->aead_key.data == NULL && xform->aead.key.length > 0) { 2249 DPAA_SEC_ERR("No Memory for aead key\n"); 2250 return -ENOMEM; 2251 } 2252 session->aead_key.length = xform->aead.key.length; 2253 session->digest_length = xform->aead.digest_length; 2254 2255 memcpy(session->aead_key.data, xform->aead.key.data, 2256 xform->aead.key.length); 2257 2258 switch (session->aead_alg) { 2259 case RTE_CRYPTO_AEAD_AES_GCM: 2260 session->aead_key.alg = OP_ALG_ALGSEL_AES; 2261 session->aead_key.algmode = OP_ALG_AAI_GCM; 2262 break; 2263 default: 2264 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg); 2265 rte_free(session->aead_key.data); 2266 return -ENOMEM; 2267 } 2268 2269 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2270 DIR_ENC : DIR_DEC; 2271 2272 return 0; 2273 } 2274 2275 static struct qman_fq * 2276 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) 2277 { 2278 unsigned int i; 2279 2280 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2281 if (qi->inq_attach[i] == 0) { 2282 qi->inq_attach[i] = 1; 2283 return &qi->inq[i]; 2284 } 2285 } 2286 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions); 2287 2288 return NULL; 2289 } 2290 2291 static int 2292 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) 2293 { 2294 unsigned int i; 2295 2296 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2297 if (&qi->inq[i] == fq) { 2298 if (qman_retire_fq(fq, NULL) != 0) 2299 DPAA_SEC_WARN("Queue is not retired\n"); 2300 qman_oos_fq(fq); 2301 qi->inq_attach[i] = 0; 2302 return 0; 2303 } 2304 } 2305 return -1; 2306 } 2307 2308 static int 2309 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) 2310 { 2311 int ret; 2312 2313 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp; 2314 ret = dpaa_sec_prep_cdb(sess); 2315 if (ret) { 2316 DPAA_SEC_ERR("Unable to prepare sec cdb"); 2317 return -1; 2318 } 2319 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 2320 ret = rte_dpaa_portal_init((void *)0); 2321 if (ret) { 2322 DPAA_SEC_ERR("Failure in affining portal"); 2323 return ret; 2324 } 2325 } 2326 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES], 2327 rte_dpaa_mem_vtop(&sess->cdb), 2328 qman_fq_fqid(&qp->outq)); 2329 if (ret) 2330 DPAA_SEC_ERR("Unable to init sec queue"); 2331 2332 return ret; 2333 } 2334 2335 static int 2336 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, 2337 struct rte_crypto_sym_xform *xform, void *sess) 2338 { 2339 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2340 dpaa_sec_session *session = sess; 2341 uint32_t i; 2342 int ret; 2343 2344 PMD_INIT_FUNC_TRACE(); 2345 2346 if (unlikely(sess == NULL)) { 2347 DPAA_SEC_ERR("invalid session struct"); 2348 return -EINVAL; 2349 } 2350 memset(session, 0, sizeof(dpaa_sec_session)); 2351 2352 /* Default IV length = 0 */ 2353 session->iv.length = 0; 2354 2355 /* Cipher Only */ 2356 if (xform->type 
== RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2357 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2358 ret = dpaa_sec_cipher_init(dev, xform, session); 2359 2360 /* Authentication Only */ 2361 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2362 xform->next == NULL) { 2363 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2364 session->ctxt = DPAA_SEC_AUTH; 2365 ret = dpaa_sec_auth_init(dev, xform, session); 2366 2367 /* Cipher then Authenticate */ 2368 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2369 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2370 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 2371 session->auth_cipher_text = 1; 2372 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2373 ret = dpaa_sec_auth_init(dev, xform, session); 2374 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2375 ret = dpaa_sec_cipher_init(dev, xform, session); 2376 else 2377 ret = dpaa_sec_chain_init(dev, xform, session); 2378 } else { 2379 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2380 return -EINVAL; 2381 } 2382 /* Authenticate then Cipher */ 2383 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2384 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2385 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 2386 session->auth_cipher_text = 0; 2387 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2388 ret = dpaa_sec_cipher_init(dev, xform, session); 2389 else if (xform->next->cipher.algo 2390 == RTE_CRYPTO_CIPHER_NULL) 2391 ret = dpaa_sec_auth_init(dev, xform, session); 2392 else 2393 ret = dpaa_sec_chain_init(dev, xform, session); 2394 } else { 2395 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2396 return -EINVAL; 2397 } 2398 2399 /* AEAD operation for AES-GCM kind of Algorithms */ 2400 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2401 xform->next == NULL) { 2402 ret = dpaa_sec_aead_init(dev, xform, session); 2403 2404 } else { 2405 DPAA_SEC_ERR("Invalid crypto type"); 2406 return -EINVAL; 2407 } 2408 if (ret) { 2409 DPAA_SEC_ERR("unable to init session"); 2410 goto err1; 2411 } 2412 2413 rte_spinlock_lock(&internals->lock); 2414 for (i = 0; i < MAX_DPAA_CORES; i++) { 2415 session->inq[i] = dpaa_sec_attach_rxq(internals); 2416 if (session->inq[i] == NULL) { 2417 DPAA_SEC_ERR("unable to attach sec queue"); 2418 rte_spinlock_unlock(&internals->lock); 2419 goto err1; 2420 } 2421 } 2422 rte_spinlock_unlock(&internals->lock); 2423 2424 return 0; 2425 2426 err1: 2427 rte_free(session->cipher_key.data); 2428 rte_free(session->auth_key.data); 2429 memset(session, 0, sizeof(dpaa_sec_session)); 2430 2431 return -EINVAL; 2432 } 2433 2434 static int 2435 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, 2436 struct rte_crypto_sym_xform *xform, 2437 struct rte_cryptodev_sym_session *sess, 2438 struct rte_mempool *mempool) 2439 { 2440 void *sess_private_data; 2441 int ret; 2442 2443 PMD_INIT_FUNC_TRACE(); 2444 2445 if (rte_mempool_get(mempool, &sess_private_data)) { 2446 DPAA_SEC_ERR("Couldn't get object from session mempool"); 2447 return -ENOMEM; 2448 } 2449 2450 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); 2451 if (ret != 0) { 2452 DPAA_SEC_ERR("failed to configure session parameters"); 2453 2454 /* Return session to mempool */ 2455 rte_mempool_put(mempool, sess_private_data); 2456 return ret; 2457 } 2458 2459 set_sym_session_private_data(sess, dev->driver_id, 2460 sess_private_data); 2461 2462 2463 return 0; 2464 } 2465 2466 static inline void 2467 free_session_memory(struct rte_cryptodev *dev, 
dpaa_sec_session *s) 2468 { 2469 struct dpaa_sec_dev_private *qi = dev->data->dev_private; 2470 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s); 2471 uint8_t i; 2472 2473 for (i = 0; i < MAX_DPAA_CORES; i++) { 2474 if (s->inq[i]) 2475 dpaa_sec_detach_rxq(qi, s->inq[i]); 2476 s->inq[i] = NULL; 2477 s->qp[i] = NULL; 2478 } 2479 rte_free(s->cipher_key.data); 2480 rte_free(s->auth_key.data); 2481 memset(s, 0, sizeof(dpaa_sec_session)); 2482 rte_mempool_put(sess_mp, (void *)s); 2483 } 2484 2485 /** Clear the memory of session so it doesn't leave key material behind */ 2486 static void 2487 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, 2488 struct rte_cryptodev_sym_session *sess) 2489 { 2490 PMD_INIT_FUNC_TRACE(); 2491 uint8_t index = dev->driver_id; 2492 void *sess_priv = get_sym_session_private_data(sess, index); 2493 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2494 2495 if (sess_priv) { 2496 free_session_memory(dev, s); 2497 set_sym_session_private_data(sess, index, NULL); 2498 } 2499 } 2500 2501 #ifdef RTE_LIBRTE_SECURITY 2502 static int 2503 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2504 struct rte_security_ipsec_xform *ipsec_xform, 2505 dpaa_sec_session *session) 2506 { 2507 PMD_INIT_FUNC_TRACE(); 2508 2509 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2510 RTE_CACHE_LINE_SIZE); 2511 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2512 DPAA_SEC_ERR("No Memory for aead key"); 2513 return -1; 2514 } 2515 memcpy(session->aead_key.data, aead_xform->key.data, 2516 aead_xform->key.length); 2517 2518 session->digest_length = aead_xform->digest_length; 2519 session->aead_key.length = aead_xform->key.length; 2520 2521 switch (aead_xform->algo) { 2522 case RTE_CRYPTO_AEAD_AES_GCM: 2523 switch (session->digest_length) { 2524 case 8: 2525 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8; 2526 break; 2527 case 12: 2528 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12; 2529 break; 2530 case 16: 2531 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16; 2532 break; 2533 default: 2534 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d", 2535 session->digest_length); 2536 return -1; 2537 } 2538 if (session->dir == DIR_ENC) { 2539 memcpy(session->encap_pdb.gcm.salt, 2540 (uint8_t *)&(ipsec_xform->salt), 4); 2541 } else { 2542 memcpy(session->decap_pdb.gcm.salt, 2543 (uint8_t *)&(ipsec_xform->salt), 4); 2544 } 2545 session->aead_key.algmode = OP_ALG_AAI_GCM; 2546 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2547 break; 2548 default: 2549 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u", 2550 aead_xform->algo); 2551 return -1; 2552 } 2553 return 0; 2554 } 2555 2556 static int 2557 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2558 struct rte_crypto_auth_xform *auth_xform, 2559 struct rte_security_ipsec_xform *ipsec_xform, 2560 dpaa_sec_session *session) 2561 { 2562 if (cipher_xform) { 2563 session->cipher_key.data = rte_zmalloc(NULL, 2564 cipher_xform->key.length, 2565 RTE_CACHE_LINE_SIZE); 2566 if (session->cipher_key.data == NULL && 2567 cipher_xform->key.length > 0) { 2568 DPAA_SEC_ERR("No Memory for cipher key"); 2569 return -ENOMEM; 2570 } 2571 2572 session->cipher_key.length = cipher_xform->key.length; 2573 memcpy(session->cipher_key.data, cipher_xform->key.data, 2574 cipher_xform->key.length); 2575 session->cipher_alg = cipher_xform->algo; 2576 } else { 2577 session->cipher_key.data = NULL; 2578 session->cipher_key.length = 0; 2579 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2580 } 2581 2582 
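	/*
	 * The block below mirrors the cipher handling above for the auth
	 * transform: copy the key (or record a NULL-auth session when no
	 * auth xform is supplied), then translate the DPDK algorithm enum
	 * into the OP_PCL_IPSEC_* selector expected by the CAAM IPsec
	 * protocol descriptor.
	 */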
if (auth_xform) { 2583 session->auth_key.data = rte_zmalloc(NULL, 2584 auth_xform->key.length, 2585 RTE_CACHE_LINE_SIZE); 2586 if (session->auth_key.data == NULL && 2587 auth_xform->key.length > 0) { 2588 DPAA_SEC_ERR("No Memory for auth key"); 2589 return -ENOMEM; 2590 } 2591 session->auth_key.length = auth_xform->key.length; 2592 memcpy(session->auth_key.data, auth_xform->key.data, 2593 auth_xform->key.length); 2594 session->auth_alg = auth_xform->algo; 2595 session->digest_length = auth_xform->digest_length; 2596 } else { 2597 session->auth_key.data = NULL; 2598 session->auth_key.length = 0; 2599 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2600 } 2601 2602 switch (session->auth_alg) { 2603 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2604 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96; 2605 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2606 break; 2607 case RTE_CRYPTO_AUTH_MD5_HMAC: 2608 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96; 2609 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2610 break; 2611 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2612 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2613 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2614 if (session->digest_length != 16) 2615 DPAA_SEC_WARN( 2616 "+++Using sha256-hmac truncated len is non-standard," 2617 "it will not work with lookaside proto"); 2618 break; 2619 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2620 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2621 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2622 break; 2623 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2624 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2625 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2626 break; 2627 case RTE_CRYPTO_AUTH_AES_CMAC: 2628 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96; 2629 break; 2630 case RTE_CRYPTO_AUTH_NULL: 2631 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL; 2632 break; 2633 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2634 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2635 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2636 case RTE_CRYPTO_AUTH_SHA1: 2637 case RTE_CRYPTO_AUTH_SHA256: 2638 case RTE_CRYPTO_AUTH_SHA512: 2639 case RTE_CRYPTO_AUTH_SHA224: 2640 case RTE_CRYPTO_AUTH_SHA384: 2641 case RTE_CRYPTO_AUTH_MD5: 2642 case RTE_CRYPTO_AUTH_AES_GMAC: 2643 case RTE_CRYPTO_AUTH_KASUMI_F9: 2644 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2645 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2646 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2647 session->auth_alg); 2648 return -1; 2649 default: 2650 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u", 2651 session->auth_alg); 2652 return -1; 2653 } 2654 2655 switch (session->cipher_alg) { 2656 case RTE_CRYPTO_CIPHER_AES_CBC: 2657 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC; 2658 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2659 break; 2660 case RTE_CRYPTO_CIPHER_3DES_CBC: 2661 session->cipher_key.alg = OP_PCL_IPSEC_3DES; 2662 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2663 break; 2664 case RTE_CRYPTO_CIPHER_AES_CTR: 2665 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR; 2666 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2667 if (session->dir == DIR_ENC) { 2668 session->encap_pdb.ctr.ctr_initial = 0x00000001; 2669 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2670 } else { 2671 session->decap_pdb.ctr.ctr_initial = 0x00000001; 2672 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2673 } 2674 break; 2675 case RTE_CRYPTO_CIPHER_NULL: 2676 session->cipher_key.alg = OP_PCL_IPSEC_NULL; 2677 break; 2678 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2679 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2680 case RTE_CRYPTO_CIPHER_3DES_ECB: 2681 case 
RTE_CRYPTO_CIPHER_AES_ECB: 2682 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2683 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2684 session->cipher_alg); 2685 return -1; 2686 default: 2687 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2688 session->cipher_alg); 2689 return -1; 2690 } 2691 2692 return 0; 2693 } 2694 2695 static int 2696 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, 2697 struct rte_security_session_conf *conf, 2698 void *sess) 2699 { 2700 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2701 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2702 struct rte_crypto_auth_xform *auth_xform = NULL; 2703 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2704 struct rte_crypto_aead_xform *aead_xform = NULL; 2705 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2706 uint32_t i; 2707 int ret; 2708 2709 PMD_INIT_FUNC_TRACE(); 2710 2711 memset(session, 0, sizeof(dpaa_sec_session)); 2712 session->proto_alg = conf->protocol; 2713 session->ctxt = DPAA_SEC_IPSEC; 2714 2715 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 2716 session->dir = DIR_ENC; 2717 else 2718 session->dir = DIR_DEC; 2719 2720 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2721 cipher_xform = &conf->crypto_xform->cipher; 2722 if (conf->crypto_xform->next) 2723 auth_xform = &conf->crypto_xform->next->auth; 2724 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2725 ipsec_xform, session); 2726 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2727 auth_xform = &conf->crypto_xform->auth; 2728 if (conf->crypto_xform->next) 2729 cipher_xform = &conf->crypto_xform->next->cipher; 2730 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2731 ipsec_xform, session); 2732 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2733 aead_xform = &conf->crypto_xform->aead; 2734 ret = dpaa_sec_ipsec_aead_init(aead_xform, 2735 ipsec_xform, session); 2736 } else { 2737 DPAA_SEC_ERR("XFORM not specified"); 2738 ret = -EINVAL; 2739 goto out; 2740 } 2741 if (ret) { 2742 DPAA_SEC_ERR("Failed to process xform"); 2743 goto out; 2744 } 2745 2746 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2747 if (ipsec_xform->tunnel.type == 2748 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2749 session->ip4_hdr.ip_v = IPVERSION; 2750 session->ip4_hdr.ip_hl = 5; 2751 session->ip4_hdr.ip_len = rte_cpu_to_be_16( 2752 sizeof(session->ip4_hdr)); 2753 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2754 session->ip4_hdr.ip_id = 0; 2755 session->ip4_hdr.ip_off = 0; 2756 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2757 session->ip4_hdr.ip_p = (ipsec_xform->proto == 2758 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2759 IPPROTO_ESP : IPPROTO_AH; 2760 session->ip4_hdr.ip_sum = 0; 2761 session->ip4_hdr.ip_src = 2762 ipsec_xform->tunnel.ipv4.src_ip; 2763 session->ip4_hdr.ip_dst = 2764 ipsec_xform->tunnel.ipv4.dst_ip; 2765 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 2766 (void *)&session->ip4_hdr, 2767 sizeof(struct ip)); 2768 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 2769 } else if (ipsec_xform->tunnel.type == 2770 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2771 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2772 DPAA_IPv6_DEFAULT_VTC_FLOW | 2773 ((ipsec_xform->tunnel.ipv6.dscp << 2774 RTE_IPV6_HDR_TC_SHIFT) & 2775 RTE_IPV6_HDR_TC_MASK) | 2776 ((ipsec_xform->tunnel.ipv6.flabel << 2777 RTE_IPV6_HDR_FL_SHIFT) & 2778 RTE_IPV6_HDR_FL_MASK)); 2779 /* Payload length will be updated by HW */ 2780 session->ip6_hdr.payload_len = 0; 2781 session->ip6_hdr.hop_limits = 2782 ipsec_xform->tunnel.ipv6.hlimit; 2783 session->ip6_hdr.proto = (ipsec_xform->proto == 2784 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2785 IPPROTO_ESP : IPPROTO_AH; 2786 memcpy(&session->ip6_hdr.src_addr, 2787 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2788 memcpy(&session->ip6_hdr.dst_addr, 2789 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2790 session->encap_pdb.ip_hdr_len = 2791 sizeof(struct rte_ipv6_hdr); 2792 } 2793 session->encap_pdb.options = 2794 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2795 PDBOPTS_ESP_OIHI_PDB_INL | 2796 PDBOPTS_ESP_IVSRC | 2797 PDBHMO_ESP_ENCAP_DTTL | 2798 PDBHMO_ESP_SNR; 2799 if (ipsec_xform->options.esn) 2800 session->encap_pdb.options |= PDBOPTS_ESP_ESN; 2801 session->encap_pdb.spi = ipsec_xform->spi; 2802 2803 } else if (ipsec_xform->direction == 2804 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2805 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) 2806 session->decap_pdb.options = sizeof(struct ip) << 16; 2807 else 2808 session->decap_pdb.options = 2809 sizeof(struct rte_ipv6_hdr) << 16; 2810 if (ipsec_xform->options.esn) 2811 session->decap_pdb.options |= PDBOPTS_ESP_ESN; 2812 if (ipsec_xform->replay_win_sz) { 2813 uint32_t win_sz; 2814 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2815 2816 switch (win_sz) { 2817 case 1: 2818 case 2: 2819 case 4: 2820 case 8: 2821 case 16: 2822 case 32: 2823 session->decap_pdb.options |= PDBOPTS_ESP_ARS32; 2824 break; 2825 case 64: 2826 session->decap_pdb.options |= PDBOPTS_ESP_ARS64; 2827 break; 2828 default: 2829 session->decap_pdb.options |= 2830 PDBOPTS_ESP_ARS128; 2831 } 2832 } 2833 } else 2834 goto out; 2835 rte_spinlock_lock(&internals->lock); 2836 for (i = 0; i < MAX_DPAA_CORES; i++) { 2837 session->inq[i] = dpaa_sec_attach_rxq(internals); 2838 if (session->inq[i] == NULL) { 2839 DPAA_SEC_ERR("unable to attach sec queue"); 2840 rte_spinlock_unlock(&internals->lock); 2841 goto out; 2842 } 2843 } 2844 rte_spinlock_unlock(&internals->lock); 2845 2846 return 0; 2847 out: 2848 rte_free(session->auth_key.data); 2849 rte_free(session->cipher_key.data); 2850 memset(session, 0, sizeof(dpaa_sec_session)); 2851 return -1; 2852 } 2853 2854 static int 2855 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, 2856 struct rte_security_session_conf *conf, 2857 void *sess) 2858 { 2859 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2860 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2861 struct rte_crypto_auth_xform *auth_xform = NULL; 2862 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2863 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2864 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; 2865 uint32_t i; 2866 2867 
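	/*
	 * PDCP session setup: pick the cipher/auth xforms out of the chain,
	 * map them to the PDCP_CIPHER_TYPE_* and PDCP_AUTH_TYPE_* selectors
	 * used by the RTA PDCP descriptors, copy the keys, record the PDCP
	 * parameters (domain, bearer, direction, SN size, HFN and HFN
	 * override), and finally attach one Rx frame queue per lcore.
	 */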
PMD_INIT_FUNC_TRACE(); 2868 2869 memset(session, 0, sizeof(dpaa_sec_session)); 2870 2871 /* find xfrm types */ 2872 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2873 cipher_xform = &xform->cipher; 2874 if (xform->next != NULL) 2875 auth_xform = &xform->next->auth; 2876 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2877 auth_xform = &xform->auth; 2878 if (xform->next != NULL) 2879 cipher_xform = &xform->next->cipher; 2880 } else { 2881 DPAA_SEC_ERR("Invalid crypto type"); 2882 return -EINVAL; 2883 } 2884 2885 session->proto_alg = conf->protocol; 2886 session->ctxt = DPAA_SEC_PDCP; 2887 2888 if (cipher_xform) { 2889 switch (cipher_xform->algo) { 2890 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2891 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW; 2892 break; 2893 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2894 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC; 2895 break; 2896 case RTE_CRYPTO_CIPHER_AES_CTR: 2897 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES; 2898 break; 2899 case RTE_CRYPTO_CIPHER_NULL: 2900 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL; 2901 break; 2902 default: 2903 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2904 session->cipher_alg); 2905 return -1; 2906 } 2907 2908 session->cipher_key.data = rte_zmalloc(NULL, 2909 cipher_xform->key.length, 2910 RTE_CACHE_LINE_SIZE); 2911 if (session->cipher_key.data == NULL && 2912 cipher_xform->key.length > 0) { 2913 DPAA_SEC_ERR("No Memory for cipher key"); 2914 return -ENOMEM; 2915 } 2916 session->cipher_key.length = cipher_xform->key.length; 2917 memcpy(session->cipher_key.data, cipher_xform->key.data, 2918 cipher_xform->key.length); 2919 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2920 DIR_ENC : DIR_DEC; 2921 session->cipher_alg = cipher_xform->algo; 2922 } else { 2923 session->cipher_key.data = NULL; 2924 session->cipher_key.length = 0; 2925 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2926 session->dir = DIR_ENC; 2927 } 2928 2929 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 2930 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 && 2931 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) { 2932 DPAA_SEC_ERR( 2933 "PDCP Seq Num size should be 5/12 bits for cmode"); 2934 goto out; 2935 } 2936 } 2937 2938 if (auth_xform) { 2939 switch (auth_xform->algo) { 2940 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2941 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW; 2942 break; 2943 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2944 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC; 2945 break; 2946 case RTE_CRYPTO_AUTH_AES_CMAC: 2947 session->auth_key.alg = PDCP_AUTH_TYPE_AES; 2948 break; 2949 case RTE_CRYPTO_AUTH_NULL: 2950 session->auth_key.alg = PDCP_AUTH_TYPE_NULL; 2951 break; 2952 default: 2953 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2954 session->auth_alg); 2955 rte_free(session->cipher_key.data); 2956 return -1; 2957 } 2958 session->auth_key.data = rte_zmalloc(NULL, 2959 auth_xform->key.length, 2960 RTE_CACHE_LINE_SIZE); 2961 if (!session->auth_key.data && 2962 auth_xform->key.length > 0) { 2963 DPAA_SEC_ERR("No Memory for auth key"); 2964 rte_free(session->cipher_key.data); 2965 return -ENOMEM; 2966 } 2967 session->auth_key.length = auth_xform->key.length; 2968 memcpy(session->auth_key.data, auth_xform->key.data, 2969 auth_xform->key.length); 2970 session->auth_alg = auth_xform->algo; 2971 } else { 2972 session->auth_key.data = NULL; 2973 session->auth_key.length = 0; 2974 session->auth_alg = 0; 2975 } 2976 session->pdcp.domain = pdcp_xform->domain; 2977 session->pdcp.bearer = pdcp_xform->bearer; 2978 
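	/*
	 * Remaining PDCP protocol parameters (packet direction, SN size,
	 * HFN, HFN threshold and HFN override) are taken directly from the
	 * security xform supplied by the application.
	 */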
session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 2979 session->pdcp.sn_size = pdcp_xform->sn_size; 2980 session->pdcp.hfn = pdcp_xform->hfn; 2981 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 2982 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 2983 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 2984 2985 rte_spinlock_lock(&dev_priv->lock); 2986 for (i = 0; i < MAX_DPAA_CORES; i++) { 2987 session->inq[i] = dpaa_sec_attach_rxq(dev_priv); 2988 if (session->inq[i] == NULL) { 2989 DPAA_SEC_ERR("unable to attach sec queue"); 2990 rte_spinlock_unlock(&dev_priv->lock); 2991 goto out; 2992 } 2993 } 2994 rte_spinlock_unlock(&dev_priv->lock); 2995 return 0; 2996 out: 2997 rte_free(session->auth_key.data); 2998 rte_free(session->cipher_key.data); 2999 memset(session, 0, sizeof(dpaa_sec_session)); 3000 return -1; 3001 } 3002 3003 static int 3004 dpaa_sec_security_session_create(void *dev, 3005 struct rte_security_session_conf *conf, 3006 struct rte_security_session *sess, 3007 struct rte_mempool *mempool) 3008 { 3009 void *sess_private_data; 3010 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3011 int ret; 3012 3013 if (rte_mempool_get(mempool, &sess_private_data)) { 3014 DPAA_SEC_ERR("Couldn't get object from session mempool"); 3015 return -ENOMEM; 3016 } 3017 3018 switch (conf->protocol) { 3019 case RTE_SECURITY_PROTOCOL_IPSEC: 3020 ret = dpaa_sec_set_ipsec_session(cdev, conf, 3021 sess_private_data); 3022 break; 3023 case RTE_SECURITY_PROTOCOL_PDCP: 3024 ret = dpaa_sec_set_pdcp_session(cdev, conf, 3025 sess_private_data); 3026 break; 3027 case RTE_SECURITY_PROTOCOL_MACSEC: 3028 return -ENOTSUP; 3029 default: 3030 return -EINVAL; 3031 } 3032 if (ret != 0) { 3033 DPAA_SEC_ERR("failed to configure session parameters"); 3034 /* Return session to mempool */ 3035 rte_mempool_put(mempool, sess_private_data); 3036 return ret; 3037 } 3038 3039 set_sec_session_private_data(sess, sess_private_data); 3040 3041 return ret; 3042 } 3043 3044 /** Clear the memory of session so it doesn't leave key material behind */ 3045 static int 3046 dpaa_sec_security_session_destroy(void *dev __rte_unused, 3047 struct rte_security_session *sess) 3048 { 3049 PMD_INIT_FUNC_TRACE(); 3050 void *sess_priv = get_sec_session_private_data(sess); 3051 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 3052 3053 if (sess_priv) { 3054 free_session_memory((struct rte_cryptodev *)dev, s); 3055 set_sec_session_private_data(sess, NULL); 3056 } 3057 return 0; 3058 } 3059 #endif 3060 static int 3061 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3062 struct rte_cryptodev_config *config __rte_unused) 3063 { 3064 PMD_INIT_FUNC_TRACE(); 3065 3066 return 0; 3067 } 3068 3069 static int 3070 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused) 3071 { 3072 PMD_INIT_FUNC_TRACE(); 3073 return 0; 3074 } 3075 3076 static void 3077 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused) 3078 { 3079 PMD_INIT_FUNC_TRACE(); 3080 } 3081 3082 static int 3083 dpaa_sec_dev_close(struct rte_cryptodev *dev) 3084 { 3085 PMD_INIT_FUNC_TRACE(); 3086 3087 if (dev == NULL) 3088 return -ENOMEM; 3089 3090 return 0; 3091 } 3092 3093 static void 3094 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev, 3095 struct rte_cryptodev_info *info) 3096 { 3097 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 3098 3099 PMD_INIT_FUNC_TRACE(); 3100 if (info != NULL) { 3101 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3102 info->feature_flags = dev->feature_flags; 3103 info->capabilities = 
dpaa_sec_capabilities; 3104 info->sym.max_nb_sessions = internals->max_nb_sessions; 3105 info->driver_id = cryptodev_driver_id; 3106 } 3107 } 3108 3109 static enum qman_cb_dqrr_result 3110 dpaa_sec_process_parallel_event(void *event, 3111 struct qman_portal *qm __always_unused, 3112 struct qman_fq *outq, 3113 const struct qm_dqrr_entry *dqrr, 3114 void **bufs) 3115 { 3116 const struct qm_fd *fd; 3117 struct dpaa_sec_job *job; 3118 struct dpaa_sec_op_ctx *ctx; 3119 struct rte_event *ev = (struct rte_event *)event; 3120 3121 fd = &dqrr->fd; 3122 3123 /* sg is embedded in an op ctx, 3124 * sg[0] is for output 3125 * sg[1] for input 3126 */ 3127 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3128 3129 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3130 ctx->fd_status = fd->status; 3131 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3132 struct qm_sg_entry *sg_out; 3133 uint32_t len; 3134 3135 sg_out = &job->sg[0]; 3136 hw_sg_to_cpu(sg_out); 3137 len = sg_out->length; 3138 ctx->op->sym->m_src->pkt_len = len; 3139 ctx->op->sym->m_src->data_len = len; 3140 } 3141 if (!ctx->fd_status) { 3142 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3143 } else { 3144 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3145 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3146 } 3147 ev->event_ptr = (void *)ctx->op; 3148 3149 ev->flow_id = outq->ev.flow_id; 3150 ev->sub_event_type = outq->ev.sub_event_type; 3151 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3152 ev->op = RTE_EVENT_OP_NEW; 3153 ev->sched_type = outq->ev.sched_type; 3154 ev->queue_id = outq->ev.queue_id; 3155 ev->priority = outq->ev.priority; 3156 *bufs = (void *)ctx->op; 3157 3158 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3159 3160 return qman_cb_dqrr_consume; 3161 } 3162 3163 static enum qman_cb_dqrr_result 3164 dpaa_sec_process_atomic_event(void *event, 3165 struct qman_portal *qm __rte_unused, 3166 struct qman_fq *outq, 3167 const struct qm_dqrr_entry *dqrr, 3168 void **bufs) 3169 { 3170 u8 index; 3171 const struct qm_fd *fd; 3172 struct dpaa_sec_job *job; 3173 struct dpaa_sec_op_ctx *ctx; 3174 struct rte_event *ev = (struct rte_event *)event; 3175 3176 fd = &dqrr->fd; 3177 3178 /* sg is embedded in an op ctx, 3179 * sg[0] is for output 3180 * sg[1] for input 3181 */ 3182 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3183 3184 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3185 ctx->fd_status = fd->status; 3186 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3187 struct qm_sg_entry *sg_out; 3188 uint32_t len; 3189 3190 sg_out = &job->sg[0]; 3191 hw_sg_to_cpu(sg_out); 3192 len = sg_out->length; 3193 ctx->op->sym->m_src->pkt_len = len; 3194 ctx->op->sym->m_src->data_len = len; 3195 } 3196 if (!ctx->fd_status) { 3197 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3198 } else { 3199 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3200 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3201 } 3202 ev->event_ptr = (void *)ctx->op; 3203 ev->flow_id = outq->ev.flow_id; 3204 ev->sub_event_type = outq->ev.sub_event_type; 3205 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3206 ev->op = RTE_EVENT_OP_NEW; 3207 ev->sched_type = outq->ev.sched_type; 3208 ev->queue_id = outq->ev.queue_id; 3209 ev->priority = outq->ev.priority; 3210 3211 /* Save active dqrr entries */ 3212 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1); 3213 DPAA_PER_LCORE_DQRR_SIZE++; 3214 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 3215 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src; 3216 ev->impl_opaque = index + 1; 3217 
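	/* The same DQRR index is also stashed in the mbuf so that the DPAA
	 * release path can locate and consume the held DQRR entry once the
	 * application has finished processing this atomic event.
	 */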
ctx->op->sym->m_src->seqn = (uint32_t)index + 1; 3218 *bufs = (void *)ctx->op; 3219 3220 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3221 3222 return qman_cb_dqrr_defer; 3223 } 3224 3225 int 3226 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, 3227 int qp_id, 3228 uint16_t ch_id, 3229 const struct rte_event *event) 3230 { 3231 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3232 struct qm_mcc_initfq opts = {0}; 3233 3234 int ret; 3235 3236 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3237 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3238 opts.fqd.dest.channel = ch_id; 3239 3240 switch (event->sched_type) { 3241 case RTE_SCHED_TYPE_ATOMIC: 3242 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 3243 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 3244 * configuration with HOLD_ACTIVE setting 3245 */ 3246 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 3247 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; 3248 break; 3249 case RTE_SCHED_TYPE_ORDERED: 3250 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n"); 3251 return -1; 3252 default: 3253 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 3254 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event; 3255 break; 3256 } 3257 3258 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts); 3259 if (unlikely(ret)) { 3260 DPAA_SEC_ERR("unable to init caam source fq!"); 3261 return ret; 3262 } 3263 3264 memcpy(&qp->outq.ev, event, sizeof(struct rte_event)); 3265 3266 return 0; 3267 } 3268 3269 int 3270 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev, 3271 int qp_id) 3272 { 3273 struct qm_mcc_initfq opts = {0}; 3274 int ret; 3275 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3276 3277 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3278 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3279 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx; 3280 qp->outq.cb.ern = ern_sec_fq_handler; 3281 qman_retire_fq(&qp->outq, NULL); 3282 qman_oos_fq(&qp->outq); 3283 ret = qman_init_fq(&qp->outq, 0, &opts); 3284 if (ret) 3285 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret); 3286 qp->outq.cb.dqrr = NULL; 3287 3288 return ret; 3289 } 3290 3291 static struct rte_cryptodev_ops crypto_ops = { 3292 .dev_configure = dpaa_sec_dev_configure, 3293 .dev_start = dpaa_sec_dev_start, 3294 .dev_stop = dpaa_sec_dev_stop, 3295 .dev_close = dpaa_sec_dev_close, 3296 .dev_infos_get = dpaa_sec_dev_infos_get, 3297 .queue_pair_setup = dpaa_sec_queue_pair_setup, 3298 .queue_pair_release = dpaa_sec_queue_pair_release, 3299 .queue_pair_count = dpaa_sec_queue_pair_count, 3300 .sym_session_get_size = dpaa_sec_sym_session_get_size, 3301 .sym_session_configure = dpaa_sec_sym_session_configure, 3302 .sym_session_clear = dpaa_sec_sym_session_clear 3303 }; 3304 3305 #ifdef RTE_LIBRTE_SECURITY 3306 static const struct rte_security_capability * 3307 dpaa_sec_capabilities_get(void *device __rte_unused) 3308 { 3309 return dpaa_sec_security_cap; 3310 } 3311 3312 static const struct rte_security_ops dpaa_sec_security_ops = { 3313 .session_create = dpaa_sec_security_session_create, 3314 .session_update = NULL, 3315 .session_stats_get = NULL, 3316 .session_destroy = dpaa_sec_security_session_destroy, 3317 .set_pkt_metadata = NULL, 3318 .capabilities_get = dpaa_sec_capabilities_get 3319 }; 3320 #endif 3321 static int 3322 dpaa_sec_uninit(struct rte_cryptodev *dev) 3323 { 3324 struct dpaa_sec_dev_private *internals; 3325 3326 if (dev == NULL) 3327 return -ENODEV; 3328 3329 internals = dev->data->dev_private; 3330 
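	/* Free the rte_security context and the per-device private data
	 * that were allocated during probe()/dev_init().
	 */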
rte_free(dev->security_ctx); 3331 3332 rte_free(internals); 3333 3334 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", 3335 dev->data->name, rte_socket_id()); 3336 3337 return 0; 3338 } 3339 3340 static int 3341 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) 3342 { 3343 struct dpaa_sec_dev_private *internals; 3344 #ifdef RTE_LIBRTE_SECURITY 3345 struct rte_security_ctx *security_instance; 3346 #endif 3347 struct dpaa_sec_qp *qp; 3348 uint32_t i, flags; 3349 int ret; 3350 3351 PMD_INIT_FUNC_TRACE(); 3352 3353 cryptodev->driver_id = cryptodev_driver_id; 3354 cryptodev->dev_ops = &crypto_ops; 3355 3356 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst; 3357 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst; 3358 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3359 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3360 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3361 RTE_CRYPTODEV_FF_SECURITY | 3362 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3363 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3364 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3365 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3366 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3367 3368 internals = cryptodev->data->dev_private; 3369 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; 3370 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS; 3371 3372 /* 3373 * For secondary processes, we don't initialise any further as primary 3374 * has already done this work. Only check we don't need a different 3375 * RX function 3376 */ 3377 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3378 DPAA_SEC_WARN("Device already init by primary process"); 3379 return 0; 3380 } 3381 #ifdef RTE_LIBRTE_SECURITY 3382 /* Initialize security_ctx only for primary process*/ 3383 security_instance = rte_malloc("rte_security_instances_ops", 3384 sizeof(struct rte_security_ctx), 0); 3385 if (security_instance == NULL) 3386 return -ENOMEM; 3387 security_instance->device = (void *)cryptodev; 3388 security_instance->ops = &dpaa_sec_security_ops; 3389 security_instance->sess_cnt = 0; 3390 cryptodev->security_ctx = security_instance; 3391 #endif 3392 rte_spinlock_init(&internals->lock); 3393 for (i = 0; i < internals->max_nb_queue_pairs; i++) { 3394 /* init qman fq for queue pair */ 3395 qp = &internals->qps[i]; 3396 ret = dpaa_sec_init_tx(&qp->outq); 3397 if (ret) { 3398 DPAA_SEC_ERR("config tx of queue pair %d", i); 3399 goto init_error; 3400 } 3401 } 3402 3403 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | 3404 QMAN_FQ_FLAG_TO_DCPORTAL; 3405 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 3406 /* create rx qman fq for sessions*/ 3407 ret = qman_create_fq(0, flags, &internals->inq[i]); 3408 if (unlikely(ret != 0)) { 3409 DPAA_SEC_ERR("sec qman_create_fq failed"); 3410 goto init_error; 3411 } 3412 } 3413 3414 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name); 3415 return 0; 3416 3417 init_error: 3418 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); 3419 3420 dpaa_sec_uninit(cryptodev); 3421 return -EFAULT; 3422 } 3423 3424 static int 3425 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, 3426 struct rte_dpaa_device *dpaa_dev) 3427 { 3428 struct rte_cryptodev *cryptodev; 3429 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3430 3431 int retval; 3432 3433 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name); 3434 3435 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3436 if (cryptodev == NULL) 3437 return -ENOMEM; 3438 3439 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 
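		/* Only the primary process allocates the private device data;
		 * secondary processes reuse the shared cryptodev data already
		 * populated by the primary.
		 */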
3440 cryptodev->data->dev_private = rte_zmalloc_socket( 3441 "cryptodev private structure", 3442 sizeof(struct dpaa_sec_dev_private), 3443 RTE_CACHE_LINE_SIZE, 3444 rte_socket_id()); 3445 3446 if (cryptodev->data->dev_private == NULL) 3447 rte_panic("Cannot allocate memzone for private " 3448 "device data"); 3449 } 3450 3451 dpaa_dev->crypto_dev = cryptodev; 3452 cryptodev->device = &dpaa_dev->device; 3453 3454 /* init user callbacks */ 3455 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3456 3457 /* if sec device version is not configured */ 3458 if (!rta_get_sec_era()) { 3459 const struct device_node *caam_node; 3460 3461 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 3462 const uint32_t *prop = of_get_property(caam_node, 3463 "fsl,sec-era", 3464 NULL); 3465 if (prop) { 3466 rta_set_sec_era( 3467 INTL_SEC_ERA(rte_cpu_to_be_32(*prop))); 3468 break; 3469 } 3470 } 3471 } 3472 3473 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 3474 retval = rte_dpaa_portal_init((void *)1); 3475 if (retval) { 3476 DPAA_SEC_ERR("Unable to initialize portal"); 3477 return retval; 3478 } 3479 } 3480 3481 /* Invoke PMD device initialization function */ 3482 retval = dpaa_sec_dev_init(cryptodev); 3483 if (retval == 0) 3484 return 0; 3485 3486 /* In case of error, cleanup is done */ 3487 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3488 rte_free(cryptodev->data->dev_private); 3489 3490 rte_cryptodev_pmd_release_device(cryptodev); 3491 3492 return -ENXIO; 3493 } 3494 3495 static int 3496 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev) 3497 { 3498 struct rte_cryptodev *cryptodev; 3499 int ret; 3500 3501 cryptodev = dpaa_dev->crypto_dev; 3502 if (cryptodev == NULL) 3503 return -ENODEV; 3504 3505 ret = dpaa_sec_uninit(cryptodev); 3506 if (ret) 3507 return ret; 3508 3509 return rte_cryptodev_pmd_destroy(cryptodev); 3510 } 3511 3512 static struct rte_dpaa_driver rte_dpaa_sec_driver = { 3513 .drv_type = FSL_DPAA_CRYPTO, 3514 .driver = { 3515 .name = "DPAA SEC PMD" 3516 }, 3517 .probe = cryptodev_dpaa_sec_probe, 3518 .remove = cryptodev_dpaa_sec_remove, 3519 }; 3520 3521 static struct cryptodev_driver dpaa_sec_crypto_drv; 3522 3523 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); 3524 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, 3525 cryptodev_driver_id); 3526 3527 RTE_INIT(dpaa_sec_init_log) 3528 { 3529 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa"); 3530 if (dpaa_logtype_sec >= 0) 3531 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE); 3532 } 3533
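/*
 * Illustrative usage sketch (not part of the driver): one way an
 * application might bring up a dpaa_sec device and run a symmetric
 * session on queue pair 0. The device name "dpaa_sec-1" and the
 * objects sess_mp, sess_priv_mp, xform, ops, nb_ops and NB_DESC are
 * application-side assumptions, not symbols defined by this PMD.
 *
 *	struct rte_cryptodev_config dev_conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = NB_DESC,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *	int dev_id = rte_cryptodev_get_dev_id("dpaa_sec-1");
 *
 *	rte_cryptodev_configure(dev_id, &dev_conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp);
 *
 *	// enqueue/dequeue crypto ops that reference the session
 *	rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	rte_cryptodev_dequeue_burst(dev_id, 0, ops, nb_ops);
 */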