/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as the CAAM chan so that
 * all packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
169 ctx->op->sym->m_src : ctx->op->sym->m_dst; 170 171 sg_out = &job->sg[0]; 172 hw_sg_to_cpu(sg_out); 173 len = sg_out->length; 174 mbuf->pkt_len = len; 175 while (mbuf->next != NULL) { 176 len -= mbuf->data_len; 177 mbuf = mbuf->next; 178 } 179 mbuf->data_len = len; 180 } 181 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op; 182 dpaa_sec_op_ending(ctx); 183 184 return qman_cb_dqrr_consume; 185 } 186 187 /* caam result is put into this queue */ 188 static int 189 dpaa_sec_init_tx(struct qman_fq *fq) 190 { 191 int ret; 192 struct qm_mcc_initfq opts; 193 uint32_t flags; 194 195 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED | 196 QMAN_FQ_FLAG_DYNAMIC_FQID; 197 198 ret = qman_create_fq(0, flags, fq); 199 if (unlikely(ret)) { 200 DPAA_SEC_ERR("qman_create_fq failed"); 201 return ret; 202 } 203 204 memset(&opts, 0, sizeof(opts)); 205 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 206 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 207 208 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */ 209 210 fq->cb.dqrr = dqrr_out_fq_cb_rx; 211 fq->cb.ern = ern_sec_fq_handler; 212 213 ret = qman_init_fq(fq, 0, &opts); 214 if (unlikely(ret)) { 215 DPAA_SEC_ERR("unable to init caam source fq!"); 216 return ret; 217 } 218 219 return ret; 220 } 221 222 static inline int is_encode(dpaa_sec_session *ses) 223 { 224 return ses->dir == DIR_ENC; 225 } 226 227 static inline int is_decode(dpaa_sec_session *ses) 228 { 229 return ses->dir == DIR_DEC; 230 } 231 232 #ifdef RTE_LIBRTE_SECURITY 233 static int 234 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses) 235 { 236 struct alginfo authdata = {0}, cipherdata = {0}; 237 struct sec_cdb *cdb = &ses->cdb; 238 struct alginfo *p_authdata = NULL; 239 int32_t shared_desc_len = 0; 240 int err; 241 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 242 int swap = false; 243 #else 244 int swap = true; 245 #endif 246 247 cipherdata.key = (size_t)ses->cipher_key.data; 248 cipherdata.keylen = ses->cipher_key.length; 249 cipherdata.key_enc_flags = 0; 250 cipherdata.key_type = RTA_DATA_IMM; 251 cipherdata.algtype = ses->cipher_key.alg; 252 cipherdata.algmode = ses->cipher_key.algmode; 253 254 cdb->sh_desc[0] = cipherdata.keylen; 255 cdb->sh_desc[1] = 0; 256 cdb->sh_desc[2] = 0; 257 258 if (ses->auth_alg) { 259 authdata.key = (size_t)ses->auth_key.data; 260 authdata.keylen = ses->auth_key.length; 261 authdata.key_enc_flags = 0; 262 authdata.key_type = RTA_DATA_IMM; 263 authdata.algtype = ses->auth_key.alg; 264 authdata.algmode = ses->auth_key.algmode; 265 266 p_authdata = &authdata; 267 268 cdb->sh_desc[1] = authdata.keylen; 269 } 270 271 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 272 MIN_JOB_DESC_SIZE, 273 (unsigned int *)cdb->sh_desc, 274 &cdb->sh_desc[2], 2); 275 if (err < 0) { 276 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 277 return err; 278 } 279 280 if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) { 281 cipherdata.key = 282 (size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key); 283 cipherdata.key_type = RTA_DATA_PTR; 284 } 285 if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) { 286 authdata.key = 287 (size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key); 288 authdata.key_type = RTA_DATA_PTR; 289 } 290 291 cdb->sh_desc[0] = 0; 292 cdb->sh_desc[1] = 0; 293 cdb->sh_desc[2] = 0; 294 295 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 296 if (ses->dir == DIR_ENC) 297 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap( 298 cdb->sh_desc, 1, swap, 299 ses->pdcp.hfn, 300 ses->pdcp.sn_size, 301 ses->pdcp.bearer, 302 ses->pdcp.pkt_dir, 303 
ses->pdcp.hfn_threshold, 304 &cipherdata, &authdata, 305 0); 306 else if (ses->dir == DIR_DEC) 307 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap( 308 cdb->sh_desc, 1, swap, 309 ses->pdcp.hfn, 310 ses->pdcp.sn_size, 311 ses->pdcp.bearer, 312 ses->pdcp.pkt_dir, 313 ses->pdcp.hfn_threshold, 314 &cipherdata, &authdata, 315 0); 316 } else { 317 if (ses->dir == DIR_ENC) 318 shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap( 319 cdb->sh_desc, 1, swap, 320 ses->pdcp.sn_size, 321 ses->pdcp.hfn, 322 ses->pdcp.bearer, 323 ses->pdcp.pkt_dir, 324 ses->pdcp.hfn_threshold, 325 &cipherdata, p_authdata, 0); 326 else if (ses->dir == DIR_DEC) 327 shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap( 328 cdb->sh_desc, 1, swap, 329 ses->pdcp.sn_size, 330 ses->pdcp.hfn, 331 ses->pdcp.bearer, 332 ses->pdcp.pkt_dir, 333 ses->pdcp.hfn_threshold, 334 &cipherdata, p_authdata, 0); 335 } 336 return shared_desc_len; 337 } 338 339 /* prepare ipsec proto command block of the session */ 340 static int 341 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses) 342 { 343 struct alginfo cipherdata = {0}, authdata = {0}; 344 struct sec_cdb *cdb = &ses->cdb; 345 int32_t shared_desc_len = 0; 346 int err; 347 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 348 int swap = false; 349 #else 350 int swap = true; 351 #endif 352 353 cipherdata.key = (size_t)ses->cipher_key.data; 354 cipherdata.keylen = ses->cipher_key.length; 355 cipherdata.key_enc_flags = 0; 356 cipherdata.key_type = RTA_DATA_IMM; 357 cipherdata.algtype = ses->cipher_key.alg; 358 cipherdata.algmode = ses->cipher_key.algmode; 359 360 if (ses->auth_key.length) { 361 authdata.key = (size_t)ses->auth_key.data; 362 authdata.keylen = ses->auth_key.length; 363 authdata.key_enc_flags = 0; 364 authdata.key_type = RTA_DATA_IMM; 365 authdata.algtype = ses->auth_key.alg; 366 authdata.algmode = ses->auth_key.algmode; 367 } 368 369 cdb->sh_desc[0] = cipherdata.keylen; 370 cdb->sh_desc[1] = authdata.keylen; 371 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 372 MIN_JOB_DESC_SIZE, 373 (unsigned int *)cdb->sh_desc, 374 &cdb->sh_desc[2], 2); 375 376 if (err < 0) { 377 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 378 return err; 379 } 380 if (cdb->sh_desc[2] & 1) 381 cipherdata.key_type = RTA_DATA_IMM; 382 else { 383 cipherdata.key = (size_t)rte_dpaa_mem_vtop( 384 (void *)(size_t)cipherdata.key); 385 cipherdata.key_type = RTA_DATA_PTR; 386 } 387 if (cdb->sh_desc[2] & (1<<1)) 388 authdata.key_type = RTA_DATA_IMM; 389 else { 390 authdata.key = (size_t)rte_dpaa_mem_vtop( 391 (void *)(size_t)authdata.key); 392 authdata.key_type = RTA_DATA_PTR; 393 } 394 395 cdb->sh_desc[0] = 0; 396 cdb->sh_desc[1] = 0; 397 cdb->sh_desc[2] = 0; 398 if (ses->dir == DIR_ENC) { 399 shared_desc_len = cnstr_shdsc_ipsec_new_encap( 400 cdb->sh_desc, 401 true, swap, SHR_SERIAL, 402 &ses->encap_pdb, 403 (uint8_t *)&ses->ip4_hdr, 404 &cipherdata, &authdata); 405 } else if (ses->dir == DIR_DEC) { 406 shared_desc_len = cnstr_shdsc_ipsec_new_decap( 407 cdb->sh_desc, 408 true, swap, SHR_SERIAL, 409 &ses->decap_pdb, 410 &cipherdata, &authdata); 411 } 412 return shared_desc_len; 413 } 414 #endif 415 /* prepare command block of the session */ 416 static int 417 dpaa_sec_prep_cdb(dpaa_sec_session *ses) 418 { 419 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0}; 420 int32_t shared_desc_len = 0; 421 struct sec_cdb *cdb = &ses->cdb; 422 int err; 423 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 424 int swap = false; 425 #else 426 int swap = true; 427 #endif 428 429 memset(cdb, 0, sizeof(struct sec_cdb)); 430 431 switch 
(ses->ctxt) { 432 #ifdef RTE_LIBRTE_SECURITY 433 case DPAA_SEC_IPSEC: 434 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses); 435 break; 436 case DPAA_SEC_PDCP: 437 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses); 438 break; 439 #endif 440 case DPAA_SEC_CIPHER: 441 alginfo_c.key = (size_t)ses->cipher_key.data; 442 alginfo_c.keylen = ses->cipher_key.length; 443 alginfo_c.key_enc_flags = 0; 444 alginfo_c.key_type = RTA_DATA_IMM; 445 alginfo_c.algtype = ses->cipher_key.alg; 446 alginfo_c.algmode = ses->cipher_key.algmode; 447 448 switch (ses->cipher_alg) { 449 case RTE_CRYPTO_CIPHER_AES_CBC: 450 case RTE_CRYPTO_CIPHER_3DES_CBC: 451 case RTE_CRYPTO_CIPHER_AES_CTR: 452 case RTE_CRYPTO_CIPHER_3DES_CTR: 453 shared_desc_len = cnstr_shdsc_blkcipher( 454 cdb->sh_desc, true, 455 swap, SHR_NEVER, &alginfo_c, 456 ses->iv.length, 457 ses->dir); 458 break; 459 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 460 shared_desc_len = cnstr_shdsc_snow_f8( 461 cdb->sh_desc, true, swap, 462 &alginfo_c, 463 ses->dir); 464 break; 465 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 466 shared_desc_len = cnstr_shdsc_zuce( 467 cdb->sh_desc, true, swap, 468 &alginfo_c, 469 ses->dir); 470 break; 471 default: 472 DPAA_SEC_ERR("unsupported cipher alg %d", 473 ses->cipher_alg); 474 return -ENOTSUP; 475 } 476 break; 477 case DPAA_SEC_AUTH: 478 alginfo_a.key = (size_t)ses->auth_key.data; 479 alginfo_a.keylen = ses->auth_key.length; 480 alginfo_a.key_enc_flags = 0; 481 alginfo_a.key_type = RTA_DATA_IMM; 482 alginfo_a.algtype = ses->auth_key.alg; 483 alginfo_a.algmode = ses->auth_key.algmode; 484 switch (ses->auth_alg) { 485 case RTE_CRYPTO_AUTH_MD5_HMAC: 486 case RTE_CRYPTO_AUTH_SHA1_HMAC: 487 case RTE_CRYPTO_AUTH_SHA224_HMAC: 488 case RTE_CRYPTO_AUTH_SHA256_HMAC: 489 case RTE_CRYPTO_AUTH_SHA384_HMAC: 490 case RTE_CRYPTO_AUTH_SHA512_HMAC: 491 shared_desc_len = cnstr_shdsc_hmac( 492 cdb->sh_desc, true, 493 swap, SHR_NEVER, &alginfo_a, 494 !ses->dir, 495 ses->digest_length); 496 break; 497 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 498 shared_desc_len = cnstr_shdsc_snow_f9( 499 cdb->sh_desc, true, swap, 500 &alginfo_a, 501 !ses->dir, 502 ses->digest_length); 503 break; 504 case RTE_CRYPTO_AUTH_ZUC_EIA3: 505 shared_desc_len = cnstr_shdsc_zuca( 506 cdb->sh_desc, true, swap, 507 &alginfo_a, 508 !ses->dir, 509 ses->digest_length); 510 break; 511 default: 512 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg); 513 } 514 break; 515 case DPAA_SEC_AEAD: 516 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { 517 DPAA_SEC_ERR("not supported aead alg"); 518 return -ENOTSUP; 519 } 520 alginfo.key = (size_t)ses->aead_key.data; 521 alginfo.keylen = ses->aead_key.length; 522 alginfo.key_enc_flags = 0; 523 alginfo.key_type = RTA_DATA_IMM; 524 alginfo.algtype = ses->aead_key.alg; 525 alginfo.algmode = ses->aead_key.algmode; 526 527 if (ses->dir == DIR_ENC) 528 shared_desc_len = cnstr_shdsc_gcm_encap( 529 cdb->sh_desc, true, swap, SHR_NEVER, 530 &alginfo, 531 ses->iv.length, 532 ses->digest_length); 533 else 534 shared_desc_len = cnstr_shdsc_gcm_decap( 535 cdb->sh_desc, true, swap, SHR_NEVER, 536 &alginfo, 537 ses->iv.length, 538 ses->digest_length); 539 break; 540 case DPAA_SEC_CIPHER_HASH: 541 alginfo_c.key = (size_t)ses->cipher_key.data; 542 alginfo_c.keylen = ses->cipher_key.length; 543 alginfo_c.key_enc_flags = 0; 544 alginfo_c.key_type = RTA_DATA_IMM; 545 alginfo_c.algtype = ses->cipher_key.alg; 546 alginfo_c.algmode = ses->cipher_key.algmode; 547 548 alginfo_a.key = (size_t)ses->auth_key.data; 549 alginfo_a.keylen = ses->auth_key.length; 550 
alginfo_a.key_enc_flags = 0; 551 alginfo_a.key_type = RTA_DATA_IMM; 552 alginfo_a.algtype = ses->auth_key.alg; 553 alginfo_a.algmode = ses->auth_key.algmode; 554 555 cdb->sh_desc[0] = alginfo_c.keylen; 556 cdb->sh_desc[1] = alginfo_a.keylen; 557 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 558 MIN_JOB_DESC_SIZE, 559 (unsigned int *)cdb->sh_desc, 560 &cdb->sh_desc[2], 2); 561 562 if (err < 0) { 563 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 564 return err; 565 } 566 if (cdb->sh_desc[2] & 1) 567 alginfo_c.key_type = RTA_DATA_IMM; 568 else { 569 alginfo_c.key = (size_t)rte_dpaa_mem_vtop( 570 (void *)(size_t)alginfo_c.key); 571 alginfo_c.key_type = RTA_DATA_PTR; 572 } 573 if (cdb->sh_desc[2] & (1<<1)) 574 alginfo_a.key_type = RTA_DATA_IMM; 575 else { 576 alginfo_a.key = (size_t)rte_dpaa_mem_vtop( 577 (void *)(size_t)alginfo_a.key); 578 alginfo_a.key_type = RTA_DATA_PTR; 579 } 580 cdb->sh_desc[0] = 0; 581 cdb->sh_desc[1] = 0; 582 cdb->sh_desc[2] = 0; 583 /* Auth_only_len is set as 0 here and it will be 584 * overwritten in fd for each packet. 585 */ 586 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc, 587 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a, 588 ses->iv.length, 589 ses->digest_length, ses->dir); 590 break; 591 case DPAA_SEC_HASH_CIPHER: 592 default: 593 DPAA_SEC_ERR("error: Unsupported session"); 594 return -ENOTSUP; 595 } 596 597 if (shared_desc_len < 0) { 598 DPAA_SEC_ERR("error in preparing command block"); 599 return shared_desc_len; 600 } 601 602 cdb->sh_hdr.hi.field.idlen = shared_desc_len; 603 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word); 604 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word); 605 606 return 0; 607 } 608 609 /* qp is lockless, should be accessed by only one thread */ 610 static int 611 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) 612 { 613 struct qman_fq *fq; 614 unsigned int pkts = 0; 615 int num_rx_bufs, ret; 616 struct qm_dqrr_entry *dq; 617 uint32_t vdqcr_flags = 0; 618 619 fq = &qp->outq; 620 /* 621 * Until request for four buffers, we provide exact number of buffers. 622 * Otherwise we do not set the QM_VDQCR_EXACT flag. 623 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than 624 * requested, so we request two less in this case. 625 */ 626 if (nb_ops < 4) { 627 vdqcr_flags = QM_VDQCR_EXACT; 628 num_rx_bufs = nb_ops; 629 } else { 630 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ? 631 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2); 632 } 633 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); 634 if (ret) 635 return 0; 636 637 do { 638 const struct qm_fd *fd; 639 struct dpaa_sec_job *job; 640 struct dpaa_sec_op_ctx *ctx; 641 struct rte_crypto_op *op; 642 643 dq = qman_dequeue(fq); 644 if (!dq) 645 continue; 646 647 fd = &dq->fd; 648 /* sg is embedded in an op ctx, 649 * sg[0] is for output 650 * sg[1] for input 651 */ 652 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 653 654 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 655 ctx->fd_status = fd->status; 656 op = ctx->op; 657 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 658 struct qm_sg_entry *sg_out; 659 uint32_t len; 660 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ? 
				op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
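		/*
		 * For verification, the expected ICV supplied by the
		 * application is saved in the per-op scratch buffer and
		 * appended as the last input SG entry, so SEC checks it
		 * in-flight and reports a mismatch through the FD status.
		 */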
797 rte_memcpy(old_digest, sym->auth.digest.data, 798 ses->digest_length); 799 start_addr = rte_dpaa_mem_vtop(old_digest); 800 qm_sg_entry_set64(sg, start_addr); 801 sg->length = ses->digest_length; 802 in_sg->length += ses->digest_length; 803 } 804 sg->final = 1; 805 cpu_to_hw_sg(sg); 806 cpu_to_hw_sg(in_sg); 807 808 return cf; 809 } 810 811 /** 812 * packet looks like: 813 * |<----data_len------->| 814 * |ip_header|ah_header|icv|payload| 815 * ^ 816 * | 817 * mbuf->pkt.data 818 */ 819 static inline struct dpaa_sec_job * 820 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) 821 { 822 struct rte_crypto_sym_op *sym = op->sym; 823 struct rte_mbuf *mbuf = sym->m_src; 824 struct dpaa_sec_job *cf; 825 struct dpaa_sec_op_ctx *ctx; 826 struct qm_sg_entry *sg, *in_sg; 827 rte_iova_t start_addr; 828 uint8_t *old_digest; 829 int data_len, data_offset; 830 831 data_len = sym->auth.data.length; 832 data_offset = sym->auth.data.offset; 833 834 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 835 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 836 if ((data_len & 7) || (data_offset & 7)) { 837 DPAA_SEC_ERR("AUTH: len/offset must be full bytes"); 838 return NULL; 839 } 840 841 data_len = data_len >> 3; 842 data_offset = data_offset >> 3; 843 } 844 845 ctx = dpaa_sec_alloc_ctx(ses, 4); 846 if (!ctx) 847 return NULL; 848 849 cf = &ctx->job; 850 ctx->op = op; 851 old_digest = ctx->digest; 852 853 start_addr = rte_pktmbuf_iova(mbuf); 854 /* output */ 855 sg = &cf->sg[0]; 856 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 857 sg->length = ses->digest_length; 858 cpu_to_hw_sg(sg); 859 860 /* input */ 861 in_sg = &cf->sg[1]; 862 /* need to extend the input to a compound frame */ 863 in_sg->extension = 1; 864 in_sg->final = 1; 865 in_sg->length = data_len; 866 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 867 sg = &cf->sg[2]; 868 869 if (ses->iv.length) { 870 uint8_t *iv_ptr; 871 872 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 873 ses->iv.offset); 874 875 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 876 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 877 sg->length = 12; 878 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 879 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 880 sg->length = 8; 881 } else { 882 sg->length = ses->iv.length; 883 } 884 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr)); 885 in_sg->length += sg->length; 886 cpu_to_hw_sg(sg); 887 sg++; 888 } 889 890 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 891 sg->offset = data_offset; 892 sg->length = data_len; 893 894 if (is_decode(ses)) { 895 /* Digest verification case */ 896 cpu_to_hw_sg(sg); 897 /* hash result or digest, save digest first */ 898 rte_memcpy(old_digest, sym->auth.digest.data, 899 ses->digest_length); 900 /* let's check digest by hw */ 901 start_addr = rte_dpaa_mem_vtop(old_digest); 902 sg++; 903 qm_sg_entry_set64(sg, start_addr); 904 sg->length = ses->digest_length; 905 in_sg->length += ses->digest_length; 906 } 907 sg->final = 1; 908 cpu_to_hw_sg(sg); 909 cpu_to_hw_sg(in_sg); 910 911 return cf; 912 } 913 914 static inline struct dpaa_sec_job * 915 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 916 { 917 struct rte_crypto_sym_op *sym = op->sym; 918 struct dpaa_sec_job *cf; 919 struct dpaa_sec_op_ctx *ctx; 920 struct qm_sg_entry *sg, *out_sg, *in_sg; 921 struct rte_mbuf *mbuf; 922 uint8_t req_segs; 923 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 924 ses->iv.offset); 925 int data_len, data_offset; 926 927 data_len = sym->cipher.data.length; 928 data_offset = 
sym->cipher.data.offset; 929 930 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 931 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 932 if ((data_len & 7) || (data_offset & 7)) { 933 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes"); 934 return NULL; 935 } 936 937 data_len = data_len >> 3; 938 data_offset = data_offset >> 3; 939 } 940 941 if (sym->m_dst) { 942 mbuf = sym->m_dst; 943 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3; 944 } else { 945 mbuf = sym->m_src; 946 req_segs = mbuf->nb_segs * 2 + 3; 947 } 948 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 949 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d", 950 MAX_SG_ENTRIES); 951 return NULL; 952 } 953 954 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 955 if (!ctx) 956 return NULL; 957 958 cf = &ctx->job; 959 ctx->op = op; 960 961 /* output */ 962 out_sg = &cf->sg[0]; 963 out_sg->extension = 1; 964 out_sg->length = data_len; 965 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 966 cpu_to_hw_sg(out_sg); 967 968 /* 1st seg */ 969 sg = &cf->sg[2]; 970 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 971 sg->length = mbuf->data_len - data_offset; 972 sg->offset = data_offset; 973 974 /* Successive segs */ 975 mbuf = mbuf->next; 976 while (mbuf) { 977 cpu_to_hw_sg(sg); 978 sg++; 979 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 980 sg->length = mbuf->data_len; 981 mbuf = mbuf->next; 982 } 983 sg->final = 1; 984 cpu_to_hw_sg(sg); 985 986 /* input */ 987 mbuf = sym->m_src; 988 in_sg = &cf->sg[1]; 989 in_sg->extension = 1; 990 in_sg->final = 1; 991 in_sg->length = data_len + ses->iv.length; 992 993 sg++; 994 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 995 cpu_to_hw_sg(in_sg); 996 997 /* IV */ 998 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 999 sg->length = ses->iv.length; 1000 cpu_to_hw_sg(sg); 1001 1002 /* 1st seg */ 1003 sg++; 1004 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1005 sg->length = mbuf->data_len - data_offset; 1006 sg->offset = data_offset; 1007 1008 /* Successive segs */ 1009 mbuf = mbuf->next; 1010 while (mbuf) { 1011 cpu_to_hw_sg(sg); 1012 sg++; 1013 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1014 sg->length = mbuf->data_len; 1015 mbuf = mbuf->next; 1016 } 1017 sg->final = 1; 1018 cpu_to_hw_sg(sg); 1019 1020 return cf; 1021 } 1022 1023 static inline struct dpaa_sec_job * 1024 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses) 1025 { 1026 struct rte_crypto_sym_op *sym = op->sym; 1027 struct dpaa_sec_job *cf; 1028 struct dpaa_sec_op_ctx *ctx; 1029 struct qm_sg_entry *sg; 1030 rte_iova_t src_start_addr, dst_start_addr; 1031 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1032 ses->iv.offset); 1033 int data_len, data_offset; 1034 1035 data_len = sym->cipher.data.length; 1036 data_offset = sym->cipher.data.offset; 1037 1038 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1039 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1040 if ((data_len & 7) || (data_offset & 7)) { 1041 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes"); 1042 return NULL; 1043 } 1044 1045 data_len = data_len >> 3; 1046 data_offset = data_offset >> 3; 1047 } 1048 1049 ctx = dpaa_sec_alloc_ctx(ses, 4); 1050 if (!ctx) 1051 return NULL; 1052 1053 cf = &ctx->job; 1054 ctx->op = op; 1055 1056 src_start_addr = rte_pktmbuf_iova(sym->m_src); 1057 1058 if (sym->m_dst) 1059 dst_start_addr = rte_pktmbuf_iova(sym->m_dst); 1060 else 1061 dst_start_addr = src_start_addr; 1062 1063 /* output */ 1064 sg = &cf->sg[0]; 1065 qm_sg_entry_set64(sg, dst_start_addr + data_offset); 1066 sg->length 
= data_len + ses->iv.length; 1067 cpu_to_hw_sg(sg); 1068 1069 /* input */ 1070 sg = &cf->sg[1]; 1071 1072 /* need to extend the input to a compound frame */ 1073 sg->extension = 1; 1074 sg->final = 1; 1075 sg->length = data_len + ses->iv.length; 1076 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1077 cpu_to_hw_sg(sg); 1078 1079 sg = &cf->sg[2]; 1080 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1081 sg->length = ses->iv.length; 1082 cpu_to_hw_sg(sg); 1083 1084 sg++; 1085 qm_sg_entry_set64(sg, src_start_addr + data_offset); 1086 sg->length = data_len; 1087 sg->final = 1; 1088 cpu_to_hw_sg(sg); 1089 1090 return cf; 1091 } 1092 1093 static inline struct dpaa_sec_job * 1094 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1095 { 1096 struct rte_crypto_sym_op *sym = op->sym; 1097 struct dpaa_sec_job *cf; 1098 struct dpaa_sec_op_ctx *ctx; 1099 struct qm_sg_entry *sg, *out_sg, *in_sg; 1100 struct rte_mbuf *mbuf; 1101 uint8_t req_segs; 1102 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1103 ses->iv.offset); 1104 1105 if (sym->m_dst) { 1106 mbuf = sym->m_dst; 1107 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; 1108 } else { 1109 mbuf = sym->m_src; 1110 req_segs = mbuf->nb_segs * 2 + 4; 1111 } 1112 1113 if (ses->auth_only_len) 1114 req_segs++; 1115 1116 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1117 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d", 1118 MAX_SG_ENTRIES); 1119 return NULL; 1120 } 1121 1122 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1123 if (!ctx) 1124 return NULL; 1125 1126 cf = &ctx->job; 1127 ctx->op = op; 1128 1129 rte_prefetch0(cf->sg); 1130 1131 /* output */ 1132 out_sg = &cf->sg[0]; 1133 out_sg->extension = 1; 1134 if (is_encode(ses)) 1135 out_sg->length = sym->aead.data.length + ses->digest_length; 1136 else 1137 out_sg->length = sym->aead.data.length; 1138 1139 /* output sg entries */ 1140 sg = &cf->sg[2]; 1141 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg)); 1142 cpu_to_hw_sg(out_sg); 1143 1144 /* 1st seg */ 1145 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1146 sg->length = mbuf->data_len - sym->aead.data.offset; 1147 sg->offset = sym->aead.data.offset; 1148 1149 /* Successive segs */ 1150 mbuf = mbuf->next; 1151 while (mbuf) { 1152 cpu_to_hw_sg(sg); 1153 sg++; 1154 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1155 sg->length = mbuf->data_len; 1156 mbuf = mbuf->next; 1157 } 1158 sg->length -= ses->digest_length; 1159 1160 if (is_encode(ses)) { 1161 cpu_to_hw_sg(sg); 1162 /* set auth output */ 1163 sg++; 1164 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr); 1165 sg->length = ses->digest_length; 1166 } 1167 sg->final = 1; 1168 cpu_to_hw_sg(sg); 1169 1170 /* input */ 1171 mbuf = sym->m_src; 1172 in_sg = &cf->sg[1]; 1173 in_sg->extension = 1; 1174 in_sg->final = 1; 1175 if (is_encode(ses)) 1176 in_sg->length = ses->iv.length + sym->aead.data.length 1177 + ses->auth_only_len; 1178 else 1179 in_sg->length = ses->iv.length + sym->aead.data.length 1180 + ses->auth_only_len + ses->digest_length; 1181 1182 /* input sg entries */ 1183 sg++; 1184 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1185 cpu_to_hw_sg(in_sg); 1186 1187 /* 1st seg IV */ 1188 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1189 sg->length = ses->iv.length; 1190 cpu_to_hw_sg(sg); 1191 1192 /* 2nd seg auth only */ 1193 if (ses->auth_only_len) { 1194 sg++; 1195 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data)); 1196 sg->length = ses->auth_only_len; 1197 cpu_to_hw_sg(sg); 1198 } 1199 1200 /* 3rd seg */ 1201 sg++; 1202 
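	/*
	 * Payload: map the source mbuf chain segment by segment starting
	 * at aead.data.offset; for decryption the received digest is
	 * appended after the payload below.
	 */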
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1203 sg->length = mbuf->data_len - sym->aead.data.offset; 1204 sg->offset = sym->aead.data.offset; 1205 1206 /* Successive segs */ 1207 mbuf = mbuf->next; 1208 while (mbuf) { 1209 cpu_to_hw_sg(sg); 1210 sg++; 1211 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1212 sg->length = mbuf->data_len; 1213 mbuf = mbuf->next; 1214 } 1215 1216 if (is_decode(ses)) { 1217 cpu_to_hw_sg(sg); 1218 sg++; 1219 memcpy(ctx->digest, sym->aead.digest.data, 1220 ses->digest_length); 1221 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1222 sg->length = ses->digest_length; 1223 } 1224 sg->final = 1; 1225 cpu_to_hw_sg(sg); 1226 1227 return cf; 1228 } 1229 1230 static inline struct dpaa_sec_job * 1231 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) 1232 { 1233 struct rte_crypto_sym_op *sym = op->sym; 1234 struct dpaa_sec_job *cf; 1235 struct dpaa_sec_op_ctx *ctx; 1236 struct qm_sg_entry *sg; 1237 uint32_t length = 0; 1238 rte_iova_t src_start_addr, dst_start_addr; 1239 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1240 ses->iv.offset); 1241 1242 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off; 1243 1244 if (sym->m_dst) 1245 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off; 1246 else 1247 dst_start_addr = src_start_addr; 1248 1249 ctx = dpaa_sec_alloc_ctx(ses, 7); 1250 if (!ctx) 1251 return NULL; 1252 1253 cf = &ctx->job; 1254 ctx->op = op; 1255 1256 /* input */ 1257 rte_prefetch0(cf->sg); 1258 sg = &cf->sg[2]; 1259 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg)); 1260 if (is_encode(ses)) { 1261 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1262 sg->length = ses->iv.length; 1263 length += sg->length; 1264 cpu_to_hw_sg(sg); 1265 1266 sg++; 1267 if (ses->auth_only_len) { 1268 qm_sg_entry_set64(sg, 1269 rte_dpaa_mem_vtop(sym->aead.aad.data)); 1270 sg->length = ses->auth_only_len; 1271 length += sg->length; 1272 cpu_to_hw_sg(sg); 1273 sg++; 1274 } 1275 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset); 1276 sg->length = sym->aead.data.length; 1277 length += sg->length; 1278 sg->final = 1; 1279 cpu_to_hw_sg(sg); 1280 } else { 1281 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1282 sg->length = ses->iv.length; 1283 length += sg->length; 1284 cpu_to_hw_sg(sg); 1285 1286 sg++; 1287 if (ses->auth_only_len) { 1288 qm_sg_entry_set64(sg, 1289 rte_dpaa_mem_vtop(sym->aead.aad.data)); 1290 sg->length = ses->auth_only_len; 1291 length += sg->length; 1292 cpu_to_hw_sg(sg); 1293 sg++; 1294 } 1295 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset); 1296 sg->length = sym->aead.data.length; 1297 length += sg->length; 1298 cpu_to_hw_sg(sg); 1299 1300 memcpy(ctx->digest, sym->aead.digest.data, 1301 ses->digest_length); 1302 sg++; 1303 1304 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1305 sg->length = ses->digest_length; 1306 length += sg->length; 1307 sg->final = 1; 1308 cpu_to_hw_sg(sg); 1309 } 1310 /* input compound frame */ 1311 cf->sg[1].length = length; 1312 cf->sg[1].extension = 1; 1313 cf->sg[1].final = 1; 1314 cpu_to_hw_sg(&cf->sg[1]); 1315 1316 /* output */ 1317 sg++; 1318 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg)); 1319 qm_sg_entry_set64(sg, 1320 dst_start_addr + sym->aead.data.offset); 1321 sg->length = sym->aead.data.length; 1322 length = sg->length; 1323 if (is_encode(ses)) { 1324 cpu_to_hw_sg(sg); 1325 /* set auth output */ 1326 sg++; 1327 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr); 1328 sg->length = ses->digest_length; 1329 length += 
sg->length; 1330 } 1331 sg->final = 1; 1332 cpu_to_hw_sg(sg); 1333 1334 /* output compound frame */ 1335 cf->sg[0].length = length; 1336 cf->sg[0].extension = 1; 1337 cpu_to_hw_sg(&cf->sg[0]); 1338 1339 return cf; 1340 } 1341 1342 static inline struct dpaa_sec_job * 1343 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1344 { 1345 struct rte_crypto_sym_op *sym = op->sym; 1346 struct dpaa_sec_job *cf; 1347 struct dpaa_sec_op_ctx *ctx; 1348 struct qm_sg_entry *sg, *out_sg, *in_sg; 1349 struct rte_mbuf *mbuf; 1350 uint8_t req_segs; 1351 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1352 ses->iv.offset); 1353 1354 if (sym->m_dst) { 1355 mbuf = sym->m_dst; 1356 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; 1357 } else { 1358 mbuf = sym->m_src; 1359 req_segs = mbuf->nb_segs * 2 + 4; 1360 } 1361 1362 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1363 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d", 1364 MAX_SG_ENTRIES); 1365 return NULL; 1366 } 1367 1368 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1369 if (!ctx) 1370 return NULL; 1371 1372 cf = &ctx->job; 1373 ctx->op = op; 1374 1375 rte_prefetch0(cf->sg); 1376 1377 /* output */ 1378 out_sg = &cf->sg[0]; 1379 out_sg->extension = 1; 1380 if (is_encode(ses)) 1381 out_sg->length = sym->auth.data.length + ses->digest_length; 1382 else 1383 out_sg->length = sym->auth.data.length; 1384 1385 /* output sg entries */ 1386 sg = &cf->sg[2]; 1387 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg)); 1388 cpu_to_hw_sg(out_sg); 1389 1390 /* 1st seg */ 1391 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1392 sg->length = mbuf->data_len - sym->auth.data.offset; 1393 sg->offset = sym->auth.data.offset; 1394 1395 /* Successive segs */ 1396 mbuf = mbuf->next; 1397 while (mbuf) { 1398 cpu_to_hw_sg(sg); 1399 sg++; 1400 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1401 sg->length = mbuf->data_len; 1402 mbuf = mbuf->next; 1403 } 1404 sg->length -= ses->digest_length; 1405 1406 if (is_encode(ses)) { 1407 cpu_to_hw_sg(sg); 1408 /* set auth output */ 1409 sg++; 1410 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 1411 sg->length = ses->digest_length; 1412 } 1413 sg->final = 1; 1414 cpu_to_hw_sg(sg); 1415 1416 /* input */ 1417 mbuf = sym->m_src; 1418 in_sg = &cf->sg[1]; 1419 in_sg->extension = 1; 1420 in_sg->final = 1; 1421 if (is_encode(ses)) 1422 in_sg->length = ses->iv.length + sym->auth.data.length; 1423 else 1424 in_sg->length = ses->iv.length + sym->auth.data.length 1425 + ses->digest_length; 1426 1427 /* input sg entries */ 1428 sg++; 1429 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1430 cpu_to_hw_sg(in_sg); 1431 1432 /* 1st seg IV */ 1433 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1434 sg->length = ses->iv.length; 1435 cpu_to_hw_sg(sg); 1436 1437 /* 2nd seg */ 1438 sg++; 1439 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1440 sg->length = mbuf->data_len - sym->auth.data.offset; 1441 sg->offset = sym->auth.data.offset; 1442 1443 /* Successive segs */ 1444 mbuf = mbuf->next; 1445 while (mbuf) { 1446 cpu_to_hw_sg(sg); 1447 sg++; 1448 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1449 sg->length = mbuf->data_len; 1450 mbuf = mbuf->next; 1451 } 1452 1453 sg->length -= ses->digest_length; 1454 if (is_decode(ses)) { 1455 cpu_to_hw_sg(sg); 1456 sg++; 1457 memcpy(ctx->digest, sym->auth.digest.data, 1458 ses->digest_length); 1459 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1460 sg->length = ses->digest_length; 1461 } 1462 sg->final = 1; 1463 cpu_to_hw_sg(sg); 1464 1465 return cf; 1466 } 1467 1468 
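/*
 * Build a compound FD for chained cipher+auth on contiguous mbufs.
 * cf->sg[1] (input, extension) chains the IV and the auth range of the
 * source buffer, plus the expected digest when decrypting; cf->sg[0]
 * (output, extension) chains the cipher output at cipher.data.offset,
 * plus the digest when encrypting. The authenticated-only header/tail
 * lengths are not encoded here; the enqueue path passes them to SEC
 * through fd->cmd (DPOVRD).
 */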
static inline struct dpaa_sec_job * 1469 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) 1470 { 1471 struct rte_crypto_sym_op *sym = op->sym; 1472 struct dpaa_sec_job *cf; 1473 struct dpaa_sec_op_ctx *ctx; 1474 struct qm_sg_entry *sg; 1475 rte_iova_t src_start_addr, dst_start_addr; 1476 uint32_t length = 0; 1477 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1478 ses->iv.offset); 1479 1480 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off; 1481 if (sym->m_dst) 1482 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off; 1483 else 1484 dst_start_addr = src_start_addr; 1485 1486 ctx = dpaa_sec_alloc_ctx(ses, 7); 1487 if (!ctx) 1488 return NULL; 1489 1490 cf = &ctx->job; 1491 ctx->op = op; 1492 1493 /* input */ 1494 rte_prefetch0(cf->sg); 1495 sg = &cf->sg[2]; 1496 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg)); 1497 if (is_encode(ses)) { 1498 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1499 sg->length = ses->iv.length; 1500 length += sg->length; 1501 cpu_to_hw_sg(sg); 1502 1503 sg++; 1504 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); 1505 sg->length = sym->auth.data.length; 1506 length += sg->length; 1507 sg->final = 1; 1508 cpu_to_hw_sg(sg); 1509 } else { 1510 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1511 sg->length = ses->iv.length; 1512 length += sg->length; 1513 cpu_to_hw_sg(sg); 1514 1515 sg++; 1516 1517 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); 1518 sg->length = sym->auth.data.length; 1519 length += sg->length; 1520 cpu_to_hw_sg(sg); 1521 1522 memcpy(ctx->digest, sym->auth.digest.data, 1523 ses->digest_length); 1524 sg++; 1525 1526 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1527 sg->length = ses->digest_length; 1528 length += sg->length; 1529 sg->final = 1; 1530 cpu_to_hw_sg(sg); 1531 } 1532 /* input compound frame */ 1533 cf->sg[1].length = length; 1534 cf->sg[1].extension = 1; 1535 cf->sg[1].final = 1; 1536 cpu_to_hw_sg(&cf->sg[1]); 1537 1538 /* output */ 1539 sg++; 1540 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg)); 1541 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset); 1542 sg->length = sym->cipher.data.length; 1543 length = sg->length; 1544 if (is_encode(ses)) { 1545 cpu_to_hw_sg(sg); 1546 /* set auth output */ 1547 sg++; 1548 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 1549 sg->length = ses->digest_length; 1550 length += sg->length; 1551 } 1552 sg->final = 1; 1553 cpu_to_hw_sg(sg); 1554 1555 /* output compound frame */ 1556 cf->sg[0].length = length; 1557 cf->sg[0].extension = 1; 1558 cpu_to_hw_sg(&cf->sg[0]); 1559 1560 return cf; 1561 } 1562 1563 #ifdef RTE_LIBRTE_SECURITY 1564 static inline struct dpaa_sec_job * 1565 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses) 1566 { 1567 struct rte_crypto_sym_op *sym = op->sym; 1568 struct dpaa_sec_job *cf; 1569 struct dpaa_sec_op_ctx *ctx; 1570 struct qm_sg_entry *sg; 1571 phys_addr_t src_start_addr, dst_start_addr; 1572 1573 ctx = dpaa_sec_alloc_ctx(ses, 2); 1574 if (!ctx) 1575 return NULL; 1576 cf = &ctx->job; 1577 ctx->op = op; 1578 1579 src_start_addr = rte_pktmbuf_mtophys(sym->m_src); 1580 1581 if (sym->m_dst) 1582 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst); 1583 else 1584 dst_start_addr = src_start_addr; 1585 1586 /* input */ 1587 sg = &cf->sg[1]; 1588 qm_sg_entry_set64(sg, src_start_addr); 1589 sg->length = sym->m_src->pkt_len; 1590 sg->final = 1; 1591 cpu_to_hw_sg(sg); 1592 1593 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK; 1594 /* output */ 1595 sg = &cf->sg[0]; 
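	/*
	 * Protocol offload (IPsec/PDCP) may grow the packet with tunnel
	 * headers, ICV or trailers, so the output entry spans the whole
	 * destination buffer; the actual output length is read from the
	 * FD on dequeue and written back into the mbuf there.
	 */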
1596 qm_sg_entry_set64(sg, dst_start_addr); 1597 sg->length = sym->m_src->buf_len - sym->m_src->data_off; 1598 cpu_to_hw_sg(sg); 1599 1600 return cf; 1601 } 1602 1603 static inline struct dpaa_sec_job * 1604 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1605 { 1606 struct rte_crypto_sym_op *sym = op->sym; 1607 struct dpaa_sec_job *cf; 1608 struct dpaa_sec_op_ctx *ctx; 1609 struct qm_sg_entry *sg, *out_sg, *in_sg; 1610 struct rte_mbuf *mbuf; 1611 uint8_t req_segs; 1612 uint32_t in_len = 0, out_len = 0; 1613 1614 if (sym->m_dst) 1615 mbuf = sym->m_dst; 1616 else 1617 mbuf = sym->m_src; 1618 1619 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2; 1620 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1621 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d", 1622 MAX_SG_ENTRIES); 1623 return NULL; 1624 } 1625 1626 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1627 if (!ctx) 1628 return NULL; 1629 cf = &ctx->job; 1630 ctx->op = op; 1631 /* output */ 1632 out_sg = &cf->sg[0]; 1633 out_sg->extension = 1; 1634 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1635 1636 /* 1st seg */ 1637 sg = &cf->sg[2]; 1638 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1639 sg->offset = 0; 1640 1641 /* Successive segs */ 1642 while (mbuf->next) { 1643 sg->length = mbuf->data_len; 1644 out_len += sg->length; 1645 mbuf = mbuf->next; 1646 cpu_to_hw_sg(sg); 1647 sg++; 1648 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1649 sg->offset = 0; 1650 } 1651 sg->length = mbuf->buf_len - mbuf->data_off; 1652 out_len += sg->length; 1653 sg->final = 1; 1654 cpu_to_hw_sg(sg); 1655 1656 out_sg->length = out_len; 1657 cpu_to_hw_sg(out_sg); 1658 1659 /* input */ 1660 mbuf = sym->m_src; 1661 in_sg = &cf->sg[1]; 1662 in_sg->extension = 1; 1663 in_sg->final = 1; 1664 in_len = mbuf->data_len; 1665 1666 sg++; 1667 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1668 1669 /* 1st seg */ 1670 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1671 sg->length = mbuf->data_len; 1672 sg->offset = 0; 1673 1674 /* Successive segs */ 1675 mbuf = mbuf->next; 1676 while (mbuf) { 1677 cpu_to_hw_sg(sg); 1678 sg++; 1679 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf)); 1680 sg->length = mbuf->data_len; 1681 sg->offset = 0; 1682 in_len += sg->length; 1683 mbuf = mbuf->next; 1684 } 1685 sg->final = 1; 1686 cpu_to_hw_sg(sg); 1687 1688 in_sg->length = in_len; 1689 cpu_to_hw_sg(in_sg); 1690 1691 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK; 1692 1693 return cf; 1694 } 1695 #endif 1696 1697 static uint16_t 1698 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1699 uint16_t nb_ops) 1700 { 1701 /* Function to transmit the frames to given device and queuepair */ 1702 uint32_t loop; 1703 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; 1704 uint16_t num_tx = 0; 1705 struct qm_fd fds[DPAA_SEC_BURST], *fd; 1706 uint32_t frames_to_send; 1707 struct rte_crypto_op *op; 1708 struct dpaa_sec_job *cf; 1709 dpaa_sec_session *ses; 1710 uint16_t auth_hdr_len, auth_tail_len; 1711 uint32_t index, flags[DPAA_SEC_BURST] = {0}; 1712 struct qman_fq *inq[DPAA_SEC_BURST]; 1713 1714 while (nb_ops) { 1715 frames_to_send = (nb_ops > DPAA_SEC_BURST) ? 
1716 DPAA_SEC_BURST : nb_ops; 1717 for (loop = 0; loop < frames_to_send; loop++) { 1718 op = *(ops++); 1719 if (op->sym->m_src->seqn != 0) { 1720 index = op->sym->m_src->seqn - 1; 1721 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { 1722 /* QM_EQCR_DCA_IDXMASK = 0x0f */ 1723 flags[loop] = ((index & 0x0f) << 8); 1724 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; 1725 DPAA_PER_LCORE_DQRR_SIZE--; 1726 DPAA_PER_LCORE_DQRR_HELD &= 1727 ~(1 << index); 1728 } 1729 } 1730 1731 switch (op->sess_type) { 1732 case RTE_CRYPTO_OP_WITH_SESSION: 1733 ses = (dpaa_sec_session *) 1734 get_sym_session_private_data( 1735 op->sym->session, 1736 cryptodev_driver_id); 1737 break; 1738 #ifdef RTE_LIBRTE_SECURITY 1739 case RTE_CRYPTO_OP_SECURITY_SESSION: 1740 ses = (dpaa_sec_session *) 1741 get_sec_session_private_data( 1742 op->sym->sec_session); 1743 break; 1744 #endif 1745 default: 1746 DPAA_SEC_DP_ERR( 1747 "sessionless crypto op not supported"); 1748 frames_to_send = loop; 1749 nb_ops = loop; 1750 goto send_pkts; 1751 } 1752 1753 if (!ses) { 1754 DPAA_SEC_DP_ERR("session not available"); 1755 frames_to_send = loop; 1756 nb_ops = loop; 1757 goto send_pkts; 1758 } 1759 1760 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) { 1761 if (dpaa_sec_attach_sess_q(qp, ses)) { 1762 frames_to_send = loop; 1763 nb_ops = loop; 1764 goto send_pkts; 1765 } 1766 } else if (unlikely(ses->qp[rte_lcore_id() % 1767 MAX_DPAA_CORES] != qp)) { 1768 DPAA_SEC_DP_ERR("Old:sess->qp = %p" 1769 " New qp = %p\n", 1770 ses->qp[rte_lcore_id() % 1771 MAX_DPAA_CORES], qp); 1772 frames_to_send = loop; 1773 nb_ops = loop; 1774 goto send_pkts; 1775 } 1776 1777 auth_hdr_len = op->sym->auth.data.length - 1778 op->sym->cipher.data.length; 1779 auth_tail_len = 0; 1780 1781 if (rte_pktmbuf_is_contiguous(op->sym->m_src) && 1782 ((op->sym->m_dst == NULL) || 1783 rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1784 switch (ses->ctxt) { 1785 #ifdef RTE_LIBRTE_SECURITY 1786 case DPAA_SEC_PDCP: 1787 case DPAA_SEC_IPSEC: 1788 cf = build_proto(op, ses); 1789 break; 1790 #endif 1791 case DPAA_SEC_AUTH: 1792 cf = build_auth_only(op, ses); 1793 break; 1794 case DPAA_SEC_CIPHER: 1795 cf = build_cipher_only(op, ses); 1796 break; 1797 case DPAA_SEC_AEAD: 1798 cf = build_cipher_auth_gcm(op, ses); 1799 auth_hdr_len = ses->auth_only_len; 1800 break; 1801 case DPAA_SEC_CIPHER_HASH: 1802 auth_hdr_len = 1803 op->sym->cipher.data.offset 1804 - op->sym->auth.data.offset; 1805 auth_tail_len = 1806 op->sym->auth.data.length 1807 - op->sym->cipher.data.length 1808 - auth_hdr_len; 1809 cf = build_cipher_auth(op, ses); 1810 break; 1811 default: 1812 DPAA_SEC_DP_ERR("not supported ops"); 1813 frames_to_send = loop; 1814 nb_ops = loop; 1815 goto send_pkts; 1816 } 1817 } else { 1818 switch (ses->ctxt) { 1819 #ifdef RTE_LIBRTE_SECURITY 1820 case DPAA_SEC_PDCP: 1821 case DPAA_SEC_IPSEC: 1822 cf = build_proto_sg(op, ses); 1823 break; 1824 #endif 1825 case DPAA_SEC_AUTH: 1826 cf = build_auth_only_sg(op, ses); 1827 break; 1828 case DPAA_SEC_CIPHER: 1829 cf = build_cipher_only_sg(op, ses); 1830 break; 1831 case DPAA_SEC_AEAD: 1832 cf = build_cipher_auth_gcm_sg(op, ses); 1833 auth_hdr_len = ses->auth_only_len; 1834 break; 1835 case DPAA_SEC_CIPHER_HASH: 1836 auth_hdr_len = 1837 op->sym->cipher.data.offset 1838 - op->sym->auth.data.offset; 1839 auth_tail_len = 1840 op->sym->auth.data.length 1841 - op->sym->cipher.data.length 1842 - auth_hdr_len; 1843 cf = build_cipher_auth_sg(op, ses); 1844 break; 1845 default: 1846 DPAA_SEC_DP_ERR("not supported ops"); 1847 frames_to_send = loop; 1848 nb_ops 
= loop; 1849 goto send_pkts; 1850 } 1851 } 1852 if (unlikely(!cf)) { 1853 frames_to_send = loop; 1854 nb_ops = loop; 1855 goto send_pkts; 1856 } 1857 1858 fd = &fds[loop]; 1859 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES]; 1860 fd->opaque_addr = 0; 1861 fd->cmd = 0; 1862 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg)); 1863 fd->_format1 = qm_fd_compound; 1864 fd->length29 = 2 * sizeof(struct qm_sg_entry); 1865 1866 /* Auth_only_len is set as 0 in descriptor and it is 1867 * overwritten here in the fd.cmd which will update 1868 * the DPOVRD reg. 1869 */ 1870 if (auth_hdr_len || auth_tail_len) { 1871 fd->cmd = 0x80000000; 1872 fd->cmd |= 1873 ((auth_tail_len << 16) | auth_hdr_len); 1874 } 1875 1876 #ifdef RTE_LIBRTE_SECURITY 1877 /* In case of PDCP, per packet HFN is stored in 1878 * mbuf priv after sym_op. 1879 */ 1880 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) { 1881 fd->cmd = 0x80000000 | 1882 *((uint32_t *)((uint8_t *)op + 1883 ses->pdcp.hfn_ovd_offset)); 1884 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n", 1885 *((uint32_t *)((uint8_t *)op + 1886 ses->pdcp.hfn_ovd_offset)), 1887 ses->pdcp.hfn_ovd); 1888 } 1889 #endif 1890 } 1891 send_pkts: 1892 loop = 0; 1893 while (loop < frames_to_send) { 1894 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop], 1895 &flags[loop], frames_to_send - loop); 1896 } 1897 nb_ops -= frames_to_send; 1898 num_tx += frames_to_send; 1899 } 1900 1901 dpaa_qp->tx_pkts += num_tx; 1902 dpaa_qp->tx_errs += nb_ops - num_tx; 1903 1904 return num_tx; 1905 } 1906 1907 static uint16_t 1908 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1909 uint16_t nb_ops) 1910 { 1911 uint16_t num_rx; 1912 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; 1913 1914 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops); 1915 1916 dpaa_qp->rx_pkts += num_rx; 1917 dpaa_qp->rx_errs += nb_ops - num_rx; 1918 1919 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 1920 1921 return num_rx; 1922 } 1923 1924 /** Release queue pair */ 1925 static int 1926 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev, 1927 uint16_t qp_id) 1928 { 1929 struct dpaa_sec_dev_private *internals; 1930 struct dpaa_sec_qp *qp = NULL; 1931 1932 PMD_INIT_FUNC_TRACE(); 1933 1934 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id); 1935 1936 internals = dev->data->dev_private; 1937 if (qp_id >= internals->max_nb_queue_pairs) { 1938 DPAA_SEC_ERR("Max supported qpid %d", 1939 internals->max_nb_queue_pairs); 1940 return -EINVAL; 1941 } 1942 1943 qp = &internals->qps[qp_id]; 1944 rte_mempool_free(qp->ctx_pool); 1945 qp->internals = NULL; 1946 dev->data->queue_pairs[qp_id] = NULL; 1947 1948 return 0; 1949 } 1950 1951 /** Setup a queue pair */ 1952 static int 1953 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1954 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1955 __rte_unused int socket_id) 1956 { 1957 struct dpaa_sec_dev_private *internals; 1958 struct dpaa_sec_qp *qp = NULL; 1959 char str[20]; 1960 1961 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); 1962 1963 internals = dev->data->dev_private; 1964 if (qp_id >= internals->max_nb_queue_pairs) { 1965 DPAA_SEC_ERR("Max supported qpid %d", 1966 internals->max_nb_queue_pairs); 1967 return -EINVAL; 1968 } 1969 1970 qp = &internals->qps[qp_id]; 1971 qp->internals = internals; 1972 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d", 1973 dev->data->dev_id, qp_id); 1974 if (!qp->ctx_pool) { 1975 qp->ctx_pool = rte_mempool_create((const char *)str, 1976 CTX_POOL_NUM_BUFS, 1977 
CTX_POOL_BUF_SIZE, 1978 CTX_POOL_CACHE_SIZE, 0, 1979 NULL, NULL, NULL, NULL, 1980 SOCKET_ID_ANY, 0); 1981 if (!qp->ctx_pool) { 1982 DPAA_SEC_ERR("%s create failed\n", str); 1983 return -ENOMEM; 1984 } 1985 } else 1986 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d", 1987 dev->data->dev_id, qp_id); 1988 dev->data->queue_pairs[qp_id] = qp; 1989 1990 return 0; 1991 } 1992 1993 /** Returns the size of session structure */ 1994 static unsigned int 1995 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1996 { 1997 PMD_INIT_FUNC_TRACE(); 1998 1999 return sizeof(dpaa_sec_session); 2000 } 2001 2002 static int 2003 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused, 2004 struct rte_crypto_sym_xform *xform, 2005 dpaa_sec_session *session) 2006 { 2007 session->ctxt = DPAA_SEC_CIPHER; 2008 session->cipher_alg = xform->cipher.algo; 2009 session->iv.length = xform->cipher.iv.length; 2010 session->iv.offset = xform->cipher.iv.offset; 2011 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 2012 RTE_CACHE_LINE_SIZE); 2013 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 2014 DPAA_SEC_ERR("No Memory for cipher key"); 2015 return -ENOMEM; 2016 } 2017 session->cipher_key.length = xform->cipher.key.length; 2018 2019 memcpy(session->cipher_key.data, xform->cipher.key.data, 2020 xform->cipher.key.length); 2021 switch (xform->cipher.algo) { 2022 case RTE_CRYPTO_CIPHER_AES_CBC: 2023 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2024 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2025 break; 2026 case RTE_CRYPTO_CIPHER_3DES_CBC: 2027 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2028 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2029 break; 2030 case RTE_CRYPTO_CIPHER_AES_CTR: 2031 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2032 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2033 break; 2034 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2035 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8; 2036 break; 2037 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2038 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE; 2039 break; 2040 default: 2041 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2042 xform->cipher.algo); 2043 rte_free(session->cipher_key.data); 2044 return -1; 2045 } 2046 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2047 DIR_ENC : DIR_DEC; 2048 2049 return 0; 2050 } 2051 2052 static int 2053 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused, 2054 struct rte_crypto_sym_xform *xform, 2055 dpaa_sec_session *session) 2056 { 2057 session->ctxt = DPAA_SEC_AUTH; 2058 session->auth_alg = xform->auth.algo; 2059 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 2060 RTE_CACHE_LINE_SIZE); 2061 if (session->auth_key.data == NULL && xform->auth.key.length > 0) { 2062 DPAA_SEC_ERR("No Memory for auth key"); 2063 return -ENOMEM; 2064 } 2065 session->auth_key.length = xform->auth.key.length; 2066 session->digest_length = xform->auth.digest_length; 2067 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) { 2068 session->iv.offset = xform->auth.iv.offset; 2069 session->iv.length = xform->auth.iv.length; 2070 } 2071 2072 memcpy(session->auth_key.data, xform->auth.key.data, 2073 xform->auth.key.length); 2074 2075 switch (xform->auth.algo) { 2076 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2077 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2078 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2079 break; 2080 case RTE_CRYPTO_AUTH_MD5_HMAC: 2081 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2082 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2083 break; 2084 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2085 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2086 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2087 break; 2088 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2089 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2090 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2091 break; 2092 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2093 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2094 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2095 break; 2096 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2097 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2098 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2099 break; 2100 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2101 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9; 2102 session->auth_key.algmode = OP_ALG_AAI_F9; 2103 break; 2104 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2105 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA; 2106 session->auth_key.algmode = OP_ALG_AAI_F9; 2107 break; 2108 default: 2109 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u", 2110 xform->auth.algo); 2111 rte_free(session->auth_key.data); 2112 return -1; 2113 } 2114 2115 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 
2116 DIR_ENC : DIR_DEC; 2117 2118 return 0; 2119 } 2120 2121 static int 2122 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused, 2123 struct rte_crypto_sym_xform *xform, 2124 dpaa_sec_session *session) 2125 { 2126 2127 struct rte_crypto_cipher_xform *cipher_xform; 2128 struct rte_crypto_auth_xform *auth_xform; 2129 2130 session->ctxt = DPAA_SEC_CIPHER_HASH; 2131 if (session->auth_cipher_text) { 2132 cipher_xform = &xform->cipher; 2133 auth_xform = &xform->next->auth; 2134 } else { 2135 cipher_xform = &xform->next->cipher; 2136 auth_xform = &xform->auth; 2137 } 2138 2139 /* Set IV parameters */ 2140 session->iv.offset = cipher_xform->iv.offset; 2141 session->iv.length = cipher_xform->iv.length; 2142 2143 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2144 RTE_CACHE_LINE_SIZE); 2145 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2146 DPAA_SEC_ERR("No Memory for cipher key"); 2147 return -1; 2148 } 2149 session->cipher_key.length = cipher_xform->key.length; 2150 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2151 RTE_CACHE_LINE_SIZE); 2152 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2153 DPAA_SEC_ERR("No Memory for auth key"); 2154 rte_free(session->cipher_key.data); 2155 return -ENOMEM; 2156 } 2157 session->auth_key.length = auth_xform->key.length; 2158 memcpy(session->cipher_key.data, cipher_xform->key.data, 2159 cipher_xform->key.length); 2160 memcpy(session->auth_key.data, auth_xform->key.data, 2161 auth_xform->key.length); 2162 2163 session->digest_length = auth_xform->digest_length; 2164 session->auth_alg = auth_xform->algo; 2165 2166 switch (auth_xform->algo) { 2167 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2168 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2169 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2170 break; 2171 case RTE_CRYPTO_AUTH_MD5_HMAC: 2172 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2173 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2174 break; 2175 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2176 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2177 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2178 break; 2179 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2180 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2181 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2182 break; 2183 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2184 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2185 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2186 break; 2187 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2188 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2189 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2190 break; 2191 default: 2192 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u", 2193 auth_xform->algo); 2194 goto error_out; 2195 } 2196 2197 session->cipher_alg = cipher_xform->algo; 2198 2199 switch (cipher_xform->algo) { 2200 case RTE_CRYPTO_CIPHER_AES_CBC: 2201 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2202 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2203 break; 2204 case RTE_CRYPTO_CIPHER_3DES_CBC: 2205 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2206 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2207 break; 2208 case RTE_CRYPTO_CIPHER_AES_CTR: 2209 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2210 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2211 break; 2212 default: 2213 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2214 cipher_xform->algo); 2215 goto error_out; 2216 } 2217 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2218 DIR_ENC : DIR_DEC; 2219 return 0; 2220 2221 error_out: 2222 rte_free(session->cipher_key.data); 2223 rte_free(session->auth_key.data); 2224 return -1; 2225 } 2226 2227 static int 2228 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, 2229 struct rte_crypto_sym_xform *xform, 2230 dpaa_sec_session *session) 2231 { 2232 session->aead_alg = xform->aead.algo; 2233 session->ctxt = DPAA_SEC_AEAD; 2234 session->iv.length = xform->aead.iv.length; 2235 session->iv.offset = xform->aead.iv.offset; 2236 session->auth_only_len = xform->aead.aad_length; 2237 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, 2238 RTE_CACHE_LINE_SIZE); 2239 if (session->aead_key.data == NULL && xform->aead.key.length > 0) { 2240 DPAA_SEC_ERR("No Memory for aead key\n"); 2241 return -ENOMEM; 2242 } 2243 session->aead_key.length = xform->aead.key.length; 2244 session->digest_length = xform->aead.digest_length; 2245 2246 memcpy(session->aead_key.data, xform->aead.key.data, 2247 xform->aead.key.length); 2248 2249 switch (session->aead_alg) { 2250 case RTE_CRYPTO_AEAD_AES_GCM: 2251 session->aead_key.alg = OP_ALG_ALGSEL_AES; 2252 session->aead_key.algmode = OP_ALG_AAI_GCM; 2253 break; 2254 default: 2255 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg); 2256 rte_free(session->aead_key.data); 2257 return -ENOMEM; 2258 } 2259 2260 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2261 DIR_ENC : DIR_DEC; 2262 2263 return 0; 2264 } 2265 2266 static struct qman_fq * 2267 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) 2268 { 2269 unsigned int i; 2270 2271 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2272 if (qi->inq_attach[i] == 0) { 2273 qi->inq_attach[i] = 1; 2274 return &qi->inq[i]; 2275 } 2276 } 2277 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions); 2278 2279 return NULL; 2280 } 2281 2282 static int 2283 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) 2284 { 2285 unsigned int i; 2286 2287 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2288 if (&qi->inq[i] == fq) { 2289 if (qman_retire_fq(fq, NULL) != 0) 2290 DPAA_SEC_WARN("Queue is not retired\n"); 2291 qman_oos_fq(fq); 2292 qi->inq_attach[i] = 0; 2293 return 0; 2294 } 2295 } 2296 return -1; 2297 } 2298 2299 static int 2300 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) 2301 { 2302 int ret; 2303 2304 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp; 2305 ret = dpaa_sec_prep_cdb(sess); 2306 if (ret) { 2307 DPAA_SEC_ERR("Unable to prepare sec cdb"); 2308 return -1; 2309 } 2310 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 2311 ret = rte_dpaa_portal_init((void *)0); 2312 if (ret) { 2313 DPAA_SEC_ERR("Failure in affining portal"); 2314 return ret; 2315 } 2316 } 2317 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES], 2318 rte_dpaa_mem_vtop(&sess->cdb), 2319 qman_fq_fqid(&qp->outq)); 2320 if (ret) 2321 DPAA_SEC_ERR("Unable to init sec queue"); 2322 2323 return ret; 2324 } 2325 2326 static int 2327 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, 2328 struct rte_crypto_sym_xform *xform, void *sess) 2329 { 2330 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2331 dpaa_sec_session *session = sess; 2332 uint32_t i; 2333 int ret; 2334 2335 PMD_INIT_FUNC_TRACE(); 2336 2337 if (unlikely(sess == NULL)) { 2338 DPAA_SEC_ERR("invalid session struct"); 2339 return -EINVAL; 2340 } 2341 memset(session, 0, sizeof(dpaa_sec_session)); 2342 2343 /* Default IV length = 0 */ 2344 session->iv.length = 0; 2345 2346 /* Cipher Only */ 2347 if (xform->type 
== RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2348 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2349 ret = dpaa_sec_cipher_init(dev, xform, session); 2350 2351 /* Authentication Only */ 2352 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2353 xform->next == NULL) { 2354 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2355 session->ctxt = DPAA_SEC_AUTH; 2356 ret = dpaa_sec_auth_init(dev, xform, session); 2357 2358 /* Cipher then Authenticate */ 2359 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2360 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2361 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 2362 session->auth_cipher_text = 1; 2363 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2364 ret = dpaa_sec_auth_init(dev, xform, session); 2365 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2366 ret = dpaa_sec_cipher_init(dev, xform, session); 2367 else 2368 ret = dpaa_sec_chain_init(dev, xform, session); 2369 } else { 2370 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2371 return -EINVAL; 2372 } 2373 /* Authenticate then Cipher */ 2374 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2375 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2376 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 2377 session->auth_cipher_text = 0; 2378 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2379 ret = dpaa_sec_cipher_init(dev, xform, session); 2380 else if (xform->next->cipher.algo 2381 == RTE_CRYPTO_CIPHER_NULL) 2382 ret = dpaa_sec_auth_init(dev, xform, session); 2383 else 2384 ret = dpaa_sec_chain_init(dev, xform, session); 2385 } else { 2386 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2387 return -EINVAL; 2388 } 2389 2390 /* AEAD operation for AES-GCM kind of Algorithms */ 2391 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2392 xform->next == NULL) { 2393 ret = dpaa_sec_aead_init(dev, xform, session); 2394 2395 } else { 2396 DPAA_SEC_ERR("Invalid crypto type"); 2397 return -EINVAL; 2398 } 2399 if (ret) { 2400 DPAA_SEC_ERR("unable to init session"); 2401 goto err1; 2402 } 2403 2404 rte_spinlock_lock(&internals->lock); 2405 for (i = 0; i < MAX_DPAA_CORES; i++) { 2406 session->inq[i] = dpaa_sec_attach_rxq(internals); 2407 if (session->inq[i] == NULL) { 2408 DPAA_SEC_ERR("unable to attach sec queue"); 2409 rte_spinlock_unlock(&internals->lock); 2410 goto err1; 2411 } 2412 } 2413 rte_spinlock_unlock(&internals->lock); 2414 2415 return 0; 2416 2417 err1: 2418 rte_free(session->cipher_key.data); 2419 rte_free(session->auth_key.data); 2420 memset(session, 0, sizeof(dpaa_sec_session)); 2421 2422 return -EINVAL; 2423 } 2424 2425 static int 2426 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, 2427 struct rte_crypto_sym_xform *xform, 2428 struct rte_cryptodev_sym_session *sess, 2429 struct rte_mempool *mempool) 2430 { 2431 void *sess_private_data; 2432 int ret; 2433 2434 PMD_INIT_FUNC_TRACE(); 2435 2436 if (rte_mempool_get(mempool, &sess_private_data)) { 2437 DPAA_SEC_ERR("Couldn't get object from session mempool"); 2438 return -ENOMEM; 2439 } 2440 2441 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); 2442 if (ret != 0) { 2443 DPAA_SEC_ERR("failed to configure session parameters"); 2444 2445 /* Return session to mempool */ 2446 rte_mempool_put(mempool, sess_private_data); 2447 return ret; 2448 } 2449 2450 set_sym_session_private_data(sess, dev->driver_id, 2451 sess_private_data); 2452 2453 2454 return 0; 2455 } 2456 2457 static inline void 2458 free_session_memory(struct rte_cryptodev *dev, 
dpaa_sec_session *s) 2459 { 2460 struct dpaa_sec_dev_private *qi = dev->data->dev_private; 2461 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s); 2462 uint8_t i; 2463 2464 for (i = 0; i < MAX_DPAA_CORES; i++) { 2465 if (s->inq[i]) 2466 dpaa_sec_detach_rxq(qi, s->inq[i]); 2467 s->inq[i] = NULL; 2468 s->qp[i] = NULL; 2469 } 2470 rte_free(s->cipher_key.data); 2471 rte_free(s->auth_key.data); 2472 memset(s, 0, sizeof(dpaa_sec_session)); 2473 rte_mempool_put(sess_mp, (void *)s); 2474 } 2475 2476 /** Clear the memory of session so it doesn't leave key material behind */ 2477 static void 2478 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, 2479 struct rte_cryptodev_sym_session *sess) 2480 { 2481 PMD_INIT_FUNC_TRACE(); 2482 uint8_t index = dev->driver_id; 2483 void *sess_priv = get_sym_session_private_data(sess, index); 2484 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2485 2486 if (sess_priv) { 2487 free_session_memory(dev, s); 2488 set_sym_session_private_data(sess, index, NULL); 2489 } 2490 } 2491 2492 #ifdef RTE_LIBRTE_SECURITY 2493 static int 2494 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2495 struct rte_security_ipsec_xform *ipsec_xform, 2496 dpaa_sec_session *session) 2497 { 2498 PMD_INIT_FUNC_TRACE(); 2499 2500 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2501 RTE_CACHE_LINE_SIZE); 2502 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2503 DPAA_SEC_ERR("No Memory for aead key"); 2504 return -1; 2505 } 2506 memcpy(session->aead_key.data, aead_xform->key.data, 2507 aead_xform->key.length); 2508 2509 session->digest_length = aead_xform->digest_length; 2510 session->aead_key.length = aead_xform->key.length; 2511 2512 switch (aead_xform->algo) { 2513 case RTE_CRYPTO_AEAD_AES_GCM: 2514 switch (session->digest_length) { 2515 case 8: 2516 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8; 2517 break; 2518 case 12: 2519 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12; 2520 break; 2521 case 16: 2522 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16; 2523 break; 2524 default: 2525 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d", 2526 session->digest_length); 2527 return -1; 2528 } 2529 if (session->dir == DIR_ENC) { 2530 memcpy(session->encap_pdb.gcm.salt, 2531 (uint8_t *)&(ipsec_xform->salt), 4); 2532 } else { 2533 memcpy(session->decap_pdb.gcm.salt, 2534 (uint8_t *)&(ipsec_xform->salt), 4); 2535 } 2536 session->aead_key.algmode = OP_ALG_AAI_GCM; 2537 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2538 break; 2539 default: 2540 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u", 2541 aead_xform->algo); 2542 return -1; 2543 } 2544 return 0; 2545 } 2546 2547 static int 2548 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2549 struct rte_crypto_auth_xform *auth_xform, 2550 struct rte_security_ipsec_xform *ipsec_xform, 2551 dpaa_sec_session *session) 2552 { 2553 if (cipher_xform) { 2554 session->cipher_key.data = rte_zmalloc(NULL, 2555 cipher_xform->key.length, 2556 RTE_CACHE_LINE_SIZE); 2557 if (session->cipher_key.data == NULL && 2558 cipher_xform->key.length > 0) { 2559 DPAA_SEC_ERR("No Memory for cipher key"); 2560 return -ENOMEM; 2561 } 2562 2563 session->cipher_key.length = cipher_xform->key.length; 2564 memcpy(session->cipher_key.data, cipher_xform->key.data, 2565 cipher_xform->key.length); 2566 session->cipher_alg = cipher_xform->algo; 2567 } else { 2568 session->cipher_key.data = NULL; 2569 session->cipher_key.length = 0; 2570 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2571 } 2572 2573 
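/*
 * The auth transform handling below mirrors the cipher handling above:
 * copy the key (if any) into the session and record the algorithm, so it
 * can be mapped onto the SEC IPsec protocol descriptor constants in the
 * switch that follows.
 */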
if (auth_xform) { 2574 session->auth_key.data = rte_zmalloc(NULL, 2575 auth_xform->key.length, 2576 RTE_CACHE_LINE_SIZE); 2577 if (session->auth_key.data == NULL && 2578 auth_xform->key.length > 0) { 2579 DPAA_SEC_ERR("No Memory for auth key"); 2580 return -ENOMEM; 2581 } 2582 session->auth_key.length = auth_xform->key.length; 2583 memcpy(session->auth_key.data, auth_xform->key.data, 2584 auth_xform->key.length); 2585 session->auth_alg = auth_xform->algo; 2586 session->digest_length = auth_xform->digest_length; 2587 } else { 2588 session->auth_key.data = NULL; 2589 session->auth_key.length = 0; 2590 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2591 } 2592 2593 switch (session->auth_alg) { 2594 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2595 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96; 2596 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2597 break; 2598 case RTE_CRYPTO_AUTH_MD5_HMAC: 2599 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96; 2600 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2601 break; 2602 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2603 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2604 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2605 if (session->digest_length != 16) 2606 DPAA_SEC_WARN( 2607 "+++Using sha256-hmac truncated len is non-standard," 2608 "it will not work with lookaside proto"); 2609 break; 2610 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2611 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2612 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2613 break; 2614 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2615 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2616 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2617 break; 2618 case RTE_CRYPTO_AUTH_AES_CMAC: 2619 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96; 2620 break; 2621 case RTE_CRYPTO_AUTH_NULL: 2622 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL; 2623 break; 2624 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2625 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2626 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2627 case RTE_CRYPTO_AUTH_SHA1: 2628 case RTE_CRYPTO_AUTH_SHA256: 2629 case RTE_CRYPTO_AUTH_SHA512: 2630 case RTE_CRYPTO_AUTH_SHA224: 2631 case RTE_CRYPTO_AUTH_SHA384: 2632 case RTE_CRYPTO_AUTH_MD5: 2633 case RTE_CRYPTO_AUTH_AES_GMAC: 2634 case RTE_CRYPTO_AUTH_KASUMI_F9: 2635 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2636 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2637 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2638 session->auth_alg); 2639 return -1; 2640 default: 2641 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u", 2642 session->auth_alg); 2643 return -1; 2644 } 2645 2646 switch (session->cipher_alg) { 2647 case RTE_CRYPTO_CIPHER_AES_CBC: 2648 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC; 2649 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2650 break; 2651 case RTE_CRYPTO_CIPHER_3DES_CBC: 2652 session->cipher_key.alg = OP_PCL_IPSEC_3DES; 2653 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2654 break; 2655 case RTE_CRYPTO_CIPHER_AES_CTR: 2656 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR; 2657 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2658 if (session->dir == DIR_ENC) { 2659 session->encap_pdb.ctr.ctr_initial = 0x00000001; 2660 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2661 } else { 2662 session->decap_pdb.ctr.ctr_initial = 0x00000001; 2663 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2664 } 2665 break; 2666 case RTE_CRYPTO_CIPHER_NULL: 2667 session->cipher_key.alg = OP_PCL_IPSEC_NULL; 2668 break; 2669 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2670 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2671 case RTE_CRYPTO_CIPHER_3DES_ECB: 2672 case 
RTE_CRYPTO_CIPHER_AES_ECB: 2673 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2674 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2675 session->cipher_alg); 2676 return -1; 2677 default: 2678 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2679 session->cipher_alg); 2680 return -1; 2681 } 2682 2683 return 0; 2684 } 2685 2686 static int 2687 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, 2688 struct rte_security_session_conf *conf, 2689 void *sess) 2690 { 2691 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2692 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2693 struct rte_crypto_auth_xform *auth_xform = NULL; 2694 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2695 struct rte_crypto_aead_xform *aead_xform = NULL; 2696 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2697 uint32_t i; 2698 int ret; 2699 2700 PMD_INIT_FUNC_TRACE(); 2701 2702 memset(session, 0, sizeof(dpaa_sec_session)); 2703 session->proto_alg = conf->protocol; 2704 session->ctxt = DPAA_SEC_IPSEC; 2705 2706 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 2707 session->dir = DIR_ENC; 2708 else 2709 session->dir = DIR_DEC; 2710 2711 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2712 cipher_xform = &conf->crypto_xform->cipher; 2713 if (conf->crypto_xform->next) 2714 auth_xform = &conf->crypto_xform->next->auth; 2715 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2716 ipsec_xform, session); 2717 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2718 auth_xform = &conf->crypto_xform->auth; 2719 if (conf->crypto_xform->next) 2720 cipher_xform = &conf->crypto_xform->next->cipher; 2721 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2722 ipsec_xform, session); 2723 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2724 aead_xform = &conf->crypto_xform->aead; 2725 ret = dpaa_sec_ipsec_aead_init(aead_xform, 2726 ipsec_xform, session); 2727 } else { 2728 DPAA_SEC_ERR("XFORM not specified"); 2729 ret = -EINVAL; 2730 goto out; 2731 } 2732 if (ret) { 2733 DPAA_SEC_ERR("Failed to process xform"); 2734 goto out; 2735 } 2736 2737 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2738 if (ipsec_xform->tunnel.type == 2739 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2740 session->ip4_hdr.ip_v = IPVERSION; 2741 session->ip4_hdr.ip_hl = 5; 2742 session->ip4_hdr.ip_len = rte_cpu_to_be_16( 2743 sizeof(session->ip4_hdr)); 2744 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2745 session->ip4_hdr.ip_id = 0; 2746 session->ip4_hdr.ip_off = 0; 2747 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2748 session->ip4_hdr.ip_p = (ipsec_xform->proto == 2749 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2750 IPPROTO_ESP : IPPROTO_AH; 2751 session->ip4_hdr.ip_sum = 0; 2752 session->ip4_hdr.ip_src = 2753 ipsec_xform->tunnel.ipv4.src_ip; 2754 session->ip4_hdr.ip_dst = 2755 ipsec_xform->tunnel.ipv4.dst_ip; 2756 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 2757 (void *)&session->ip4_hdr, 2758 sizeof(struct ip)); 2759 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 2760 } else if (ipsec_xform->tunnel.type == 2761 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2762 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2763 DPAA_IPv6_DEFAULT_VTC_FLOW | 2764 ((ipsec_xform->tunnel.ipv6.dscp << 2765 RTE_IPV6_HDR_TC_SHIFT) & 2766 RTE_IPV6_HDR_TC_MASK) | 2767 ((ipsec_xform->tunnel.ipv6.flabel << 2768 RTE_IPV6_HDR_FL_SHIFT) & 2769 RTE_IPV6_HDR_FL_MASK)); 2770 /* Payload length will be updated by HW */ 2771 session->ip6_hdr.payload_len = 0; 2772 session->ip6_hdr.hop_limits = 2773 ipsec_xform->tunnel.ipv6.hlimit; 2774 session->ip6_hdr.proto = (ipsec_xform->proto == 2775 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2776 IPPROTO_ESP : IPPROTO_AH; 2777 memcpy(&session->ip6_hdr.src_addr, 2778 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2779 memcpy(&session->ip6_hdr.dst_addr, 2780 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2781 session->encap_pdb.ip_hdr_len = 2782 sizeof(struct rte_ipv6_hdr); 2783 } 2784 session->encap_pdb.options = 2785 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2786 PDBOPTS_ESP_OIHI_PDB_INL | 2787 PDBOPTS_ESP_IVSRC | 2788 PDBHMO_ESP_ENCAP_DTTL | 2789 PDBHMO_ESP_SNR; 2790 if (ipsec_xform->options.esn) 2791 session->encap_pdb.options |= PDBOPTS_ESP_ESN; 2792 session->encap_pdb.spi = ipsec_xform->spi; 2793 2794 } else if (ipsec_xform->direction == 2795 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2796 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) 2797 session->decap_pdb.options = sizeof(struct ip) << 16; 2798 else 2799 session->decap_pdb.options = 2800 sizeof(struct rte_ipv6_hdr) << 16; 2801 if (ipsec_xform->options.esn) 2802 session->decap_pdb.options |= PDBOPTS_ESP_ESN; 2803 if (ipsec_xform->replay_win_sz) { 2804 uint32_t win_sz; 2805 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2806 2807 switch (win_sz) { 2808 case 1: 2809 case 2: 2810 case 4: 2811 case 8: 2812 case 16: 2813 case 32: 2814 session->decap_pdb.options |= PDBOPTS_ESP_ARS32; 2815 break; 2816 case 64: 2817 session->decap_pdb.options |= PDBOPTS_ESP_ARS64; 2818 break; 2819 default: 2820 session->decap_pdb.options |= 2821 PDBOPTS_ESP_ARS128; 2822 } 2823 } 2824 } else 2825 goto out; 2826 rte_spinlock_lock(&internals->lock); 2827 for (i = 0; i < MAX_DPAA_CORES; i++) { 2828 session->inq[i] = dpaa_sec_attach_rxq(internals); 2829 if (session->inq[i] == NULL) { 2830 DPAA_SEC_ERR("unable to attach sec queue"); 2831 rte_spinlock_unlock(&internals->lock); 2832 goto out; 2833 } 2834 } 2835 rte_spinlock_unlock(&internals->lock); 2836 2837 return 0; 2838 out: 2839 rte_free(session->auth_key.data); 2840 rte_free(session->cipher_key.data); 2841 memset(session, 0, sizeof(dpaa_sec_session)); 2842 return -1; 2843 } 2844 2845 static int 2846 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, 2847 struct rte_security_session_conf *conf, 2848 void *sess) 2849 { 2850 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2851 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2852 struct rte_crypto_auth_xform *auth_xform = NULL; 2853 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2854 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2855 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; 2856 uint32_t i; 2857 2858 
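/*
 * PDCP session setup: extract whichever cipher/auth transforms the
 * application supplied, translate them to the SEC PDCP cipher and auth
 * types, and copy the PDCP-specific parameters (domain, bearer, packet
 * direction, SN size, HFN and HFN override) from the xform into the
 * session.
 */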
PMD_INIT_FUNC_TRACE(); 2859 2860 memset(session, 0, sizeof(dpaa_sec_session)); 2861 2862 /* find xfrm types */ 2863 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2864 cipher_xform = &xform->cipher; 2865 if (xform->next != NULL) 2866 auth_xform = &xform->next->auth; 2867 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2868 auth_xform = &xform->auth; 2869 if (xform->next != NULL) 2870 cipher_xform = &xform->next->cipher; 2871 } else { 2872 DPAA_SEC_ERR("Invalid crypto type"); 2873 return -EINVAL; 2874 } 2875 2876 session->proto_alg = conf->protocol; 2877 session->ctxt = DPAA_SEC_PDCP; 2878 2879 if (cipher_xform) { 2880 switch (cipher_xform->algo) { 2881 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2882 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW; 2883 break; 2884 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2885 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC; 2886 break; 2887 case RTE_CRYPTO_CIPHER_AES_CTR: 2888 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES; 2889 break; 2890 case RTE_CRYPTO_CIPHER_NULL: 2891 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL; 2892 break; 2893 default: 2894 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2895 session->cipher_alg); 2896 return -1; 2897 } 2898 2899 session->cipher_key.data = rte_zmalloc(NULL, 2900 cipher_xform->key.length, 2901 RTE_CACHE_LINE_SIZE); 2902 if (session->cipher_key.data == NULL && 2903 cipher_xform->key.length > 0) { 2904 DPAA_SEC_ERR("No Memory for cipher key"); 2905 return -ENOMEM; 2906 } 2907 session->cipher_key.length = cipher_xform->key.length; 2908 memcpy(session->cipher_key.data, cipher_xform->key.data, 2909 cipher_xform->key.length); 2910 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2911 DIR_ENC : DIR_DEC; 2912 session->cipher_alg = cipher_xform->algo; 2913 } else { 2914 session->cipher_key.data = NULL; 2915 session->cipher_key.length = 0; 2916 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2917 session->dir = DIR_ENC; 2918 } 2919 2920 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 2921 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 && 2922 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) { 2923 DPAA_SEC_ERR( 2924 "PDCP Seq Num size should be 5/12 bits for cmode"); 2925 goto out; 2926 } 2927 } 2928 2929 if (auth_xform) { 2930 switch (auth_xform->algo) { 2931 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2932 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW; 2933 break; 2934 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2935 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC; 2936 break; 2937 case RTE_CRYPTO_AUTH_AES_CMAC: 2938 session->auth_key.alg = PDCP_AUTH_TYPE_AES; 2939 break; 2940 case RTE_CRYPTO_AUTH_NULL: 2941 session->auth_key.alg = PDCP_AUTH_TYPE_NULL; 2942 break; 2943 default: 2944 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2945 session->auth_alg); 2946 rte_free(session->cipher_key.data); 2947 return -1; 2948 } 2949 session->auth_key.data = rte_zmalloc(NULL, 2950 auth_xform->key.length, 2951 RTE_CACHE_LINE_SIZE); 2952 if (!session->auth_key.data && 2953 auth_xform->key.length > 0) { 2954 DPAA_SEC_ERR("No Memory for auth key"); 2955 rte_free(session->cipher_key.data); 2956 return -ENOMEM; 2957 } 2958 session->auth_key.length = auth_xform->key.length; 2959 memcpy(session->auth_key.data, auth_xform->key.data, 2960 auth_xform->key.length); 2961 session->auth_alg = auth_xform->algo; 2962 } else { 2963 session->auth_key.data = NULL; 2964 session->auth_key.length = 0; 2965 session->auth_alg = 0; 2966 } 2967 session->pdcp.domain = pdcp_xform->domain; 2968 session->pdcp.bearer = pdcp_xform->bearer; 2969 
session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2970 session->pdcp.sn_size = pdcp_xform->sn_size;
2971 session->pdcp.hfn = pdcp_xform->hfn;
2972 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2973 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2974 session->pdcp.hfn_ovd_offset = cipher_xform ? cipher_xform->iv.offset : 0; /* cipher_xform may be NULL for auth-only PDCP */
2975
2976 rte_spinlock_lock(&dev_priv->lock);
2977 for (i = 0; i < MAX_DPAA_CORES; i++) {
2978 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2979 if (session->inq[i] == NULL) {
2980 DPAA_SEC_ERR("unable to attach sec queue");
2981 rte_spinlock_unlock(&dev_priv->lock);
2982 goto out;
2983 }
2984 }
2985 rte_spinlock_unlock(&dev_priv->lock);
2986 return 0;
2987 out:
2988 rte_free(session->auth_key.data);
2989 rte_free(session->cipher_key.data);
2990 memset(session, 0, sizeof(dpaa_sec_session));
2991 return -1;
2992 }
2993
2994 static int
2995 dpaa_sec_security_session_create(void *dev,
2996 struct rte_security_session_conf *conf,
2997 struct rte_security_session *sess,
2998 struct rte_mempool *mempool)
2999 {
3000 void *sess_private_data;
3001 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3002 int ret;
3003
3004 if (rte_mempool_get(mempool, &sess_private_data)) {
3005 DPAA_SEC_ERR("Couldn't get object from session mempool");
3006 return -ENOMEM;
3007 }
3008
3009 switch (conf->protocol) {
3010 case RTE_SECURITY_PROTOCOL_IPSEC:
3011 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3012 sess_private_data);
3013 break;
3014 case RTE_SECURITY_PROTOCOL_PDCP:
3015 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3016 sess_private_data);
3017 break;
3018 case RTE_SECURITY_PROTOCOL_MACSEC:
3019 return -ENOTSUP;
3020 default:
3021 return -EINVAL;
3022 }
3023 if (ret != 0) {
3024 DPAA_SEC_ERR("failed to configure session parameters");
3025 /* Return session to mempool */
3026 rte_mempool_put(mempool, sess_private_data);
3027 return ret;
3028 }
3029
3030 set_sec_session_private_data(sess, sess_private_data);
3031
3032 return ret;
3033 }
3034
3035 /** Clear the memory of session so it doesn't leave key material behind */
3036 static int
3037 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3038 struct rte_security_session *sess)
3039 {
3040 PMD_INIT_FUNC_TRACE();
3041 void *sess_priv = get_sec_session_private_data(sess);
3042 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3043
3044 if (sess_priv) {
3045 free_session_memory((struct rte_cryptodev *)dev, s);
3046 set_sec_session_private_data(sess, NULL);
3047 }
3048 return 0;
3049 }
3050 #endif
3051 static int
3052 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3053 struct rte_cryptodev_config *config __rte_unused)
3054 {
3055 PMD_INIT_FUNC_TRACE();
3056
3057 return 0;
3058 }
3059
3060 static int
3061 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3062 {
3063 PMD_INIT_FUNC_TRACE();
3064 return 0;
3065 }
3066
3067 static void
3068 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3069 {
3070 PMD_INIT_FUNC_TRACE();
3071 }
3072
3073 static int
3074 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3075 {
3076 PMD_INIT_FUNC_TRACE();
3077
3078 if (dev == NULL)
3079 return -ENODEV;
3080
3081 return 0;
3082 }
3083
3084 static void
3085 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3086 struct rte_cryptodev_info *info)
3087 {
3088 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3089
3090 PMD_INIT_FUNC_TRACE();
3091 if (info != NULL) {
3092 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3093 info->feature_flags = dev->feature_flags;
3094 info->capabilities =
dpaa_sec_capabilities; 3095 info->sym.max_nb_sessions = internals->max_nb_sessions; 3096 info->driver_id = cryptodev_driver_id; 3097 } 3098 } 3099 3100 static enum qman_cb_dqrr_result 3101 dpaa_sec_process_parallel_event(void *event, 3102 struct qman_portal *qm __always_unused, 3103 struct qman_fq *outq, 3104 const struct qm_dqrr_entry *dqrr, 3105 void **bufs) 3106 { 3107 const struct qm_fd *fd; 3108 struct dpaa_sec_job *job; 3109 struct dpaa_sec_op_ctx *ctx; 3110 struct rte_event *ev = (struct rte_event *)event; 3111 3112 fd = &dqrr->fd; 3113 3114 /* sg is embedded in an op ctx, 3115 * sg[0] is for output 3116 * sg[1] for input 3117 */ 3118 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3119 3120 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3121 ctx->fd_status = fd->status; 3122 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3123 struct qm_sg_entry *sg_out; 3124 uint32_t len; 3125 3126 sg_out = &job->sg[0]; 3127 hw_sg_to_cpu(sg_out); 3128 len = sg_out->length; 3129 ctx->op->sym->m_src->pkt_len = len; 3130 ctx->op->sym->m_src->data_len = len; 3131 } 3132 if (!ctx->fd_status) { 3133 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3134 } else { 3135 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3136 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3137 } 3138 ev->event_ptr = (void *)ctx->op; 3139 3140 ev->flow_id = outq->ev.flow_id; 3141 ev->sub_event_type = outq->ev.sub_event_type; 3142 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3143 ev->op = RTE_EVENT_OP_NEW; 3144 ev->sched_type = outq->ev.sched_type; 3145 ev->queue_id = outq->ev.queue_id; 3146 ev->priority = outq->ev.priority; 3147 *bufs = (void *)ctx->op; 3148 3149 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3150 3151 return qman_cb_dqrr_consume; 3152 } 3153 3154 static enum qman_cb_dqrr_result 3155 dpaa_sec_process_atomic_event(void *event, 3156 struct qman_portal *qm __rte_unused, 3157 struct qman_fq *outq, 3158 const struct qm_dqrr_entry *dqrr, 3159 void **bufs) 3160 { 3161 u8 index; 3162 const struct qm_fd *fd; 3163 struct dpaa_sec_job *job; 3164 struct dpaa_sec_op_ctx *ctx; 3165 struct rte_event *ev = (struct rte_event *)event; 3166 3167 fd = &dqrr->fd; 3168 3169 /* sg is embedded in an op ctx, 3170 * sg[0] is for output 3171 * sg[1] for input 3172 */ 3173 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3174 3175 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3176 ctx->fd_status = fd->status; 3177 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3178 struct qm_sg_entry *sg_out; 3179 uint32_t len; 3180 3181 sg_out = &job->sg[0]; 3182 hw_sg_to_cpu(sg_out); 3183 len = sg_out->length; 3184 ctx->op->sym->m_src->pkt_len = len; 3185 ctx->op->sym->m_src->data_len = len; 3186 } 3187 if (!ctx->fd_status) { 3188 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3189 } else { 3190 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3191 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3192 } 3193 ev->event_ptr = (void *)ctx->op; 3194 ev->flow_id = outq->ev.flow_id; 3195 ev->sub_event_type = outq->ev.sub_event_type; 3196 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3197 ev->op = RTE_EVENT_OP_NEW; 3198 ev->sched_type = outq->ev.sched_type; 3199 ev->queue_id = outq->ev.queue_id; 3200 ev->priority = outq->ev.priority; 3201 3202 /* Save active dqrr entries */ 3203 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1); 3204 DPAA_PER_LCORE_DQRR_SIZE++; 3205 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 3206 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src; 3207 ev->impl_opaque = index + 1; 3208 
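/*
 * The DQRR index (+1, so that 0 can mean "no entry held") is also stored
 * in the mbuf below; the DPAA eventdev path uses it to consume the
 * held-active DQRR entry once the application releases this atomic event,
 * which is why this handler returns qman_cb_dqrr_defer rather than
 * consuming the entry itself.
 */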
ctx->op->sym->m_src->seqn = (uint32_t)index + 1; 3209 *bufs = (void *)ctx->op; 3210 3211 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3212 3213 return qman_cb_dqrr_defer; 3214 } 3215 3216 int 3217 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, 3218 int qp_id, 3219 uint16_t ch_id, 3220 const struct rte_event *event) 3221 { 3222 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3223 struct qm_mcc_initfq opts = {0}; 3224 3225 int ret; 3226 3227 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3228 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3229 opts.fqd.dest.channel = ch_id; 3230 3231 switch (event->sched_type) { 3232 case RTE_SCHED_TYPE_ATOMIC: 3233 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 3234 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 3235 * configuration with HOLD_ACTIVE setting 3236 */ 3237 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 3238 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; 3239 break; 3240 case RTE_SCHED_TYPE_ORDERED: 3241 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n"); 3242 return -1; 3243 default: 3244 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 3245 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event; 3246 break; 3247 } 3248 3249 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts); 3250 if (unlikely(ret)) { 3251 DPAA_SEC_ERR("unable to init caam source fq!"); 3252 return ret; 3253 } 3254 3255 memcpy(&qp->outq.ev, event, sizeof(struct rte_event)); 3256 3257 return 0; 3258 } 3259 3260 int 3261 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev, 3262 int qp_id) 3263 { 3264 struct qm_mcc_initfq opts = {0}; 3265 int ret; 3266 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3267 3268 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3269 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3270 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx; 3271 qp->outq.cb.ern = ern_sec_fq_handler; 3272 qman_retire_fq(&qp->outq, NULL); 3273 qman_oos_fq(&qp->outq); 3274 ret = qman_init_fq(&qp->outq, 0, &opts); 3275 if (ret) 3276 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret); 3277 qp->outq.cb.dqrr = NULL; 3278 3279 return ret; 3280 } 3281 3282 static struct rte_cryptodev_ops crypto_ops = { 3283 .dev_configure = dpaa_sec_dev_configure, 3284 .dev_start = dpaa_sec_dev_start, 3285 .dev_stop = dpaa_sec_dev_stop, 3286 .dev_close = dpaa_sec_dev_close, 3287 .dev_infos_get = dpaa_sec_dev_infos_get, 3288 .queue_pair_setup = dpaa_sec_queue_pair_setup, 3289 .queue_pair_release = dpaa_sec_queue_pair_release, 3290 .sym_session_get_size = dpaa_sec_sym_session_get_size, 3291 .sym_session_configure = dpaa_sec_sym_session_configure, 3292 .sym_session_clear = dpaa_sec_sym_session_clear 3293 }; 3294 3295 #ifdef RTE_LIBRTE_SECURITY 3296 static const struct rte_security_capability * 3297 dpaa_sec_capabilities_get(void *device __rte_unused) 3298 { 3299 return dpaa_sec_security_cap; 3300 } 3301 3302 static const struct rte_security_ops dpaa_sec_security_ops = { 3303 .session_create = dpaa_sec_security_session_create, 3304 .session_update = NULL, 3305 .session_stats_get = NULL, 3306 .session_destroy = dpaa_sec_security_session_destroy, 3307 .set_pkt_metadata = NULL, 3308 .capabilities_get = dpaa_sec_capabilities_get 3309 }; 3310 #endif 3311 static int 3312 dpaa_sec_uninit(struct rte_cryptodev *dev) 3313 { 3314 struct dpaa_sec_dev_private *internals; 3315 3316 if (dev == NULL) 3317 return -ENODEV; 3318 3319 internals = dev->data->dev_private; 3320 rte_free(dev->security_ctx); 3321 3322 rte_free(internals); 3323 3324 
DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", 3325 dev->data->name, rte_socket_id()); 3326 3327 return 0; 3328 } 3329 3330 static int 3331 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) 3332 { 3333 struct dpaa_sec_dev_private *internals; 3334 #ifdef RTE_LIBRTE_SECURITY 3335 struct rte_security_ctx *security_instance; 3336 #endif 3337 struct dpaa_sec_qp *qp; 3338 uint32_t i, flags; 3339 int ret; 3340 3341 PMD_INIT_FUNC_TRACE(); 3342 3343 cryptodev->driver_id = cryptodev_driver_id; 3344 cryptodev->dev_ops = &crypto_ops; 3345 3346 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst; 3347 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst; 3348 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3349 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3350 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3351 RTE_CRYPTODEV_FF_SECURITY | 3352 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3353 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3354 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3355 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3356 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3357 3358 internals = cryptodev->data->dev_private; 3359 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; 3360 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS; 3361 3362 /* 3363 * For secondary processes, we don't initialise any further as primary 3364 * has already done this work. Only check we don't need a different 3365 * RX function 3366 */ 3367 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3368 DPAA_SEC_WARN("Device already init by primary process"); 3369 return 0; 3370 } 3371 #ifdef RTE_LIBRTE_SECURITY 3372 /* Initialize security_ctx only for primary process*/ 3373 security_instance = rte_malloc("rte_security_instances_ops", 3374 sizeof(struct rte_security_ctx), 0); 3375 if (security_instance == NULL) 3376 return -ENOMEM; 3377 security_instance->device = (void *)cryptodev; 3378 security_instance->ops = &dpaa_sec_security_ops; 3379 security_instance->sess_cnt = 0; 3380 cryptodev->security_ctx = security_instance; 3381 #endif 3382 rte_spinlock_init(&internals->lock); 3383 for (i = 0; i < internals->max_nb_queue_pairs; i++) { 3384 /* init qman fq for queue pair */ 3385 qp = &internals->qps[i]; 3386 ret = dpaa_sec_init_tx(&qp->outq); 3387 if (ret) { 3388 DPAA_SEC_ERR("config tx of queue pair %d", i); 3389 goto init_error; 3390 } 3391 } 3392 3393 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | 3394 QMAN_FQ_FLAG_TO_DCPORTAL; 3395 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 3396 /* create rx qman fq for sessions*/ 3397 ret = qman_create_fq(0, flags, &internals->inq[i]); 3398 if (unlikely(ret != 0)) { 3399 DPAA_SEC_ERR("sec qman_create_fq failed"); 3400 goto init_error; 3401 } 3402 } 3403 3404 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name); 3405 return 0; 3406 3407 init_error: 3408 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); 3409 3410 dpaa_sec_uninit(cryptodev); 3411 return -EFAULT; 3412 } 3413 3414 static int 3415 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, 3416 struct rte_dpaa_device *dpaa_dev) 3417 { 3418 struct rte_cryptodev *cryptodev; 3419 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3420 3421 int retval; 3422 3423 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name); 3424 3425 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3426 if (cryptodev == NULL) 3427 return -ENOMEM; 3428 3429 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3430 cryptodev->data->dev_private = rte_zmalloc_socket( 3431 "cryptodev 
private structure", 3432 sizeof(struct dpaa_sec_dev_private), 3433 RTE_CACHE_LINE_SIZE, 3434 rte_socket_id()); 3435 3436 if (cryptodev->data->dev_private == NULL) 3437 rte_panic("Cannot allocate memzone for private " 3438 "device data"); 3439 } 3440 3441 dpaa_dev->crypto_dev = cryptodev; 3442 cryptodev->device = &dpaa_dev->device; 3443 3444 /* init user callbacks */ 3445 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3446 3447 /* if sec device version is not configured */ 3448 if (!rta_get_sec_era()) { 3449 const struct device_node *caam_node; 3450 3451 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 3452 const uint32_t *prop = of_get_property(caam_node, 3453 "fsl,sec-era", 3454 NULL); 3455 if (prop) { 3456 rta_set_sec_era( 3457 INTL_SEC_ERA(rte_cpu_to_be_32(*prop))); 3458 break; 3459 } 3460 } 3461 } 3462 3463 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 3464 retval = rte_dpaa_portal_init((void *)1); 3465 if (retval) { 3466 DPAA_SEC_ERR("Unable to initialize portal"); 3467 return retval; 3468 } 3469 } 3470 3471 /* Invoke PMD device initialization function */ 3472 retval = dpaa_sec_dev_init(cryptodev); 3473 if (retval == 0) 3474 return 0; 3475 3476 /* In case of error, cleanup is done */ 3477 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3478 rte_free(cryptodev->data->dev_private); 3479 3480 rte_cryptodev_pmd_release_device(cryptodev); 3481 3482 return -ENXIO; 3483 } 3484 3485 static int 3486 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev) 3487 { 3488 struct rte_cryptodev *cryptodev; 3489 int ret; 3490 3491 cryptodev = dpaa_dev->crypto_dev; 3492 if (cryptodev == NULL) 3493 return -ENODEV; 3494 3495 ret = dpaa_sec_uninit(cryptodev); 3496 if (ret) 3497 return ret; 3498 3499 return rte_cryptodev_pmd_destroy(cryptodev); 3500 } 3501 3502 static struct rte_dpaa_driver rte_dpaa_sec_driver = { 3503 .drv_type = FSL_DPAA_CRYPTO, 3504 .driver = { 3505 .name = "DPAA SEC PMD" 3506 }, 3507 .probe = cryptodev_dpaa_sec_probe, 3508 .remove = cryptodev_dpaa_sec_remove, 3509 }; 3510 3511 static struct cryptodev_driver dpaa_sec_crypto_drv; 3512 3513 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); 3514 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, 3515 cryptodev_driver_id); 3516 3517 RTE_INIT(dpaa_sec_init_log) 3518 { 3519 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa"); 3520 if (dpaa_logtype_sec >= 0) 3521 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE); 3522 } 3523