/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ip.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. As dpaa_sec_alloc_ctx() is called for
	 * each packet, memset is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue can be dispatched to caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* frames are enqueued on in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] is for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (rta_inline_pdcp_query(authdata.algtype,
				  cipherdata.algtype,
				  ses->pdcp.sn_size,
				  ses->pdcp.hfn_ovd)) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)
					(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		}
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
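	/*
	 * Note: 'swap' (set below from the build-time byte order) is passed
	 * to the RTA cnstr_shdsc_*() helpers so that descriptor words can be
	 * byte-swapped on little-endian cores.
	 */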
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers and set the QM_VDQCR_EXACT flag. Otherwise the
	 * flag is left clear, in which case the dequeue can return up to two
	 * more buffers than requested, so we request two fewer here.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] is for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
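		/* Save the digest received with the packet into the op ctx
		 * and feed it to SEC as the last input entry so the hardware
		 * can verify it.
		 */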
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset =
		sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length =
		data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg,
			  rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
			  dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

#ifdef RTE_LIB_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
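	/* Protocol offload changes the packet length, so the whole remaining
	 * buffer space is made available to SEC for the output.
	 */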
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIB_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				((op->sym->m_dst == NULL) ||
				 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
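					/* truncate the burst to the frames
					 * prepared so far and flush them
					 */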
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIB_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
		 dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
				CTX_POOL_NUM_BUFS,
				CTX_POOL_BUF_SIZE,
				CTX_POOL_CACHE_SIZE, 0,
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
			      dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2120 DIR_ENC : DIR_DEC; 2121 2122 return 0; 2123 } 2124 2125 static int 2126 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused, 2127 struct rte_crypto_sym_xform *xform, 2128 dpaa_sec_session *session) 2129 { 2130 2131 struct rte_crypto_cipher_xform *cipher_xform; 2132 struct rte_crypto_auth_xform *auth_xform; 2133 2134 session->ctxt = DPAA_SEC_CIPHER_HASH; 2135 if (session->auth_cipher_text) { 2136 cipher_xform = &xform->cipher; 2137 auth_xform = &xform->next->auth; 2138 } else { 2139 cipher_xform = &xform->next->cipher; 2140 auth_xform = &xform->auth; 2141 } 2142 2143 /* Set IV parameters */ 2144 session->iv.offset = cipher_xform->iv.offset; 2145 session->iv.length = cipher_xform->iv.length; 2146 2147 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2148 RTE_CACHE_LINE_SIZE); 2149 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2150 DPAA_SEC_ERR("No Memory for cipher key"); 2151 return -ENOMEM; 2152 } 2153 session->cipher_key.length = cipher_xform->key.length; 2154 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2155 RTE_CACHE_LINE_SIZE); 2156 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2157 DPAA_SEC_ERR("No Memory for auth key"); 2158 return -ENOMEM; 2159 } 2160 session->auth_key.length = auth_xform->key.length; 2161 memcpy(session->cipher_key.data, cipher_xform->key.data, 2162 cipher_xform->key.length); 2163 memcpy(session->auth_key.data, auth_xform->key.data, 2164 auth_xform->key.length); 2165 2166 session->digest_length = auth_xform->digest_length; 2167 session->auth_alg = auth_xform->algo; 2168 2169 switch (auth_xform->algo) { 2170 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2171 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2172 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2173 break; 2174 case RTE_CRYPTO_AUTH_MD5_HMAC: 2175 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2176 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2177 break; 2178 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2179 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2180 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2181 break; 2182 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2183 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2184 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2185 break; 2186 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2187 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2188 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2189 break; 2190 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2191 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2192 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2193 break; 2194 default: 2195 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u", 2196 auth_xform->algo); 2197 return -ENOTSUP; 2198 } 2199 2200 session->cipher_alg = cipher_xform->algo; 2201 2202 switch (cipher_xform->algo) { 2203 case RTE_CRYPTO_CIPHER_AES_CBC: 2204 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2205 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2206 break; 2207 case RTE_CRYPTO_CIPHER_3DES_CBC: 2208 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2209 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2210 break; 2211 case RTE_CRYPTO_CIPHER_AES_CTR: 2212 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2213 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2214 break; 2215 default: 2216 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2217 cipher_xform->algo); 2218 return -ENOTSUP; 2219 } 2220 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2221 DIR_ENC : DIR_DEC; 2222 return 0; 2223 } 2224 2225 static int 2226 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, 2227 struct rte_crypto_sym_xform *xform, 2228 dpaa_sec_session *session) 2229 { 2230 session->aead_alg = xform->aead.algo; 2231 session->ctxt = DPAA_SEC_AEAD; 2232 session->iv.length = xform->aead.iv.length; 2233 session->iv.offset = xform->aead.iv.offset; 2234 session->auth_only_len = xform->aead.aad_length; 2235 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, 2236 RTE_CACHE_LINE_SIZE); 2237 if (session->aead_key.data == NULL && xform->aead.key.length > 0) { 2238 DPAA_SEC_ERR("No Memory for aead key\n"); 2239 return -ENOMEM; 2240 } 2241 session->aead_key.length = xform->aead.key.length; 2242 session->digest_length = xform->aead.digest_length; 2243 2244 memcpy(session->aead_key.data, xform->aead.key.data, 2245 xform->aead.key.length); 2246 2247 switch (session->aead_alg) { 2248 case RTE_CRYPTO_AEAD_AES_GCM: 2249 session->aead_key.alg = OP_ALG_ALGSEL_AES; 2250 session->aead_key.algmode = OP_ALG_AAI_GCM; 2251 break; 2252 default: 2253 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg); 2254 return -ENOTSUP; 2255 } 2256 2257 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2258 DIR_ENC : DIR_DEC; 2259 2260 return 0; 2261 } 2262 2263 static struct qman_fq * 2264 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) 2265 { 2266 unsigned int i; 2267 2268 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2269 if (qi->inq_attach[i] == 0) { 2270 qi->inq_attach[i] = 1; 2271 return &qi->inq[i]; 2272 } 2273 } 2274 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions); 2275 2276 return NULL; 2277 } 2278 2279 static int 2280 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) 2281 { 2282 unsigned int i; 2283 2284 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2285 if (&qi->inq[i] == fq) { 2286 if (qman_retire_fq(fq, NULL) != 0) 2287 DPAA_SEC_DEBUG("Queue is not retired\n"); 2288 qman_oos_fq(fq); 2289 qi->inq_attach[i] = 0; 2290 return 0; 2291 } 2292 } 2293 return -1; 2294 } 2295 2296 static int 2297 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) 2298 { 2299 int ret; 2300 2301 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp; 2302 ret = dpaa_sec_prep_cdb(sess); 2303 if (ret) { 2304 DPAA_SEC_ERR("Unable to prepare sec cdb"); 2305 return ret; 2306 } 2307 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 2308 ret = rte_dpaa_portal_init((void *)0); 2309 if (ret) { 2310 DPAA_SEC_ERR("Failure in affining portal"); 2311 return ret; 2312 } 2313 } 2314 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES], 2315 rte_dpaa_mem_vtop(&sess->cdb), 2316 qman_fq_fqid(&qp->outq)); 2317 if (ret) 2318 DPAA_SEC_ERR("Unable to init sec queue"); 2319 2320 return ret; 2321 } 2322 2323 static inline void 2324 free_session_data(dpaa_sec_session *s) 2325 { 2326 if (is_aead(s)) 2327 rte_free(s->aead_key.data); 2328 else { 2329 rte_free(s->auth_key.data); 2330 rte_free(s->cipher_key.data); 2331 } 2332 memset(s, 0, sizeof(dpaa_sec_session)); 2333 } 2334 2335 static int 2336 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, 2337 struct rte_crypto_sym_xform *xform, void *sess) 2338 { 2339 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2340 dpaa_sec_session *session = sess; 2341 uint32_t i; 2342 int ret; 2343 2344 PMD_INIT_FUNC_TRACE(); 2345 2346 if (unlikely(sess == NULL)) { 2347 DPAA_SEC_ERR("invalid session struct"); 2348 return -EINVAL; 2349 } 2350 memset(session, 0, 
sizeof(dpaa_sec_session)); 2351 2352 /* Default IV length = 0 */ 2353 session->iv.length = 0; 2354 2355 /* Cipher Only */ 2356 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2357 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2358 ret = dpaa_sec_cipher_init(dev, xform, session); 2359 2360 /* Authentication Only */ 2361 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2362 xform->next == NULL) { 2363 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2364 session->ctxt = DPAA_SEC_AUTH; 2365 ret = dpaa_sec_auth_init(dev, xform, session); 2366 2367 /* Cipher then Authenticate */ 2368 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2369 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2370 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 2371 session->auth_cipher_text = 1; 2372 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2373 ret = dpaa_sec_auth_init(dev, xform, session); 2374 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2375 ret = dpaa_sec_cipher_init(dev, xform, session); 2376 else 2377 ret = dpaa_sec_chain_init(dev, xform, session); 2378 } else { 2379 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2380 return -ENOTSUP; 2381 } 2382 /* Authenticate then Cipher */ 2383 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2384 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2385 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 2386 session->auth_cipher_text = 0; 2387 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2388 ret = dpaa_sec_cipher_init(dev, xform, session); 2389 else if (xform->next->cipher.algo 2390 == RTE_CRYPTO_CIPHER_NULL) 2391 ret = dpaa_sec_auth_init(dev, xform, session); 2392 else 2393 ret = dpaa_sec_chain_init(dev, xform, session); 2394 } else { 2395 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2396 return -ENOTSUP; 2397 } 2398 2399 /* AEAD operation for AES-GCM kind of Algorithms */ 2400 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2401 xform->next == NULL) { 2402 ret = dpaa_sec_aead_init(dev, xform, session); 2403 2404 } else { 2405 DPAA_SEC_ERR("Invalid crypto type"); 2406 return -EINVAL; 2407 } 2408 if (ret) { 2409 DPAA_SEC_ERR("unable to init session"); 2410 goto err1; 2411 } 2412 2413 rte_spinlock_lock(&internals->lock); 2414 for (i = 0; i < MAX_DPAA_CORES; i++) { 2415 session->inq[i] = dpaa_sec_attach_rxq(internals); 2416 if (session->inq[i] == NULL) { 2417 DPAA_SEC_ERR("unable to attach sec queue"); 2418 rte_spinlock_unlock(&internals->lock); 2419 ret = -EBUSY; 2420 goto err1; 2421 } 2422 } 2423 rte_spinlock_unlock(&internals->lock); 2424 2425 return 0; 2426 2427 err1: 2428 free_session_data(session); 2429 return ret; 2430 } 2431 2432 static int 2433 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, 2434 struct rte_crypto_sym_xform *xform, 2435 struct rte_cryptodev_sym_session *sess, 2436 struct rte_mempool *mempool) 2437 { 2438 void *sess_private_data; 2439 int ret; 2440 2441 PMD_INIT_FUNC_TRACE(); 2442 2443 if (rte_mempool_get(mempool, &sess_private_data)) { 2444 DPAA_SEC_ERR("Couldn't get object from session mempool"); 2445 return -ENOMEM; 2446 } 2447 2448 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); 2449 if (ret != 0) { 2450 DPAA_SEC_ERR("failed to configure session parameters"); 2451 2452 /* Return session to mempool */ 2453 rte_mempool_put(mempool, sess_private_data); 2454 return ret; 2455 } 2456 2457 set_sym_session_private_data(sess, dev->driver_id, 2458 sess_private_data); 2459 2460 2461 return 0; 2462 } 2463 2464 static inline void 2465 
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s) 2466 { 2467 struct dpaa_sec_dev_private *qi = dev->data->dev_private; 2468 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s); 2469 uint8_t i; 2470 2471 for (i = 0; i < MAX_DPAA_CORES; i++) { 2472 if (s->inq[i]) 2473 dpaa_sec_detach_rxq(qi, s->inq[i]); 2474 s->inq[i] = NULL; 2475 s->qp[i] = NULL; 2476 } 2477 free_session_data(s); 2478 rte_mempool_put(sess_mp, (void *)s); 2479 } 2480 2481 /** Clear the memory of session so it doesn't leave key material behind */ 2482 static void 2483 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, 2484 struct rte_cryptodev_sym_session *sess) 2485 { 2486 PMD_INIT_FUNC_TRACE(); 2487 uint8_t index = dev->driver_id; 2488 void *sess_priv = get_sym_session_private_data(sess, index); 2489 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2490 2491 if (sess_priv) { 2492 free_session_memory(dev, s); 2493 set_sym_session_private_data(sess, index, NULL); 2494 } 2495 } 2496 2497 #ifdef RTE_LIB_SECURITY 2498 static int 2499 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2500 struct rte_security_ipsec_xform *ipsec_xform, 2501 dpaa_sec_session *session) 2502 { 2503 PMD_INIT_FUNC_TRACE(); 2504 2505 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2506 RTE_CACHE_LINE_SIZE); 2507 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2508 DPAA_SEC_ERR("No Memory for aead key"); 2509 return -ENOMEM; 2510 } 2511 memcpy(session->aead_key.data, aead_xform->key.data, 2512 aead_xform->key.length); 2513 2514 session->digest_length = aead_xform->digest_length; 2515 session->aead_key.length = aead_xform->key.length; 2516 2517 switch (aead_xform->algo) { 2518 case RTE_CRYPTO_AEAD_AES_GCM: 2519 switch (session->digest_length) { 2520 case 8: 2521 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8; 2522 break; 2523 case 12: 2524 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12; 2525 break; 2526 case 16: 2527 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16; 2528 break; 2529 default: 2530 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d", 2531 session->digest_length); 2532 return -EINVAL; 2533 } 2534 if (session->dir == DIR_ENC) { 2535 memcpy(session->encap_pdb.gcm.salt, 2536 (uint8_t *)&(ipsec_xform->salt), 4); 2537 } else { 2538 memcpy(session->decap_pdb.gcm.salt, 2539 (uint8_t *)&(ipsec_xform->salt), 4); 2540 } 2541 session->aead_key.algmode = OP_ALG_AAI_GCM; 2542 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2543 break; 2544 default: 2545 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u", 2546 aead_xform->algo); 2547 return -ENOTSUP; 2548 } 2549 return 0; 2550 } 2551 2552 static int 2553 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2554 struct rte_crypto_auth_xform *auth_xform, 2555 struct rte_security_ipsec_xform *ipsec_xform, 2556 dpaa_sec_session *session) 2557 { 2558 if (cipher_xform) { 2559 session->cipher_key.data = rte_zmalloc(NULL, 2560 cipher_xform->key.length, 2561 RTE_CACHE_LINE_SIZE); 2562 if (session->cipher_key.data == NULL && 2563 cipher_xform->key.length > 0) { 2564 DPAA_SEC_ERR("No Memory for cipher key"); 2565 return -ENOMEM; 2566 } 2567 2568 session->cipher_key.length = cipher_xform->key.length; 2569 memcpy(session->cipher_key.data, cipher_xform->key.data, 2570 cipher_xform->key.length); 2571 session->cipher_alg = cipher_xform->algo; 2572 } else { 2573 session->cipher_key.data = NULL; 2574 session->cipher_key.length = 0; 2575 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2576 } 2577 2578 if (auth_xform) { 2579 
session->auth_key.data = rte_zmalloc(NULL, 2580 auth_xform->key.length, 2581 RTE_CACHE_LINE_SIZE); 2582 if (session->auth_key.data == NULL && 2583 auth_xform->key.length > 0) { 2584 DPAA_SEC_ERR("No Memory for auth key"); 2585 return -ENOMEM; 2586 } 2587 session->auth_key.length = auth_xform->key.length; 2588 memcpy(session->auth_key.data, auth_xform->key.data, 2589 auth_xform->key.length); 2590 session->auth_alg = auth_xform->algo; 2591 session->digest_length = auth_xform->digest_length; 2592 } else { 2593 session->auth_key.data = NULL; 2594 session->auth_key.length = 0; 2595 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2596 } 2597 2598 switch (session->auth_alg) { 2599 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2600 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96; 2601 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2602 break; 2603 case RTE_CRYPTO_AUTH_MD5_HMAC: 2604 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96; 2605 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2606 break; 2607 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2608 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2609 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2610 if (session->digest_length != 16) 2611 DPAA_SEC_WARN( 2612 "+++Using sha256-hmac truncated len is non-standard," 2613 "it will not work with lookaside proto"); 2614 break; 2615 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2616 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2617 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2618 break; 2619 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2620 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2621 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2622 break; 2623 case RTE_CRYPTO_AUTH_AES_CMAC: 2624 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96; 2625 break; 2626 case RTE_CRYPTO_AUTH_NULL: 2627 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL; 2628 break; 2629 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2630 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2631 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2632 case RTE_CRYPTO_AUTH_SHA1: 2633 case RTE_CRYPTO_AUTH_SHA256: 2634 case RTE_CRYPTO_AUTH_SHA512: 2635 case RTE_CRYPTO_AUTH_SHA224: 2636 case RTE_CRYPTO_AUTH_SHA384: 2637 case RTE_CRYPTO_AUTH_MD5: 2638 case RTE_CRYPTO_AUTH_AES_GMAC: 2639 case RTE_CRYPTO_AUTH_KASUMI_F9: 2640 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2641 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2642 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2643 session->auth_alg); 2644 return -ENOTSUP; 2645 default: 2646 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u", 2647 session->auth_alg); 2648 return -ENOTSUP; 2649 } 2650 2651 switch (session->cipher_alg) { 2652 case RTE_CRYPTO_CIPHER_AES_CBC: 2653 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC; 2654 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2655 break; 2656 case RTE_CRYPTO_CIPHER_3DES_CBC: 2657 session->cipher_key.alg = OP_PCL_IPSEC_3DES; 2658 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2659 break; 2660 case RTE_CRYPTO_CIPHER_AES_CTR: 2661 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR; 2662 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2663 if (session->dir == DIR_ENC) { 2664 session->encap_pdb.ctr.ctr_initial = 0x00000001; 2665 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2666 } else { 2667 session->decap_pdb.ctr.ctr_initial = 0x00000001; 2668 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2669 } 2670 break; 2671 case RTE_CRYPTO_CIPHER_NULL: 2672 session->cipher_key.alg = OP_PCL_IPSEC_NULL; 2673 break; 2674 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2675 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2676 case RTE_CRYPTO_CIPHER_3DES_ECB: 2677 case RTE_CRYPTO_CIPHER_AES_ECB: 
2678 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2679 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2680 session->cipher_alg);
2681 return -ENOTSUP;
2682 default:
2683 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2684 session->cipher_alg);
2685 return -ENOTSUP;
2686 }
2687
2688 return 0;
2689 }
2690
2691 static int
2692 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2693 struct rte_security_session_conf *conf,
2694 void *sess)
2695 {
2696 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2697 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2698 struct rte_crypto_auth_xform *auth_xform = NULL;
2699 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2700 struct rte_crypto_aead_xform *aead_xform = NULL;
2701 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2702 uint32_t i;
2703 int ret;
2704
2705 PMD_INIT_FUNC_TRACE();
2706
2707 memset(session, 0, sizeof(dpaa_sec_session));
2708 session->proto_alg = conf->protocol;
2709 session->ctxt = DPAA_SEC_IPSEC;
2710
2711 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2712 session->dir = DIR_ENC;
2713 else
2714 session->dir = DIR_DEC;
2715
2716 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2717 cipher_xform = &conf->crypto_xform->cipher;
2718 if (conf->crypto_xform->next)
2719 auth_xform = &conf->crypto_xform->next->auth;
2720 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2721 ipsec_xform, session);
2722 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2723 auth_xform = &conf->crypto_xform->auth;
2724 if (conf->crypto_xform->next)
2725 cipher_xform = &conf->crypto_xform->next->cipher;
2726 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2727 ipsec_xform, session);
2728 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2729 aead_xform = &conf->crypto_xform->aead;
2730 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2731 ipsec_xform, session);
2732 } else {
2733 DPAA_SEC_ERR("XFORM not specified");
2734 ret = -EINVAL;
2735 goto out;
2736 }
2737 if (ret) {
2738 DPAA_SEC_ERR("Failed to process xform");
2739 goto out;
2740 }
2741
/* For egress SAs, pre-build the outer tunnel header which SEC prepends during encapsulation */
2742 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2743 if (ipsec_xform->tunnel.type ==
2744 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2745 session->ip4_hdr.ip_v = IPVERSION;
2746 session->ip4_hdr.ip_hl = 5;
2747 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2748 sizeof(session->ip4_hdr));
2749 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2750 session->ip4_hdr.ip_id = 0;
2751 session->ip4_hdr.ip_off = 0;
2752 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2753 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2754 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2755 IPPROTO_ESP : IPPROTO_AH; 2756 session->ip4_hdr.ip_sum = 0; 2757 session->ip4_hdr.ip_src = 2758 ipsec_xform->tunnel.ipv4.src_ip; 2759 session->ip4_hdr.ip_dst = 2760 ipsec_xform->tunnel.ipv4.dst_ip; 2761 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 2762 (void *)&session->ip4_hdr, 2763 sizeof(struct ip)); 2764 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 2765 } else if (ipsec_xform->tunnel.type == 2766 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2767 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2768 DPAA_IPv6_DEFAULT_VTC_FLOW | 2769 ((ipsec_xform->tunnel.ipv6.dscp << 2770 RTE_IPV6_HDR_TC_SHIFT) & 2771 RTE_IPV6_HDR_TC_MASK) | 2772 ((ipsec_xform->tunnel.ipv6.flabel << 2773 RTE_IPV6_HDR_FL_SHIFT) & 2774 RTE_IPV6_HDR_FL_MASK)); 2775 /* Payload length will be updated by HW */ 2776 session->ip6_hdr.payload_len = 0; 2777 session->ip6_hdr.hop_limits = 2778 ipsec_xform->tunnel.ipv6.hlimit; 2779 session->ip6_hdr.proto = (ipsec_xform->proto == 2780 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2781 IPPROTO_ESP : IPPROTO_AH; 2782 memcpy(&session->ip6_hdr.src_addr, 2783 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2784 memcpy(&session->ip6_hdr.dst_addr, 2785 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2786 session->encap_pdb.ip_hdr_len = 2787 sizeof(struct rte_ipv6_hdr); 2788 } 2789 session->encap_pdb.options = 2790 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2791 PDBOPTS_ESP_OIHI_PDB_INL | 2792 PDBOPTS_ESP_IVSRC | 2793 PDBHMO_ESP_ENCAP_DTTL | 2794 PDBHMO_ESP_SNR; 2795 if (ipsec_xform->options.esn) 2796 session->encap_pdb.options |= PDBOPTS_ESP_ESN; 2797 session->encap_pdb.spi = ipsec_xform->spi; 2798 2799 } else if (ipsec_xform->direction == 2800 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2801 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) 2802 session->decap_pdb.options = sizeof(struct ip) << 16; 2803 else 2804 session->decap_pdb.options = 2805 sizeof(struct rte_ipv6_hdr) << 16; 2806 if (ipsec_xform->options.esn) 2807 session->decap_pdb.options |= PDBOPTS_ESP_ESN; 2808 if (ipsec_xform->replay_win_sz) { 2809 uint32_t win_sz; 2810 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2811 2812 switch (win_sz) { 2813 case 1: 2814 case 2: 2815 case 4: 2816 case 8: 2817 case 16: 2818 case 32: 2819 session->decap_pdb.options |= PDBOPTS_ESP_ARS32; 2820 break; 2821 case 64: 2822 session->decap_pdb.options |= PDBOPTS_ESP_ARS64; 2823 break; 2824 default: 2825 session->decap_pdb.options |= 2826 PDBOPTS_ESP_ARS128; 2827 } 2828 } 2829 } else 2830 goto out; 2831 rte_spinlock_lock(&internals->lock); 2832 for (i = 0; i < MAX_DPAA_CORES; i++) { 2833 session->inq[i] = dpaa_sec_attach_rxq(internals); 2834 if (session->inq[i] == NULL) { 2835 DPAA_SEC_ERR("unable to attach sec queue"); 2836 rte_spinlock_unlock(&internals->lock); 2837 goto out; 2838 } 2839 } 2840 rte_spinlock_unlock(&internals->lock); 2841 2842 return 0; 2843 out: 2844 free_session_data(session); 2845 return -1; 2846 } 2847 2848 static int 2849 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, 2850 struct rte_security_session_conf *conf, 2851 void *sess) 2852 { 2853 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2854 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2855 struct rte_crypto_auth_xform *auth_xform = NULL; 2856 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2857 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2858 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; 2859 uint32_t i; 2860 int ret; 2861 2862 PMD_INIT_FUNC_TRACE(); 2863 2864 memset(session, 0, sizeof(dpaa_sec_session)); 2865 2866 /* find 
xfrm types */ 2867 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2868 cipher_xform = &xform->cipher; 2869 if (xform->next != NULL) 2870 auth_xform = &xform->next->auth; 2871 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2872 auth_xform = &xform->auth; 2873 if (xform->next != NULL) 2874 cipher_xform = &xform->next->cipher; 2875 } else { 2876 DPAA_SEC_ERR("Invalid crypto type"); 2877 return -EINVAL; 2878 } 2879 2880 session->proto_alg = conf->protocol; 2881 session->ctxt = DPAA_SEC_PDCP; 2882 2883 if (cipher_xform) { 2884 switch (cipher_xform->algo) { 2885 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2886 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW; 2887 break; 2888 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2889 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC; 2890 break; 2891 case RTE_CRYPTO_CIPHER_AES_CTR: 2892 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES; 2893 break; 2894 case RTE_CRYPTO_CIPHER_NULL: 2895 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL; 2896 break; 2897 default: 2898 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2899 session->cipher_alg); 2900 return -EINVAL; 2901 } 2902 2903 session->cipher_key.data = rte_zmalloc(NULL, 2904 cipher_xform->key.length, 2905 RTE_CACHE_LINE_SIZE); 2906 if (session->cipher_key.data == NULL && 2907 cipher_xform->key.length > 0) { 2908 DPAA_SEC_ERR("No Memory for cipher key"); 2909 return -ENOMEM; 2910 } 2911 session->cipher_key.length = cipher_xform->key.length; 2912 memcpy(session->cipher_key.data, cipher_xform->key.data, 2913 cipher_xform->key.length); 2914 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2915 DIR_ENC : DIR_DEC; 2916 session->cipher_alg = cipher_xform->algo; 2917 } else { 2918 session->cipher_key.data = NULL; 2919 session->cipher_key.length = 0; 2920 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2921 session->dir = DIR_ENC; 2922 } 2923 2924 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 2925 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 && 2926 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) { 2927 DPAA_SEC_ERR( 2928 "PDCP Seq Num size should be 5/12 bits for cmode"); 2929 ret = -EINVAL; 2930 goto out; 2931 } 2932 } 2933 2934 if (auth_xform) { 2935 switch (auth_xform->algo) { 2936 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2937 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW; 2938 break; 2939 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2940 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC; 2941 break; 2942 case RTE_CRYPTO_AUTH_AES_CMAC: 2943 session->auth_key.alg = PDCP_AUTH_TYPE_AES; 2944 break; 2945 case RTE_CRYPTO_AUTH_NULL: 2946 session->auth_key.alg = PDCP_AUTH_TYPE_NULL; 2947 break; 2948 default: 2949 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2950 session->auth_alg); 2951 rte_free(session->cipher_key.data); 2952 return -EINVAL; 2953 } 2954 session->auth_key.data = rte_zmalloc(NULL, 2955 auth_xform->key.length, 2956 RTE_CACHE_LINE_SIZE); 2957 if (!session->auth_key.data && 2958 auth_xform->key.length > 0) { 2959 DPAA_SEC_ERR("No Memory for auth key"); 2960 rte_free(session->cipher_key.data); 2961 return -ENOMEM; 2962 } 2963 session->auth_key.length = auth_xform->key.length; 2964 memcpy(session->auth_key.data, auth_xform->key.data, 2965 auth_xform->key.length); 2966 session->auth_alg = auth_xform->algo; 2967 } else { 2968 session->auth_key.data = NULL; 2969 session->auth_key.length = 0; 2970 session->auth_alg = 0; 2971 } 2972 session->pdcp.domain = pdcp_xform->domain; 2973 session->pdcp.bearer = pdcp_xform->bearer; 2974 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 2975 session->pdcp.sn_size = 
pdcp_xform->sn_size;
2976 session->pdcp.hfn = pdcp_xform->hfn;
2977 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2978 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2979 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
2980 if (cipher_xform)
2981 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2982
2983 rte_spinlock_lock(&dev_priv->lock);
2984 for (i = 0; i < MAX_DPAA_CORES; i++) {
2985 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2986 if (session->inq[i] == NULL) {
2987 DPAA_SEC_ERR("unable to attach sec queue");
2988 rte_spinlock_unlock(&dev_priv->lock);
2989 ret = -EBUSY;
2990 goto out;
2991 }
2992 }
2993 rte_spinlock_unlock(&dev_priv->lock);
2994 return 0;
2995 out:
2996 rte_free(session->auth_key.data);
2997 rte_free(session->cipher_key.data);
2998 memset(session, 0, sizeof(dpaa_sec_session));
2999 return ret;
3000 }
3001
3002 static int
3003 dpaa_sec_security_session_create(void *dev,
3004 struct rte_security_session_conf *conf,
3005 struct rte_security_session *sess,
3006 struct rte_mempool *mempool)
3007 {
3008 void *sess_private_data;
3009 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3010 int ret;
3011
3012 if (rte_mempool_get(mempool, &sess_private_data)) {
3013 DPAA_SEC_ERR("Couldn't get object from session mempool");
3014 return -ENOMEM;
3015 }
3016
3017 switch (conf->protocol) {
3018 case RTE_SECURITY_PROTOCOL_IPSEC:
3019 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3020 sess_private_data);
3021 break;
3022 case RTE_SECURITY_PROTOCOL_PDCP:
3023 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3024 sess_private_data);
3025 break;
3026 case RTE_SECURITY_PROTOCOL_MACSEC:
3027 ret = -ENOTSUP;
break;
/* Unsupported/unknown protocols take the common error path below
 * so that the session object goes back to its mempool.
 */
3028 default:
3029 ret = -EINVAL;
3030 }
3031 if (ret != 0) {
3032 DPAA_SEC_ERR("failed to configure session parameters");
3033 /* Return session to mempool */
3034 rte_mempool_put(mempool, sess_private_data);
3035 return ret;
3036 }
3037
3038 set_sec_session_private_data(sess, sess_private_data);
3039
3040 return ret;
3041 }
3042
3043 /** Clear the memory of session so it doesn't leave key material behind */
3044 static int
3045 dpaa_sec_security_session_destroy(void *dev,
3046 struct rte_security_session *sess)
3047 {
3048 PMD_INIT_FUNC_TRACE();
3049 void *sess_priv = get_sec_session_private_data(sess);
3050 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3051
3052 if (sess_priv) {
3053 free_session_memory((struct rte_cryptodev *)dev, s);
3054 set_sec_session_private_data(sess, NULL);
3055 }
3056 return 0;
3057 }
3058 #endif
3059 static int
3060 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3061 struct rte_cryptodev_config *config __rte_unused)
3062 {
3063 PMD_INIT_FUNC_TRACE();
3064
3065 return 0;
3066 }
3067
3068 static int
3069 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3070 {
3071 PMD_INIT_FUNC_TRACE();
3072 return 0;
3073 }
3074
3075 static void
3076 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3077 {
3078 PMD_INIT_FUNC_TRACE();
3079 }
3080
3081 static int
3082 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3083 {
3084 PMD_INIT_FUNC_TRACE();
3085
3086 if (dev == NULL)
3087 return -ENODEV;
3088
3089 return 0;
3090 }
3091
3092 static void
3093 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3094 struct rte_cryptodev_info *info)
3095 {
3096 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3097
3098 PMD_INIT_FUNC_TRACE();
3099 if (info != NULL) {
3100 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3101 info->feature_flags = dev->feature_flags;
3102
info->capabilities = dpaa_sec_capabilities; 3103 info->sym.max_nb_sessions = internals->max_nb_sessions; 3104 info->driver_id = cryptodev_driver_id; 3105 } 3106 } 3107 3108 static enum qman_cb_dqrr_result 3109 dpaa_sec_process_parallel_event(void *event, 3110 struct qman_portal *qm __always_unused, 3111 struct qman_fq *outq, 3112 const struct qm_dqrr_entry *dqrr, 3113 void **bufs) 3114 { 3115 const struct qm_fd *fd; 3116 struct dpaa_sec_job *job; 3117 struct dpaa_sec_op_ctx *ctx; 3118 struct rte_event *ev = (struct rte_event *)event; 3119 3120 fd = &dqrr->fd; 3121 3122 /* sg is embedded in an op ctx, 3123 * sg[0] is for output 3124 * sg[1] for input 3125 */ 3126 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3127 3128 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3129 ctx->fd_status = fd->status; 3130 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3131 struct qm_sg_entry *sg_out; 3132 uint32_t len; 3133 3134 sg_out = &job->sg[0]; 3135 hw_sg_to_cpu(sg_out); 3136 len = sg_out->length; 3137 ctx->op->sym->m_src->pkt_len = len; 3138 ctx->op->sym->m_src->data_len = len; 3139 } 3140 if (!ctx->fd_status) { 3141 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3142 } else { 3143 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3144 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3145 } 3146 ev->event_ptr = (void *)ctx->op; 3147 3148 ev->flow_id = outq->ev.flow_id; 3149 ev->sub_event_type = outq->ev.sub_event_type; 3150 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3151 ev->op = RTE_EVENT_OP_NEW; 3152 ev->sched_type = outq->ev.sched_type; 3153 ev->queue_id = outq->ev.queue_id; 3154 ev->priority = outq->ev.priority; 3155 *bufs = (void *)ctx->op; 3156 3157 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3158 3159 return qman_cb_dqrr_consume; 3160 } 3161 3162 static enum qman_cb_dqrr_result 3163 dpaa_sec_process_atomic_event(void *event, 3164 struct qman_portal *qm __rte_unused, 3165 struct qman_fq *outq, 3166 const struct qm_dqrr_entry *dqrr, 3167 void **bufs) 3168 { 3169 u8 index; 3170 const struct qm_fd *fd; 3171 struct dpaa_sec_job *job; 3172 struct dpaa_sec_op_ctx *ctx; 3173 struct rte_event *ev = (struct rte_event *)event; 3174 3175 fd = &dqrr->fd; 3176 3177 /* sg is embedded in an op ctx, 3178 * sg[0] is for output 3179 * sg[1] for input 3180 */ 3181 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3182 3183 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3184 ctx->fd_status = fd->status; 3185 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3186 struct qm_sg_entry *sg_out; 3187 uint32_t len; 3188 3189 sg_out = &job->sg[0]; 3190 hw_sg_to_cpu(sg_out); 3191 len = sg_out->length; 3192 ctx->op->sym->m_src->pkt_len = len; 3193 ctx->op->sym->m_src->data_len = len; 3194 } 3195 if (!ctx->fd_status) { 3196 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3197 } else { 3198 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3199 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3200 } 3201 ev->event_ptr = (void *)ctx->op; 3202 ev->flow_id = outq->ev.flow_id; 3203 ev->sub_event_type = outq->ev.sub_event_type; 3204 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3205 ev->op = RTE_EVENT_OP_NEW; 3206 ev->sched_type = outq->ev.sched_type; 3207 ev->queue_id = outq->ev.queue_id; 3208 ev->priority = outq->ev.priority; 3209 3210 /* Save active dqrr entries */ 3211 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1); 3212 DPAA_PER_LCORE_DQRR_SIZE++; 3213 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 3214 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src; 3215 ev->impl_opaque = 
index + 1; 3216 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1; 3217 *bufs = (void *)ctx->op; 3218 3219 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3220 3221 return qman_cb_dqrr_defer; 3222 } 3223 3224 int 3225 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, 3226 int qp_id, 3227 uint16_t ch_id, 3228 const struct rte_event *event) 3229 { 3230 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3231 struct qm_mcc_initfq opts = {0}; 3232 3233 int ret; 3234 3235 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3236 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3237 opts.fqd.dest.channel = ch_id; 3238 3239 switch (event->sched_type) { 3240 case RTE_SCHED_TYPE_ATOMIC: 3241 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 3242 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 3243 * configuration with HOLD_ACTIVE setting 3244 */ 3245 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 3246 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; 3247 break; 3248 case RTE_SCHED_TYPE_ORDERED: 3249 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n"); 3250 return -ENOTSUP; 3251 default: 3252 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 3253 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event; 3254 break; 3255 } 3256 3257 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts); 3258 if (unlikely(ret)) { 3259 DPAA_SEC_ERR("unable to init caam source fq!"); 3260 return ret; 3261 } 3262 3263 memcpy(&qp->outq.ev, event, sizeof(struct rte_event)); 3264 3265 return 0; 3266 } 3267 3268 int 3269 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev, 3270 int qp_id) 3271 { 3272 struct qm_mcc_initfq opts = {0}; 3273 int ret; 3274 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3275 3276 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3277 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3278 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx; 3279 qp->outq.cb.ern = ern_sec_fq_handler; 3280 qman_retire_fq(&qp->outq, NULL); 3281 qman_oos_fq(&qp->outq); 3282 ret = qman_init_fq(&qp->outq, 0, &opts); 3283 if (ret) 3284 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret); 3285 qp->outq.cb.dqrr = NULL; 3286 3287 return ret; 3288 } 3289 3290 static struct rte_cryptodev_ops crypto_ops = { 3291 .dev_configure = dpaa_sec_dev_configure, 3292 .dev_start = dpaa_sec_dev_start, 3293 .dev_stop = dpaa_sec_dev_stop, 3294 .dev_close = dpaa_sec_dev_close, 3295 .dev_infos_get = dpaa_sec_dev_infos_get, 3296 .queue_pair_setup = dpaa_sec_queue_pair_setup, 3297 .queue_pair_release = dpaa_sec_queue_pair_release, 3298 .sym_session_get_size = dpaa_sec_sym_session_get_size, 3299 .sym_session_configure = dpaa_sec_sym_session_configure, 3300 .sym_session_clear = dpaa_sec_sym_session_clear 3301 }; 3302 3303 #ifdef RTE_LIB_SECURITY 3304 static const struct rte_security_capability * 3305 dpaa_sec_capabilities_get(void *device __rte_unused) 3306 { 3307 return dpaa_sec_security_cap; 3308 } 3309 3310 static const struct rte_security_ops dpaa_sec_security_ops = { 3311 .session_create = dpaa_sec_security_session_create, 3312 .session_update = NULL, 3313 .session_stats_get = NULL, 3314 .session_destroy = dpaa_sec_security_session_destroy, 3315 .set_pkt_metadata = NULL, 3316 .capabilities_get = dpaa_sec_capabilities_get 3317 }; 3318 #endif 3319 static int 3320 dpaa_sec_uninit(struct rte_cryptodev *dev) 3321 { 3322 struct dpaa_sec_dev_private *internals; 3323 3324 if (dev == NULL) 3325 return -ENODEV; 3326 3327 internals = dev->data->dev_private; 3328 rte_free(dev->security_ctx); 3329 3330 
rte_free(internals); 3331 3332 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", 3333 dev->data->name, rte_socket_id()); 3334 3335 return 0; 3336 } 3337 3338 static int 3339 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) 3340 { 3341 struct dpaa_sec_dev_private *internals; 3342 #ifdef RTE_LIB_SECURITY 3343 struct rte_security_ctx *security_instance; 3344 #endif 3345 struct dpaa_sec_qp *qp; 3346 uint32_t i, flags; 3347 int ret; 3348 3349 PMD_INIT_FUNC_TRACE(); 3350 3351 cryptodev->driver_id = cryptodev_driver_id; 3352 cryptodev->dev_ops = &crypto_ops; 3353 3354 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst; 3355 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst; 3356 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3357 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3358 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3359 RTE_CRYPTODEV_FF_SECURITY | 3360 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3361 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3362 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3363 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3364 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3365 3366 internals = cryptodev->data->dev_private; 3367 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; 3368 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS; 3369 3370 /* 3371 * For secondary processes, we don't initialise any further as primary 3372 * has already done this work. Only check we don't need a different 3373 * RX function 3374 */ 3375 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3376 DPAA_SEC_WARN("Device already init by primary process"); 3377 return 0; 3378 } 3379 #ifdef RTE_LIB_SECURITY 3380 /* Initialize security_ctx only for primary process*/ 3381 security_instance = rte_malloc("rte_security_instances_ops", 3382 sizeof(struct rte_security_ctx), 0); 3383 if (security_instance == NULL) 3384 return -ENOMEM; 3385 security_instance->device = (void *)cryptodev; 3386 security_instance->ops = &dpaa_sec_security_ops; 3387 security_instance->sess_cnt = 0; 3388 cryptodev->security_ctx = security_instance; 3389 #endif 3390 rte_spinlock_init(&internals->lock); 3391 for (i = 0; i < internals->max_nb_queue_pairs; i++) { 3392 /* init qman fq for queue pair */ 3393 qp = &internals->qps[i]; 3394 ret = dpaa_sec_init_tx(&qp->outq); 3395 if (ret) { 3396 DPAA_SEC_ERR("config tx of queue pair %d", i); 3397 goto init_error; 3398 } 3399 } 3400 3401 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | 3402 QMAN_FQ_FLAG_TO_DCPORTAL; 3403 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 3404 /* create rx qman fq for sessions*/ 3405 ret = qman_create_fq(0, flags, &internals->inq[i]); 3406 if (unlikely(ret != 0)) { 3407 DPAA_SEC_ERR("sec qman_create_fq failed"); 3408 goto init_error; 3409 } 3410 } 3411 3412 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name); 3413 return 0; 3414 3415 init_error: 3416 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); 3417 3418 rte_free(cryptodev->security_ctx); 3419 return -EFAULT; 3420 } 3421 3422 static int 3423 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, 3424 struct rte_dpaa_device *dpaa_dev) 3425 { 3426 struct rte_cryptodev *cryptodev; 3427 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3428 3429 int retval; 3430 3431 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name); 3432 3433 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3434 if (cryptodev == NULL) 3435 return -ENOMEM; 3436 3437 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3438 cryptodev->data->dev_private = 
rte_zmalloc_socket( 3439 "cryptodev private structure", 3440 sizeof(struct dpaa_sec_dev_private), 3441 RTE_CACHE_LINE_SIZE, 3442 rte_socket_id()); 3443 3444 if (cryptodev->data->dev_private == NULL) 3445 rte_panic("Cannot allocate memzone for private " 3446 "device data"); 3447 } 3448 3449 dpaa_dev->crypto_dev = cryptodev; 3450 cryptodev->device = &dpaa_dev->device; 3451 3452 /* init user callbacks */ 3453 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3454 3455 /* if sec device version is not configured */ 3456 if (!rta_get_sec_era()) { 3457 const struct device_node *caam_node; 3458 3459 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 3460 const uint32_t *prop = of_get_property(caam_node, 3461 "fsl,sec-era", 3462 NULL); 3463 if (prop) { 3464 rta_set_sec_era( 3465 INTL_SEC_ERA(rte_cpu_to_be_32(*prop))); 3466 break; 3467 } 3468 } 3469 } 3470 3471 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 3472 retval = rte_dpaa_portal_init((void *)1); 3473 if (retval) { 3474 DPAA_SEC_ERR("Unable to initialize portal"); 3475 goto out; 3476 } 3477 } 3478 3479 /* Invoke PMD device initialization function */ 3480 retval = dpaa_sec_dev_init(cryptodev); 3481 if (retval == 0) 3482 return 0; 3483 3484 retval = -ENXIO; 3485 out: 3486 /* In case of error, cleanup is done */ 3487 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3488 rte_free(cryptodev->data->dev_private); 3489 3490 rte_cryptodev_pmd_release_device(cryptodev); 3491 3492 return retval; 3493 } 3494 3495 static int 3496 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev) 3497 { 3498 struct rte_cryptodev *cryptodev; 3499 int ret; 3500 3501 cryptodev = dpaa_dev->crypto_dev; 3502 if (cryptodev == NULL) 3503 return -ENODEV; 3504 3505 ret = dpaa_sec_uninit(cryptodev); 3506 if (ret) 3507 return ret; 3508 3509 return rte_cryptodev_pmd_destroy(cryptodev); 3510 } 3511 3512 static struct rte_dpaa_driver rte_dpaa_sec_driver = { 3513 .drv_type = FSL_DPAA_CRYPTO, 3514 .driver = { 3515 .name = "DPAA SEC PMD" 3516 }, 3517 .probe = cryptodev_dpaa_sec_probe, 3518 .remove = cryptodev_dpaa_sec_remove, 3519 }; 3520 3521 static struct cryptodev_driver dpaa_sec_crypto_drv; 3522 3523 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); 3524 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, 3525 cryptodev_driver_id); 3526 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE); 3527
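
/*
 * Usage sketch (illustrative only, not part of the driver): one possible
 * application-side flow for driving this PMD through the generic cryptodev
 * API of the same DPDK vintage as this file (the
 * rte_cryptodev_sym_session_init() era).  The dev_id, mempools (sess_mp,
 * sess_priv_mp, op_pool), key/iv buffers, mbuf and sizes below are
 * placeholders assumed for the example, not names defined by this driver.
 *
 *	struct rte_cryptodev_config dev_conf = {
 *		.socket_id = (int)rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess;
 *	struct rte_crypto_op *op;
 *
 *	rte_cryptodev_configure(dev_id, &dev_conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp);
 *
 *	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *	rte_crypto_op_attach_sym_session(op, sess);
 *	op->sym->m_src = mbuf;
 *	op->sym->cipher.data.offset = 0;
 *	op->sym->cipher.data.length = rte_pktmbuf_data_len(mbuf);
 *	memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset), iv, 16);
 *	rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1);
 *	while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
 *		;
 *
 * The rte_security (IPsec/PDCP) path differs only in session setup: the
 * session is created through the device's security_ctx with the
 * rte_security session API and attached to the op with
 * rte_security_attach_session() instead of the plain symmetric session
 * calls above.
 */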