/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called four
	 * times to clear all the SG entries. dpaa_sec_alloc_ctx() is called
	 * for each packet, and memset is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as the CAAM channel so that
 * all packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;
	authdata.algtype = ses->auth_key.alg;
	authdata.algmode = ses->auth_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT
	 * flag; without it the dequeue can return up to two more buffers
	 * than requested, so we request two less in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *		^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

#ifdef RTE_LIBRTE_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIBRTE_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIBRTE_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n",
				     str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		rte_free(session->cipher_key.data);
		return -1;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		rte_free(session->auth_key.data);
		return -1;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2139 DIR_ENC : DIR_DEC; 2140 2141 return 0; 2142 } 2143 2144 static int 2145 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused, 2146 struct rte_crypto_sym_xform *xform, 2147 dpaa_sec_session *session) 2148 { 2149 2150 struct rte_crypto_cipher_xform *cipher_xform; 2151 struct rte_crypto_auth_xform *auth_xform; 2152 2153 if (session->auth_cipher_text) { 2154 cipher_xform = &xform->cipher; 2155 auth_xform = &xform->next->auth; 2156 } else { 2157 cipher_xform = &xform->next->cipher; 2158 auth_xform = &xform->auth; 2159 } 2160 2161 /* Set IV parameters */ 2162 session->iv.offset = cipher_xform->iv.offset; 2163 session->iv.length = cipher_xform->iv.length; 2164 2165 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2166 RTE_CACHE_LINE_SIZE); 2167 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2168 DPAA_SEC_ERR("No Memory for cipher key"); 2169 return -1; 2170 } 2171 session->cipher_key.length = cipher_xform->key.length; 2172 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2173 RTE_CACHE_LINE_SIZE); 2174 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2175 DPAA_SEC_ERR("No Memory for auth key"); 2176 rte_free(session->cipher_key.data); 2177 return -ENOMEM; 2178 } 2179 session->auth_key.length = auth_xform->key.length; 2180 memcpy(session->cipher_key.data, cipher_xform->key.data, 2181 cipher_xform->key.length); 2182 memcpy(session->auth_key.data, auth_xform->key.data, 2183 auth_xform->key.length); 2184 2185 session->digest_length = auth_xform->digest_length; 2186 session->auth_alg = auth_xform->algo; 2187 2188 switch (auth_xform->algo) { 2189 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2190 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2191 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2192 break; 2193 case RTE_CRYPTO_AUTH_MD5_HMAC: 2194 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2195 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2196 break; 2197 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2198 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2199 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2200 break; 2201 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2202 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2203 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2204 break; 2205 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2206 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2207 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2208 break; 2209 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2210 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2211 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2212 break; 2213 default: 2214 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u", 2215 auth_xform->algo); 2216 goto error_out; 2217 } 2218 2219 session->cipher_alg = cipher_xform->algo; 2220 2221 switch (cipher_xform->algo) { 2222 case RTE_CRYPTO_CIPHER_AES_CBC: 2223 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2224 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2225 break; 2226 case RTE_CRYPTO_CIPHER_3DES_CBC: 2227 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2228 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2229 break; 2230 case RTE_CRYPTO_CIPHER_AES_CTR: 2231 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2232 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2233 break; 2234 default: 2235 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2236 cipher_xform->algo); 2237 goto error_out; 2238 } 2239 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
			DIR_ENC : DIR_DEC;
	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	return -1;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		rte_free(session->aead_key.data);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	/* Scan the same range that dpaa_sec_attach_rxq() allocates from
	 * (max_nb_sessions entries per core), so queues attached for any
	 * core can also be detached.
	 */
	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return -1;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
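		/*
		 * Illustrative only (not driver code): a request reaches this
		 * cipher-only branch when the application passes a single
		 * cipher xform with no chained transform, e.g. roughly:
		 *
		 *   struct rte_crypto_sym_xform cx = {
		 *       .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		 *       .next = NULL,
		 *       .cipher = {
		 *           .op   = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		 *           .algo = RTE_CRYPTO_CIPHER_AES_CBC,
		 *           .key  = { .data = key_buf, .length = 16 },
		 *           .iv   = { .offset = iv_off, .length = 16 },
		 *       },
		 *   };
		 *
		 * "key_buf" and "iv_off" are application-side placeholders.
		 */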
session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2369 session->ctxt = DPAA_SEC_CIPHER; 2370 dpaa_sec_cipher_init(dev, xform, session); 2371 2372 /* Authentication Only */ 2373 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2374 xform->next == NULL) { 2375 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2376 session->ctxt = DPAA_SEC_AUTH; 2377 dpaa_sec_auth_init(dev, xform, session); 2378 2379 /* Cipher then Authenticate */ 2380 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2381 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2382 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 2383 session->ctxt = DPAA_SEC_CIPHER_HASH; 2384 session->auth_cipher_text = 1; 2385 dpaa_sec_chain_init(dev, xform, session); 2386 } else { 2387 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2388 return -EINVAL; 2389 } 2390 /* Authenticate then Cipher */ 2391 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2392 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2393 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 2394 session->ctxt = DPAA_SEC_CIPHER_HASH; 2395 session->auth_cipher_text = 0; 2396 dpaa_sec_chain_init(dev, xform, session); 2397 } else { 2398 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2399 return -EINVAL; 2400 } 2401 2402 /* AEAD operation for AES-GCM kind of Algorithms */ 2403 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2404 xform->next == NULL) { 2405 dpaa_sec_aead_init(dev, xform, session); 2406 2407 } else { 2408 DPAA_SEC_ERR("Invalid crypto type"); 2409 return -EINVAL; 2410 } 2411 rte_spinlock_lock(&internals->lock); 2412 for (i = 0; i < MAX_DPAA_CORES; i++) { 2413 session->inq[i] = dpaa_sec_attach_rxq(internals); 2414 if (session->inq[i] == NULL) { 2415 DPAA_SEC_ERR("unable to attach sec queue"); 2416 rte_spinlock_unlock(&internals->lock); 2417 goto err1; 2418 } 2419 } 2420 rte_spinlock_unlock(&internals->lock); 2421 2422 return 0; 2423 2424 err1: 2425 rte_free(session->cipher_key.data); 2426 rte_free(session->auth_key.data); 2427 memset(session, 0, sizeof(dpaa_sec_session)); 2428 2429 return -EINVAL; 2430 } 2431 2432 static int 2433 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, 2434 struct rte_crypto_sym_xform *xform, 2435 struct rte_cryptodev_sym_session *sess, 2436 struct rte_mempool *mempool) 2437 { 2438 void *sess_private_data; 2439 int ret; 2440 2441 PMD_INIT_FUNC_TRACE(); 2442 2443 if (rte_mempool_get(mempool, &sess_private_data)) { 2444 DPAA_SEC_ERR("Couldn't get object from session mempool"); 2445 return -ENOMEM; 2446 } 2447 2448 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); 2449 if (ret != 0) { 2450 DPAA_SEC_ERR("failed to configure session parameters"); 2451 2452 /* Return session to mempool */ 2453 rte_mempool_put(mempool, sess_private_data); 2454 return ret; 2455 } 2456 2457 set_sym_session_private_data(sess, dev->driver_id, 2458 sess_private_data); 2459 2460 2461 return 0; 2462 } 2463 2464 static inline void 2465 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s) 2466 { 2467 struct dpaa_sec_dev_private *qi = dev->data->dev_private; 2468 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s); 2469 uint8_t i; 2470 2471 for (i = 0; i < MAX_DPAA_CORES; i++) { 2472 if (s->inq[i]) 2473 dpaa_sec_detach_rxq(qi, s->inq[i]); 2474 s->inq[i] = NULL; 2475 s->qp[i] = NULL; 2476 } 2477 rte_free(s->cipher_key.data); 2478 rte_free(s->auth_key.data); 2479 memset(s, 0, sizeof(dpaa_sec_session)); 2480 rte_mempool_put(sess_mp, (void *)s); 2481 } 2482 2483 /** Clear the memory of 
session so it doesn't leave key material behind */ 2484 static void 2485 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, 2486 struct rte_cryptodev_sym_session *sess) 2487 { 2488 PMD_INIT_FUNC_TRACE(); 2489 uint8_t index = dev->driver_id; 2490 void *sess_priv = get_sym_session_private_data(sess, index); 2491 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2492 2493 if (sess_priv) { 2494 free_session_memory(dev, s); 2495 set_sym_session_private_data(sess, index, NULL); 2496 } 2497 } 2498 2499 #ifdef RTE_LIBRTE_SECURITY 2500 static int 2501 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, 2502 struct rte_security_session_conf *conf, 2503 void *sess) 2504 { 2505 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2506 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2507 struct rte_crypto_auth_xform *auth_xform = NULL; 2508 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2509 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2510 uint32_t i; 2511 2512 PMD_INIT_FUNC_TRACE(); 2513 2514 memset(session, 0, sizeof(dpaa_sec_session)); 2515 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2516 cipher_xform = &conf->crypto_xform->cipher; 2517 if (conf->crypto_xform->next) 2518 auth_xform = &conf->crypto_xform->next->auth; 2519 } else { 2520 auth_xform = &conf->crypto_xform->auth; 2521 if (conf->crypto_xform->next) 2522 cipher_xform = &conf->crypto_xform->next->cipher; 2523 } 2524 session->proto_alg = conf->protocol; 2525 session->ctxt = DPAA_SEC_IPSEC; 2526 2527 if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) { 2528 session->cipher_key.data = rte_zmalloc(NULL, 2529 cipher_xform->key.length, 2530 RTE_CACHE_LINE_SIZE); 2531 if (session->cipher_key.data == NULL && 2532 cipher_xform->key.length > 0) { 2533 DPAA_SEC_ERR("No Memory for cipher key"); 2534 return -ENOMEM; 2535 } 2536 memcpy(session->cipher_key.data, cipher_xform->key.data, 2537 cipher_xform->key.length); 2538 session->cipher_key.length = cipher_xform->key.length; 2539 2540 switch (cipher_xform->algo) { 2541 case RTE_CRYPTO_CIPHER_NULL: 2542 session->cipher_key.alg = OP_PCL_IPSEC_NULL; 2543 break; 2544 case RTE_CRYPTO_CIPHER_AES_CBC: 2545 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC; 2546 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2547 break; 2548 case RTE_CRYPTO_CIPHER_3DES_CBC: 2549 session->cipher_key.alg = OP_PCL_IPSEC_3DES; 2550 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2551 break; 2552 case RTE_CRYPTO_CIPHER_AES_CTR: 2553 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR; 2554 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2555 break; 2556 default: 2557 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2558 cipher_xform->algo); 2559 goto out; 2560 } 2561 session->cipher_alg = cipher_xform->algo; 2562 } else { 2563 session->cipher_key.data = NULL; 2564 session->cipher_key.length = 0; 2565 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2566 } 2567 2568 if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) { 2569 session->auth_key.data = rte_zmalloc(NULL, 2570 auth_xform->key.length, 2571 RTE_CACHE_LINE_SIZE); 2572 if (session->auth_key.data == NULL && 2573 auth_xform->key.length > 0) { 2574 DPAA_SEC_ERR("No Memory for auth key"); 2575 rte_free(session->cipher_key.data); 2576 return -ENOMEM; 2577 } 2578 memcpy(session->auth_key.data, auth_xform->key.data, 2579 auth_xform->key.length); 2580 session->auth_key.length = auth_xform->key.length; 2581 2582 switch (auth_xform->algo) { 2583 case RTE_CRYPTO_AUTH_NULL: 2584 session->auth_key.alg 
= OP_PCL_IPSEC_HMAC_NULL; 2585 session->digest_length = 0; 2586 break; 2587 case RTE_CRYPTO_AUTH_MD5_HMAC: 2588 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96; 2589 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2590 break; 2591 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2592 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96; 2593 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2594 break; 2595 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2596 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_160; 2597 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2598 break; 2599 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2600 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2601 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2602 break; 2603 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2604 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2605 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2606 break; 2607 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2608 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2609 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2610 break; 2611 default: 2612 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", 2613 auth_xform->algo); 2614 goto out; 2615 } 2616 session->auth_alg = auth_xform->algo; 2617 } else { 2618 session->auth_key.data = NULL; 2619 session->auth_key.length = 0; 2620 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2621 } 2622 2623 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2624 if (ipsec_xform->tunnel.type == 2625 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2626 memset(&session->encap_pdb, 0, 2627 sizeof(struct ipsec_encap_pdb) + 2628 sizeof(session->ip4_hdr)); 2629 session->ip4_hdr.ip_v = IPVERSION; 2630 session->ip4_hdr.ip_hl = 5; 2631 session->ip4_hdr.ip_len = rte_cpu_to_be_16( 2632 sizeof(session->ip4_hdr)); 2633 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2634 session->ip4_hdr.ip_id = 0; 2635 session->ip4_hdr.ip_off = 0; 2636 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2637 session->ip4_hdr.ip_p = (ipsec_xform->proto == 2638 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2639 IPPROTO_ESP : IPPROTO_AH; 2640 session->ip4_hdr.ip_sum = 0; 2641 session->ip4_hdr.ip_src = 2642 ipsec_xform->tunnel.ipv4.src_ip; 2643 session->ip4_hdr.ip_dst = 2644 ipsec_xform->tunnel.ipv4.dst_ip; 2645 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 2646 (void *)&session->ip4_hdr, 2647 sizeof(struct ip)); 2648 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 2649 } else if (ipsec_xform->tunnel.type == 2650 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2651 memset(&session->encap_pdb, 0, 2652 sizeof(struct ipsec_encap_pdb) + 2653 sizeof(session->ip6_hdr)); 2654 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2655 DPAA_IPv6_DEFAULT_VTC_FLOW | 2656 ((ipsec_xform->tunnel.ipv6.dscp << 2657 RTE_IPV6_HDR_TC_SHIFT) & 2658 RTE_IPV6_HDR_TC_MASK) | 2659 ((ipsec_xform->tunnel.ipv6.flabel << 2660 RTE_IPV6_HDR_FL_SHIFT) & 2661 RTE_IPV6_HDR_FL_MASK)); 2662 /* Payload length will be updated by HW */ 2663 session->ip6_hdr.payload_len = 0; 2664 session->ip6_hdr.hop_limits = 2665 ipsec_xform->tunnel.ipv6.hlimit; 2666 session->ip6_hdr.proto = (ipsec_xform->proto == 2667 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2668 IPPROTO_ESP : IPPROTO_AH; 2669 memcpy(&session->ip6_hdr.src_addr, 2670 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2671 memcpy(&session->ip6_hdr.dst_addr, 2672 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2673 session->encap_pdb.ip_hdr_len = 2674 sizeof(struct rte_ipv6_hdr); 2675 } 2676 session->encap_pdb.options = 2677 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2678 PDBOPTS_ESP_OIHI_PDB_INL | 2679 PDBOPTS_ESP_IVSRC | 2680 PDBHMO_ESP_ENCAP_DTTL | 2681 PDBHMO_ESP_SNR; 2682 if (ipsec_xform->options.esn) 2683 session->encap_pdb.options |= PDBOPTS_ESP_ESN; 2684 session->encap_pdb.spi = ipsec_xform->spi; 2685 session->dir = DIR_ENC; 2686 } else if (ipsec_xform->direction == 2687 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2688 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2689 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) 2690 session->decap_pdb.options = sizeof(struct ip) << 16; 2691 else 2692 session->decap_pdb.options = 2693 sizeof(struct rte_ipv6_hdr) << 16; 2694 if (ipsec_xform->options.esn) 2695 session->decap_pdb.options |= PDBOPTS_ESP_ESN; 2696 session->dir = DIR_DEC; 2697 } else 2698 goto out; 2699 rte_spinlock_lock(&internals->lock); 2700 for (i = 0; i < MAX_DPAA_CORES; i++) { 2701 session->inq[i] = dpaa_sec_attach_rxq(internals); 2702 if (session->inq[i] == NULL) { 2703 DPAA_SEC_ERR("unable to attach sec queue"); 2704 rte_spinlock_unlock(&internals->lock); 2705 goto out; 2706 } 2707 } 2708 rte_spinlock_unlock(&internals->lock); 2709 2710 return 0; 2711 out: 2712 rte_free(session->auth_key.data); 2713 rte_free(session->cipher_key.data); 2714 memset(session, 0, sizeof(dpaa_sec_session)); 2715 return -1; 2716 } 2717 2718 static int 2719 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, 2720 struct rte_security_session_conf *conf, 2721 void *sess) 2722 { 2723 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2724 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2725 struct rte_crypto_auth_xform *auth_xform = NULL; 2726 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2727 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2728 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; 2729 uint32_t i; 2730 2731 PMD_INIT_FUNC_TRACE(); 2732 2733 memset(session, 0, sizeof(dpaa_sec_session)); 2734 2735 /* find xfrm types */ 2736 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2737 cipher_xform = &xform->cipher; 2738 if (xform->next != NULL) 2739 auth_xform = &xform->next->auth; 2740 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2741 auth_xform = &xform->auth; 2742 if (xform->next != NULL) 2743 cipher_xform = &xform->next->cipher; 2744 } else { 2745 DPAA_SEC_ERR("Invalid crypto type"); 2746 return -EINVAL; 2747 } 2748 2749 session->proto_alg = conf->protocol; 2750 session->ctxt = DPAA_SEC_PDCP; 2751 2752 if (cipher_xform) { 2753 switch (cipher_xform->algo) { 2754 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2755 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW; 2756 break; 2757 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2758 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC; 2759 break; 2760 case RTE_CRYPTO_CIPHER_AES_CTR: 2761 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES; 2762 break; 2763 case RTE_CRYPTO_CIPHER_NULL: 2764 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL; 2765 break; 2766 default: 2767 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 2768 session->cipher_alg); 2769 return -1; 2770 } 2771 2772 session->cipher_key.data = rte_zmalloc(NULL, 2773 cipher_xform->key.length, 2774 RTE_CACHE_LINE_SIZE); 2775 if (session->cipher_key.data 
== NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op ==
				RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -1;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* Only dereference the cipher xform when one was supplied; an
	 * integrity-only PDCP session has no cipher transform.
	 */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}

static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case
RTE_SECURITY_PROTOCOL_IPSEC: 2884 ret = dpaa_sec_set_ipsec_session(cdev, conf, 2885 sess_private_data); 2886 break; 2887 case RTE_SECURITY_PROTOCOL_PDCP: 2888 ret = dpaa_sec_set_pdcp_session(cdev, conf, 2889 sess_private_data); 2890 break; 2891 case RTE_SECURITY_PROTOCOL_MACSEC: 2892 return -ENOTSUP; 2893 default: 2894 return -EINVAL; 2895 } 2896 if (ret != 0) { 2897 DPAA_SEC_ERR("failed to configure session parameters"); 2898 /* Return session to mempool */ 2899 rte_mempool_put(mempool, sess_private_data); 2900 return ret; 2901 } 2902 2903 set_sec_session_private_data(sess, sess_private_data); 2904 2905 return ret; 2906 } 2907 2908 /** Clear the memory of session so it doesn't leave key material behind */ 2909 static int 2910 dpaa_sec_security_session_destroy(void *dev __rte_unused, 2911 struct rte_security_session *sess) 2912 { 2913 PMD_INIT_FUNC_TRACE(); 2914 void *sess_priv = get_sec_session_private_data(sess); 2915 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2916 2917 if (sess_priv) { 2918 free_session_memory((struct rte_cryptodev *)dev, s); 2919 set_sec_session_private_data(sess, NULL); 2920 } 2921 return 0; 2922 } 2923 #endif 2924 static int 2925 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 2926 struct rte_cryptodev_config *config __rte_unused) 2927 { 2928 PMD_INIT_FUNC_TRACE(); 2929 2930 return 0; 2931 } 2932 2933 static int 2934 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused) 2935 { 2936 PMD_INIT_FUNC_TRACE(); 2937 return 0; 2938 } 2939 2940 static void 2941 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused) 2942 { 2943 PMD_INIT_FUNC_TRACE(); 2944 } 2945 2946 static int 2947 dpaa_sec_dev_close(struct rte_cryptodev *dev) 2948 { 2949 PMD_INIT_FUNC_TRACE(); 2950 2951 if (dev == NULL) 2952 return -ENOMEM; 2953 2954 return 0; 2955 } 2956 2957 static void 2958 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev, 2959 struct rte_cryptodev_info *info) 2960 { 2961 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2962 2963 PMD_INIT_FUNC_TRACE(); 2964 if (info != NULL) { 2965 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 2966 info->feature_flags = dev->feature_flags; 2967 info->capabilities = dpaa_sec_capabilities; 2968 info->sym.max_nb_sessions = internals->max_nb_sessions; 2969 info->driver_id = cryptodev_driver_id; 2970 } 2971 } 2972 2973 static enum qman_cb_dqrr_result 2974 dpaa_sec_process_parallel_event(void *event, 2975 struct qman_portal *qm __always_unused, 2976 struct qman_fq *outq, 2977 const struct qm_dqrr_entry *dqrr, 2978 void **bufs) 2979 { 2980 const struct qm_fd *fd; 2981 struct dpaa_sec_job *job; 2982 struct dpaa_sec_op_ctx *ctx; 2983 struct rte_event *ev = (struct rte_event *)event; 2984 2985 fd = &dqrr->fd; 2986 2987 /* sg is embedded in an op ctx, 2988 * sg[0] is for output 2989 * sg[1] for input 2990 */ 2991 job = dpaa_mem_ptov(qm_fd_addr_get64(fd)); 2992 2993 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 2994 ctx->fd_status = fd->status; 2995 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 2996 struct qm_sg_entry *sg_out; 2997 uint32_t len; 2998 2999 sg_out = &job->sg[0]; 3000 hw_sg_to_cpu(sg_out); 3001 len = sg_out->length; 3002 ctx->op->sym->m_src->pkt_len = len; 3003 ctx->op->sym->m_src->data_len = len; 3004 } 3005 if (!ctx->fd_status) { 3006 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3007 } else { 3008 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3009 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3010 } 3011 ev->event_ptr = (void *)ctx->op; 3012 3013 
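	/* The remaining event fields are taken from the template stored on
	 * the out FQ by dpaa_sec_eventq_attach(), so the completed crypto
	 * op is delivered to the event device with the queue's configured
	 * flow, scheduling type and priority.
	 */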
ev->flow_id = outq->ev.flow_id; 3014 ev->sub_event_type = outq->ev.sub_event_type; 3015 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3016 ev->op = RTE_EVENT_OP_NEW; 3017 ev->sched_type = outq->ev.sched_type; 3018 ev->queue_id = outq->ev.queue_id; 3019 ev->priority = outq->ev.priority; 3020 *bufs = (void *)ctx->op; 3021 3022 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3023 3024 return qman_cb_dqrr_consume; 3025 } 3026 3027 static enum qman_cb_dqrr_result 3028 dpaa_sec_process_atomic_event(void *event, 3029 struct qman_portal *qm __rte_unused, 3030 struct qman_fq *outq, 3031 const struct qm_dqrr_entry *dqrr, 3032 void **bufs) 3033 { 3034 u8 index; 3035 const struct qm_fd *fd; 3036 struct dpaa_sec_job *job; 3037 struct dpaa_sec_op_ctx *ctx; 3038 struct rte_event *ev = (struct rte_event *)event; 3039 3040 fd = &dqrr->fd; 3041 3042 /* sg is embedded in an op ctx, 3043 * sg[0] is for output 3044 * sg[1] for input 3045 */ 3046 job = dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3047 3048 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3049 ctx->fd_status = fd->status; 3050 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3051 struct qm_sg_entry *sg_out; 3052 uint32_t len; 3053 3054 sg_out = &job->sg[0]; 3055 hw_sg_to_cpu(sg_out); 3056 len = sg_out->length; 3057 ctx->op->sym->m_src->pkt_len = len; 3058 ctx->op->sym->m_src->data_len = len; 3059 } 3060 if (!ctx->fd_status) { 3061 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3062 } else { 3063 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3064 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3065 } 3066 ev->event_ptr = (void *)ctx->op; 3067 ev->flow_id = outq->ev.flow_id; 3068 ev->sub_event_type = outq->ev.sub_event_type; 3069 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3070 ev->op = RTE_EVENT_OP_NEW; 3071 ev->sched_type = outq->ev.sched_type; 3072 ev->queue_id = outq->ev.queue_id; 3073 ev->priority = outq->ev.priority; 3074 3075 /* Save active dqrr entries */ 3076 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1); 3077 DPAA_PER_LCORE_DQRR_SIZE++; 3078 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 3079 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src; 3080 ev->impl_opaque = index + 1; 3081 ctx->op->sym->m_src->seqn = (uint32_t)index + 1; 3082 *bufs = (void *)ctx->op; 3083 3084 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3085 3086 return qman_cb_dqrr_defer; 3087 } 3088 3089 int 3090 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, 3091 int qp_id, 3092 uint16_t ch_id, 3093 const struct rte_event *event) 3094 { 3095 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3096 struct qm_mcc_initfq opts = {0}; 3097 3098 int ret; 3099 3100 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3101 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3102 opts.fqd.dest.channel = ch_id; 3103 3104 switch (event->sched_type) { 3105 case RTE_SCHED_TYPE_ATOMIC: 3106 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 3107 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 3108 * configuration with HOLD_ACTIVE setting 3109 */ 3110 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 3111 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; 3112 break; 3113 case RTE_SCHED_TYPE_ORDERED: 3114 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n"); 3115 return -1; 3116 default: 3117 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 3118 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event; 3119 break; 3120 } 3121 3122 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts); 3123 if (unlikely(ret)) { 3124 DPAA_SEC_ERR("unable to init 
caam source fq!"); 3125 return ret; 3126 } 3127 3128 memcpy(&qp->outq.ev, event, sizeof(struct rte_event)); 3129 3130 return 0; 3131 } 3132 3133 int 3134 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev, 3135 int qp_id) 3136 { 3137 struct qm_mcc_initfq opts = {0}; 3138 int ret; 3139 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3140 3141 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3142 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3143 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx; 3144 qp->outq.cb.ern = ern_sec_fq_handler; 3145 qman_retire_fq(&qp->outq, NULL); 3146 qman_oos_fq(&qp->outq); 3147 ret = qman_init_fq(&qp->outq, 0, &opts); 3148 if (ret) 3149 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret); 3150 qp->outq.cb.dqrr = NULL; 3151 3152 return ret; 3153 } 3154 3155 static struct rte_cryptodev_ops crypto_ops = { 3156 .dev_configure = dpaa_sec_dev_configure, 3157 .dev_start = dpaa_sec_dev_start, 3158 .dev_stop = dpaa_sec_dev_stop, 3159 .dev_close = dpaa_sec_dev_close, 3160 .dev_infos_get = dpaa_sec_dev_infos_get, 3161 .queue_pair_setup = dpaa_sec_queue_pair_setup, 3162 .queue_pair_release = dpaa_sec_queue_pair_release, 3163 .queue_pair_count = dpaa_sec_queue_pair_count, 3164 .sym_session_get_size = dpaa_sec_sym_session_get_size, 3165 .sym_session_configure = dpaa_sec_sym_session_configure, 3166 .sym_session_clear = dpaa_sec_sym_session_clear 3167 }; 3168 3169 #ifdef RTE_LIBRTE_SECURITY 3170 static const struct rte_security_capability * 3171 dpaa_sec_capabilities_get(void *device __rte_unused) 3172 { 3173 return dpaa_sec_security_cap; 3174 } 3175 3176 static const struct rte_security_ops dpaa_sec_security_ops = { 3177 .session_create = dpaa_sec_security_session_create, 3178 .session_update = NULL, 3179 .session_stats_get = NULL, 3180 .session_destroy = dpaa_sec_security_session_destroy, 3181 .set_pkt_metadata = NULL, 3182 .capabilities_get = dpaa_sec_capabilities_get 3183 }; 3184 #endif 3185 static int 3186 dpaa_sec_uninit(struct rte_cryptodev *dev) 3187 { 3188 struct dpaa_sec_dev_private *internals; 3189 3190 if (dev == NULL) 3191 return -ENODEV; 3192 3193 internals = dev->data->dev_private; 3194 rte_free(dev->security_ctx); 3195 3196 rte_free(internals); 3197 3198 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", 3199 dev->data->name, rte_socket_id()); 3200 3201 return 0; 3202 } 3203 3204 static int 3205 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) 3206 { 3207 struct dpaa_sec_dev_private *internals; 3208 #ifdef RTE_LIBRTE_SECURITY 3209 struct rte_security_ctx *security_instance; 3210 #endif 3211 struct dpaa_sec_qp *qp; 3212 uint32_t i, flags; 3213 int ret; 3214 3215 PMD_INIT_FUNC_TRACE(); 3216 3217 cryptodev->driver_id = cryptodev_driver_id; 3218 cryptodev->dev_ops = &crypto_ops; 3219 3220 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst; 3221 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst; 3222 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3223 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3224 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3225 RTE_CRYPTODEV_FF_SECURITY | 3226 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3227 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3228 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3229 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3230 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3231 3232 internals = cryptodev->data->dev_private; 3233 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; 3234 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS; 3235 3236 /* 3237 * For secondary processes, we don't initialise any further as 
primary 3238 * has already done this work. Only check we don't need a different 3239 * RX function 3240 */ 3241 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3242 DPAA_SEC_WARN("Device already init by primary process"); 3243 return 0; 3244 } 3245 #ifdef RTE_LIBRTE_SECURITY 3246 /* Initialize security_ctx only for primary process*/ 3247 security_instance = rte_malloc("rte_security_instances_ops", 3248 sizeof(struct rte_security_ctx), 0); 3249 if (security_instance == NULL) 3250 return -ENOMEM; 3251 security_instance->device = (void *)cryptodev; 3252 security_instance->ops = &dpaa_sec_security_ops; 3253 security_instance->sess_cnt = 0; 3254 cryptodev->security_ctx = security_instance; 3255 #endif 3256 rte_spinlock_init(&internals->lock); 3257 for (i = 0; i < internals->max_nb_queue_pairs; i++) { 3258 /* init qman fq for queue pair */ 3259 qp = &internals->qps[i]; 3260 ret = dpaa_sec_init_tx(&qp->outq); 3261 if (ret) { 3262 DPAA_SEC_ERR("config tx of queue pair %d", i); 3263 goto init_error; 3264 } 3265 } 3266 3267 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | 3268 QMAN_FQ_FLAG_TO_DCPORTAL; 3269 for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) { 3270 /* create rx qman fq for sessions*/ 3271 ret = qman_create_fq(0, flags, &internals->inq[i]); 3272 if (unlikely(ret != 0)) { 3273 DPAA_SEC_ERR("sec qman_create_fq failed"); 3274 goto init_error; 3275 } 3276 } 3277 3278 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name); 3279 return 0; 3280 3281 init_error: 3282 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); 3283 3284 dpaa_sec_uninit(cryptodev); 3285 return -EFAULT; 3286 } 3287 3288 static int 3289 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, 3290 struct rte_dpaa_device *dpaa_dev) 3291 { 3292 struct rte_cryptodev *cryptodev; 3293 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3294 3295 int retval; 3296 3297 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name); 3298 3299 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3300 if (cryptodev == NULL) 3301 return -ENOMEM; 3302 3303 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3304 cryptodev->data->dev_private = rte_zmalloc_socket( 3305 "cryptodev private structure", 3306 sizeof(struct dpaa_sec_dev_private), 3307 RTE_CACHE_LINE_SIZE, 3308 rte_socket_id()); 3309 3310 if (cryptodev->data->dev_private == NULL) 3311 rte_panic("Cannot allocate memzone for private " 3312 "device data"); 3313 } 3314 3315 dpaa_dev->crypto_dev = cryptodev; 3316 cryptodev->device = &dpaa_dev->device; 3317 3318 /* init user callbacks */ 3319 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3320 3321 /* if sec device version is not configured */ 3322 if (!rta_get_sec_era()) { 3323 const struct device_node *caam_node; 3324 3325 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 3326 const uint32_t *prop = of_get_property(caam_node, 3327 "fsl,sec-era", 3328 NULL); 3329 if (prop) { 3330 rta_set_sec_era( 3331 INTL_SEC_ERA(rte_cpu_to_be_32(*prop))); 3332 break; 3333 } 3334 } 3335 } 3336 3337 /* Invoke PMD device initialization function */ 3338 retval = dpaa_sec_dev_init(cryptodev); 3339 if (retval == 0) 3340 return 0; 3341 3342 /* In case of error, cleanup is done */ 3343 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3344 rte_free(cryptodev->data->dev_private); 3345 3346 rte_cryptodev_pmd_release_device(cryptodev); 3347 3348 return -ENXIO; 3349 } 3350 3351 static int 3352 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev) 3353 { 3354 
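	/* Remove mirrors probe: release the PMD private data and security
	 * context via dpaa_sec_uninit(), then free the cryptodev itself.
	 */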
struct rte_cryptodev *cryptodev; 3355 int ret; 3356 3357 cryptodev = dpaa_dev->crypto_dev; 3358 if (cryptodev == NULL) 3359 return -ENODEV; 3360 3361 ret = dpaa_sec_uninit(cryptodev); 3362 if (ret) 3363 return ret; 3364 3365 return rte_cryptodev_pmd_destroy(cryptodev); 3366 } 3367 3368 static struct rte_dpaa_driver rte_dpaa_sec_driver = { 3369 .drv_type = FSL_DPAA_CRYPTO, 3370 .driver = { 3371 .name = "DPAA SEC PMD" 3372 }, 3373 .probe = cryptodev_dpaa_sec_probe, 3374 .remove = cryptodev_dpaa_sec_remove, 3375 }; 3376 3377 static struct cryptodev_driver dpaa_sec_crypto_drv; 3378 3379 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); 3380 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, 3381 cryptodev_driver_id); 3382 3383 RTE_INIT(dpaa_sec_init_log) 3384 { 3385 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa"); 3386 if (dpaa_logtype_sec >= 0) 3387 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE); 3388 } 3389
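/*
 * Minimal usage sketch (illustrative, not part of the PMD): an application
 * typically drives this driver through the generic cryptodev API, roughly:
 *
 *   struct rte_cryptodev_config cfg = {
 *       .socket_id = rte_socket_id(),
 *       .nb_queue_pairs = 1,
 *   };
 *   struct rte_cryptodev_qp_conf qp_conf = {
 *       .nb_descriptors = 2048,
 *       .mp_session = sess_mp,           // application-created mempools
 *       .mp_session_private = sess_priv_mp,
 *   };
 *
 *   rte_cryptodev_configure(dev_id, &cfg);
 *   rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *   rte_cryptodev_start(dev_id);
 *
 *   struct rte_cryptodev_sym_session *s =
 *       rte_cryptodev_sym_session_create(sess_mp);
 *   rte_cryptodev_sym_session_init(dev_id, s, &cx, sess_priv_mp);
 *
 *   // enqueue/dequeue rte_crypto_op bursts on queue pair 0
 *   rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *   rte_cryptodev_dequeue_burst(dev_id, 0, ops, nb_ops);
 *
 * "dev_id", "sess_mp", "sess_priv_mp", "cx", "ops" and "nb_ops" are
 * application-side placeholders.
 */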