/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2024 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <dev_driver.h>
#include <rte_io.h>
#include <rte_ip.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <bus_dpaa_driver.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

#define DRIVER_DUMP_MODE "drv_dump_mode"

/* DPAA_SEC_DP_DUMP levels */
enum dpaa_sec_dump_levels {
	DPAA_SEC_DP_NO_DUMP,
	DPAA_SEC_DP_ERR_DUMP,
	DPAA_SEC_DP_FULL_DUMP
};

uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;

uint8_t dpaa_cryptodev_driver_id;

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}
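/*
 * Allocate a per-op context from the ctx pool of the queue pair attached to
 * the current lcore; only as many SG entries as the caller intends to use
 * are zeroed, in 64-byte strides via dcbz_64().
 */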
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called once per
	 * four SG entries to clear them all. Since dpaa_sec_alloc_ctx() is
	 * called for each packet, memset() would be costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue could be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
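/*
 * DQRR callback for the SEC output queue: recover the op context from the
 * compound frame address; for lookaside-protocol sessions the output mbuf
 * chain lengths are refreshed from the SG length reported by hardware.
 */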
/* frames are enqueued on in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
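/*
 * Build the PDCP shared descriptor for the session. Keys are kept immediate
 * by default and are converted to references (RTA_DATA_PTR) when the RTA
 * inline query reports that they do not fit in the descriptor.
 */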
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (ses->pdcp.sdap_enabled) {
		int nb_keys_to_inline =
				rta_inline_pdcp_sdap_query(authdata.algtype,
						cipherdata.algtype,
						ses->pdcp.sn_size,
						ses->pdcp.hfn_ovd);
		if (nb_keys_to_inline >= 1) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (nb_keys_to_inline >= 2) {
			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}
	} else {
		if (rta_inline_pdcp_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd)) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
						1, swap, &authdata);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		}
	}
	return shared_desc_len;
}
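/*
 * For IPsec, sh_desc[0..2] are used as scratch for rta_inline_ipsec_query():
 * the key lengths go in, and bits 0/1 of sh_desc[2] report whether the
 * cipher and auth keys can stay immediate or must be passed by pointer.
 * The scratch words are cleared again before the real descriptor is built.
 */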
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_ipsec_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2, authdata.algtype, 1);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
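/*
 * Top-level CDB builder: dispatches on the session context type (IPsec,
 * PDCP, cipher-only, auth-only, AEAD, cipher+hash) and finally converts the
 * shared-descriptor header words to big endian for the SEC block.
 */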
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %s (%d)",
				rte_cryptodev_get_cipher_algo_string(ses->cipher_alg),
				ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5:
		case RTE_CRYPTO_AUTH_SHA1:
		case RTE_CRYPTO_AUTH_SHA224:
		case RTE_CRYPTO_AUTH_SHA256:
		case RTE_CRYPTO_AUTH_SHA384:
		case RTE_CRYPTO_AUTH_SHA512:
			shared_desc_len = cnstr_shdsc_hash(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			shared_desc_len = cnstr_shdsc_aes_mac(
						cdb->sh_desc,
						true, swap, SHR_NEVER,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %s (%u)",
				rte_cryptodev_get_auth_algo_string(ses->auth_alg),
				ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	default:
		DPAA_SEC_ERR("error: Unsupported session %d", ses->ctxt);
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
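/*
 * Debug helper invoked from the dequeue path when dpaa_sec_dp_dump is set to
 * DPAA_SEC_DP_FULL_DUMP: dumps session parameters, the shared descriptor,
 * the mbufs and the compound-frame SG entries of a failed operation.
 */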
static void
dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp, FILE *f)
{
	struct dpaa_sec_job *job = &ctx->job;
	struct rte_crypto_op *op = ctx->op;
	dpaa_sec_session *sess = NULL;
	struct sec_cdb c_cdb, *cdb;
	uint8_t bufsize;
	struct rte_crypto_sym_op *sym_op;
	struct qm_sg_entry sg[2];

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
#ifdef RTE_LIB_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
#endif
	if (sess == NULL) {
		printf("session is NULL\n");
		goto mbuf_dump;
	}

	cdb = &sess->cdb;
	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
#ifdef RTE_LIB_SECURITY
	fprintf(f, "\nsession protocol type = %d\n", sess->proto_alg);
#endif
	fprintf(f, "\n****************************************\n"
		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
		"\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
		"\tCipher algmode:\t%d\n", sess->ctxt,
		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
		(uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
		sess->cipher_key.algmode);
	rte_hexdump(f, "cipher key", sess->cipher_key.data,
			sess->cipher_key.length);
	rte_hexdump(f, "auth key", sess->auth_key.data,
			sess->auth_key.length);
	fprintf(f, "\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
		"\taead cipher text:\t%d\n",
		(uint64_t)sess->auth_key.length, sess->auth_key.alg,
		sess->auth_key.algmode,
		sess->iv.length, sess->iv.offset,
		sess->digest_length, sess->auth_only_len,
		sess->auth_cipher_text);
#ifdef RTE_LIB_SECURITY
	fprintf(f, "PDCP session params:\n"
		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
		"\t%d\n\thfn:\t\t%d\n"
		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
		sess->pdcp.hfn_threshold);
#endif
	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
	bufsize = c_cdb.sh_hdr.hi.field.idlen;

	fprintf(f, "cdb = %p\n\n", cdb);
	fprintf(f, "Descriptor size = %d\n", bufsize);
	int m;
	for (m = 0; m < bufsize; m++)
		fprintf(f, "0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));

	fprintf(f, "\n");
mbuf_dump:
	sym_op = op->sym;
	if (sym_op->m_src) {
		fprintf(f, "Source mbuf:\n");
		rte_pktmbuf_dump(f, sym_op->m_src,
				 sym_op->m_src->data_len);
	}
	if (sym_op->m_dst) {
		fprintf(f, "Destination mbuf:\n");
		rte_pktmbuf_dump(f, sym_op->m_dst,
				 sym_op->m_dst->data_len);
	}

	fprintf(f, "Session address = %p\ncipher offset: %d, length: %d\n"
		"auth offset: %d, length: %d\n aead offset: %d, length: %d\n",
		sym_op->session, sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sym_op->auth.data.offset, sym_op->auth.data.length,
		sym_op->aead.data.offset, sym_op->aead.data.length);
	fprintf(f, "\n");

	fprintf(f, "******************************************************\n");
	fprintf(f, "ctx info:\n");
	fprintf(f, "job->sg[0] output info:\n");
	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
	fprintf(f, "\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
		"\n\tbpid = %d\n\toffset = %d\n",
		(uint64_t)sg[0].addr, sg[0].length, sg[0].final,
		sg[0].extension, sg[0].bpid, sg[0].offset);
	fprintf(f, "\njob->sg[1] input info:\n");
	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
	hw_sg_to_cpu(&sg[1]);
	fprintf(f, "\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
		"\n\tbpid = %d\n\toffset = %d\n",
		(uint64_t)sg[1].addr, sg[1].length, sg[1].final,
		sg[1].extension, sg[1].bpid, sg[1].offset);

	fprintf(f, "\nctx pool addr = %p\n", ctx->ctx_pool);
	if (ctx->ctx_pool)
		fprintf(f, "ctx pool available counts = %d\n",
			rte_mempool_avail_count(ctx->ctx_pool));

	fprintf(f, "\nop pool addr = %p\n", op->mempool);
	if (op->mempool)
		fprintf(f, "op pool available counts = %d\n",
			rte_mempool_avail_count(op->mempool));

	fprintf(f, "********************************************************\n");
	fprintf(f, "Queue data:\n");
	fprintf(f, "\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
		"= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
		qp->rx_errs, qp->tx_errs);
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we set QM_VDQCR_EXACT and
	 * ask for the exact number. Otherwise QM_VDQCR_EXACT is not set;
	 * without it the dequeue can return up to two more buffers than
	 * requested, so we request two less in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
				DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
						 ctx->fd_status);
				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
					dpaa_sec_dump(ctx, qp, stdout);
			}
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
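/*
 * Scatter-gather variant of the cipher-only path: sg[0] chains the output
 * segments, sg[1] chains the IV followed by the input segments.
 */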
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
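/*
 * Scatter-gather AEAD (GCM) path: the output chains the ciphertext (plus the
 * digest on encrypt); the input chains IV + optional AAD + payload, with the
 * expected digest appended on decrypt so it can be verified by hardware.
 */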
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
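/*
 * Contiguous-mbuf AEAD (GCM) path: same compound-frame layout as the SG
 * variant above, built from at most 7 SG entries.
 */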
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
				rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
				rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
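/*
 * Scatter-gather cipher+auth (authenc) path: the output carries the
 * ciphertext (plus digest on encrypt); the input carries the IV and the
 * auth range, with the expected digest appended on decrypt.
 */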
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
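/*
 * Contiguous-mbuf cipher+auth (authenc) path, built from at most 7 SG
 * entries.
 */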
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
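/*
 * Lookaside protocol (IPsec/PDCP) path: the whole packet is handed to SEC.
 * The output SG entry is sized to the full remaining mbuf buffer so that the
 * resulting frame has room for added protocol headers and trailers.
 */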
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
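/*
 * Enqueue burst: build one compound frame per op (contiguous or SG variant
 * depending on the mbufs) and pass per-packet overrides (auth-only lengths,
 * PDCP HFN) to the descriptor through fd->cmd / the DPOVRD register.
 */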
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = SECURITY_GET_SESS_PRIV(op->sym->session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
MAX_DPAA_CORES]; 2036 fd->opaque_addr = 0; 2037 fd->cmd = 0; 2038 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg)); 2039 fd->_format1 = qm_fd_compound; 2040 fd->length29 = 2 * sizeof(struct qm_sg_entry); 2041 2042 /* Auth_only_len is set as 0 in descriptor and it is 2043 * overwritten here in the fd.cmd which will update 2044 * the DPOVRD reg. 2045 */ 2046 if (auth_hdr_len || auth_tail_len) { 2047 fd->cmd = 0x80000000; 2048 fd->cmd |= 2049 ((auth_tail_len << 16) | auth_hdr_len); 2050 } 2051 2052 /* In case of PDCP, per packet HFN is stored in 2053 * mbuf priv after sym_op. 2054 */ 2055 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) { 2056 fd->cmd = 0x80000000 | 2057 *((uint32_t *)((uint8_t *)op + 2058 ses->pdcp.hfn_ovd_offset)); 2059 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n", 2060 *((uint32_t *)((uint8_t *)op + 2061 ses->pdcp.hfn_ovd_offset)), 2062 ses->pdcp.hfn_ovd); 2063 } 2064 } 2065 send_pkts: 2066 loop = 0; 2067 while (loop < frames_to_send) { 2068 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop], 2069 &flags[loop], frames_to_send - loop); 2070 } 2071 nb_ops -= frames_to_send; 2072 num_tx += frames_to_send; 2073 } 2074 2075 dpaa_qp->tx_pkts += num_tx; 2076 dpaa_qp->tx_errs += nb_ops - num_tx; 2077 2078 return num_tx; 2079 } 2080 2081 static uint16_t 2082 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 2083 uint16_t nb_ops) 2084 { 2085 uint16_t num_rx; 2086 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; 2087 2088 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 2089 if (rte_dpaa_portal_init((void *)0)) { 2090 DPAA_SEC_ERR("Failure in affining portal"); 2091 return 0; 2092 } 2093 } 2094 2095 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops); 2096 2097 dpaa_qp->rx_pkts += num_rx; 2098 dpaa_qp->rx_errs += nb_ops - num_rx; 2099 2100 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 2101 2102 return num_rx; 2103 } 2104 2105 /** Release queue pair */ 2106 static int 2107 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev, 2108 uint16_t qp_id) 2109 { 2110 struct dpaa_sec_dev_private *internals; 2111 struct dpaa_sec_qp *qp = NULL; 2112 2113 PMD_INIT_FUNC_TRACE(); 2114 2115 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id); 2116 2117 internals = dev->data->dev_private; 2118 if (qp_id >= internals->max_nb_queue_pairs) { 2119 DPAA_SEC_ERR("Max supported qpid %d", 2120 internals->max_nb_queue_pairs); 2121 return -EINVAL; 2122 } 2123 2124 qp = &internals->qps[qp_id]; 2125 rte_mempool_free(qp->ctx_pool); 2126 qp->internals = NULL; 2127 dev->data->queue_pairs[qp_id] = NULL; 2128 2129 return 0; 2130 } 2131 2132 /** Setup a queue pair */ 2133 static int 2134 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 2135 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 2136 __rte_unused int socket_id) 2137 { 2138 struct dpaa_sec_dev_private *internals; 2139 struct dpaa_sec_qp *qp = NULL; 2140 char str[20]; 2141 2142 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); 2143 2144 internals = dev->data->dev_private; 2145 if (qp_id >= internals->max_nb_queue_pairs) { 2146 DPAA_SEC_ERR("Max supported qpid %d", 2147 internals->max_nb_queue_pairs); 2148 return -EINVAL; 2149 } 2150 2151 qp = &internals->qps[qp_id]; 2152 qp->internals = internals; 2153 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d", 2154 dev->data->dev_id, qp_id); 2155 if (!qp->ctx_pool) { 2156 qp->ctx_pool = rte_mempool_create((const char *)str, 2157 CTX_POOL_NUM_BUFS, 2158 CTX_POOL_BUF_SIZE, 2159 CTX_POOL_CACHE_SIZE, 0, 2160 NULL, NULL, NULL, NULL, 2161 SOCKET_ID_ANY, 0); 
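		/*
		 * rte_mempool_create() returns NULL on failure (for example
		 * when memory is exhausted or the pool name already exists),
		 * so verify the context pool before the queue pair is
		 * published to the application.
		 */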
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
			dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %s (%u)",
			rte_cryptodev_get_cipher_algo_string(xform->cipher.algo),
			xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2232 DIR_ENC : DIR_DEC; 2233 2234 return 0; 2235 } 2236 2237 static int 2238 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused, 2239 struct rte_crypto_sym_xform *xform, 2240 dpaa_sec_session *session) 2241 { 2242 session->ctxt = DPAA_SEC_AUTH; 2243 session->auth_alg = xform->auth.algo; 2244 session->auth_key.length = xform->auth.key.length; 2245 if (xform->auth.key.length) { 2246 session->auth_key.data = 2247 rte_zmalloc(NULL, xform->auth.key.length, 2248 RTE_CACHE_LINE_SIZE); 2249 if (session->auth_key.data == NULL) { 2250 DPAA_SEC_ERR("No Memory for auth key"); 2251 return -ENOMEM; 2252 } 2253 memcpy(session->auth_key.data, xform->auth.key.data, 2254 xform->auth.key.length); 2255 2256 } 2257 session->digest_length = xform->auth.digest_length; 2258 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) { 2259 session->iv.offset = xform->auth.iv.offset; 2260 session->iv.length = xform->auth.iv.length; 2261 } 2262 2263 switch (xform->auth.algo) { 2264 case RTE_CRYPTO_AUTH_SHA1: 2265 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2266 session->auth_key.algmode = OP_ALG_AAI_HASH; 2267 break; 2268 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2269 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2270 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2271 break; 2272 case RTE_CRYPTO_AUTH_MD5: 2273 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2274 session->auth_key.algmode = OP_ALG_AAI_HASH; 2275 break; 2276 case RTE_CRYPTO_AUTH_MD5_HMAC: 2277 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2278 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2279 break; 2280 case RTE_CRYPTO_AUTH_SHA224: 2281 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2282 session->auth_key.algmode = OP_ALG_AAI_HASH; 2283 break; 2284 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2285 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2286 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2287 break; 2288 case RTE_CRYPTO_AUTH_SHA256: 2289 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2290 session->auth_key.algmode = OP_ALG_AAI_HASH; 2291 break; 2292 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2293 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2294 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2295 break; 2296 case RTE_CRYPTO_AUTH_SHA384: 2297 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2298 session->auth_key.algmode = OP_ALG_AAI_HASH; 2299 break; 2300 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2301 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2302 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2303 break; 2304 case RTE_CRYPTO_AUTH_SHA512: 2305 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2306 session->auth_key.algmode = OP_ALG_AAI_HASH; 2307 break; 2308 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2309 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2310 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2311 break; 2312 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2313 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9; 2314 session->auth_key.algmode = OP_ALG_AAI_F9; 2315 break; 2316 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2317 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA; 2318 session->auth_key.algmode = OP_ALG_AAI_F9; 2319 break; 2320 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2321 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2322 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC; 2323 break; 2324 case RTE_CRYPTO_AUTH_AES_CMAC: 2325 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2326 session->auth_key.algmode = OP_ALG_AAI_CMAC; 2327 break; 2328 default: 2329 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %s (%u)", 2330 rte_cryptodev_get_auth_algo_string(xform->auth.algo), 2331 xform->auth.algo); 2332 return -ENOTSUP; 2333 } 2334 2335 session->dir = 
(xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 2336 DIR_ENC : DIR_DEC; 2337 2338 return 0; 2339 } 2340 2341 static int 2342 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused, 2343 struct rte_crypto_sym_xform *xform, 2344 dpaa_sec_session *session) 2345 { 2346 2347 struct rte_crypto_cipher_xform *cipher_xform; 2348 struct rte_crypto_auth_xform *auth_xform; 2349 2350 session->ctxt = DPAA_SEC_CIPHER_HASH; 2351 if (session->auth_cipher_text) { 2352 cipher_xform = &xform->cipher; 2353 auth_xform = &xform->next->auth; 2354 } else { 2355 cipher_xform = &xform->next->cipher; 2356 auth_xform = &xform->auth; 2357 } 2358 2359 /* Set IV parameters */ 2360 session->iv.offset = cipher_xform->iv.offset; 2361 session->iv.length = cipher_xform->iv.length; 2362 2363 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2364 RTE_CACHE_LINE_SIZE); 2365 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2366 DPAA_SEC_ERR("No Memory for cipher key"); 2367 return -ENOMEM; 2368 } 2369 session->cipher_key.length = cipher_xform->key.length; 2370 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2371 RTE_CACHE_LINE_SIZE); 2372 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2373 DPAA_SEC_ERR("No Memory for auth key"); 2374 return -ENOMEM; 2375 } 2376 session->auth_key.length = auth_xform->key.length; 2377 memcpy(session->cipher_key.data, cipher_xform->key.data, 2378 cipher_xform->key.length); 2379 memcpy(session->auth_key.data, auth_xform->key.data, 2380 auth_xform->key.length); 2381 2382 session->digest_length = auth_xform->digest_length; 2383 session->auth_alg = auth_xform->algo; 2384 2385 switch (auth_xform->algo) { 2386 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2387 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2388 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2389 break; 2390 case RTE_CRYPTO_AUTH_MD5_HMAC: 2391 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2392 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2393 break; 2394 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2395 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2396 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2397 break; 2398 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2399 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2400 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2401 break; 2402 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2403 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2404 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2405 break; 2406 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2407 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2408 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2409 break; 2410 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2411 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2412 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC; 2413 break; 2414 case RTE_CRYPTO_AUTH_AES_CMAC: 2415 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2416 session->auth_key.algmode = OP_ALG_AAI_CMAC; 2417 break; 2418 default: 2419 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %s (%u)", 2420 rte_cryptodev_get_auth_algo_string(auth_xform->algo), 2421 auth_xform->algo); 2422 return -ENOTSUP; 2423 } 2424 2425 session->cipher_alg = cipher_xform->algo; 2426 2427 switch (cipher_xform->algo) { 2428 case RTE_CRYPTO_CIPHER_AES_CBC: 2429 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2430 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2431 break; 2432 case RTE_CRYPTO_CIPHER_DES_CBC: 2433 session->cipher_key.alg = OP_ALG_ALGSEL_DES; 2434 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2435 break; 2436 case RTE_CRYPTO_CIPHER_3DES_CBC: 2437 
session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2438 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2439 break; 2440 case RTE_CRYPTO_CIPHER_AES_CTR: 2441 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2442 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2443 break; 2444 default: 2445 2446 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %s (%u)", 2447 rte_cryptodev_get_cipher_algo_string(cipher_xform->algo), 2448 cipher_xform->algo); 2449 return -ENOTSUP; 2450 } 2451 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2452 DIR_ENC : DIR_DEC; 2453 return 0; 2454 } 2455 2456 static int 2457 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, 2458 struct rte_crypto_sym_xform *xform, 2459 dpaa_sec_session *session) 2460 { 2461 session->aead_alg = xform->aead.algo; 2462 session->ctxt = DPAA_SEC_AEAD; 2463 session->iv.length = xform->aead.iv.length; 2464 session->iv.offset = xform->aead.iv.offset; 2465 session->auth_only_len = xform->aead.aad_length; 2466 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, 2467 RTE_CACHE_LINE_SIZE); 2468 if (session->aead_key.data == NULL && xform->aead.key.length > 0) { 2469 DPAA_SEC_ERR("No Memory for aead key\n"); 2470 return -ENOMEM; 2471 } 2472 session->aead_key.length = xform->aead.key.length; 2473 session->digest_length = xform->aead.digest_length; 2474 2475 memcpy(session->aead_key.data, xform->aead.key.data, 2476 xform->aead.key.length); 2477 2478 switch (session->aead_alg) { 2479 case RTE_CRYPTO_AEAD_AES_GCM: 2480 session->aead_key.alg = OP_ALG_ALGSEL_AES; 2481 session->aead_key.algmode = OP_ALG_AAI_GCM; 2482 break; 2483 default: 2484 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg); 2485 return -ENOTSUP; 2486 } 2487 2488 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 
2489 DIR_ENC : DIR_DEC; 2490 2491 return 0; 2492 } 2493 2494 static struct qman_fq * 2495 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) 2496 { 2497 unsigned int i; 2498 2499 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2500 if (qi->inq_attach[i] == 0) { 2501 qi->inq_attach[i] = 1; 2502 return &qi->inq[i]; 2503 } 2504 } 2505 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions); 2506 2507 return NULL; 2508 } 2509 2510 static int 2511 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) 2512 { 2513 unsigned int i; 2514 int ret; 2515 2516 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2517 if (&qi->inq[i] == fq) { 2518 ret = qman_retire_fq(fq, NULL); 2519 if (ret != 0) 2520 DPAA_SEC_ERR("Queue %d is not retired" 2521 " err: %d\n", fq->fqid, 2522 ret); 2523 qman_oos_fq(fq); 2524 qi->inq_attach[i] = 0; 2525 return 0; 2526 } 2527 } 2528 return -1; 2529 } 2530 2531 int 2532 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) 2533 { 2534 int ret; 2535 2536 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp; 2537 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 2538 ret = rte_dpaa_portal_init((void *)0); 2539 if (ret) { 2540 DPAA_SEC_ERR("Failure in affining portal"); 2541 return ret; 2542 } 2543 } 2544 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES], 2545 rte_dpaa_mem_vtop(&sess->cdb), 2546 qman_fq_fqid(&qp->outq)); 2547 if (ret) 2548 DPAA_SEC_ERR("Unable to init sec queue"); 2549 2550 return ret; 2551 } 2552 2553 static inline void 2554 free_session_data(dpaa_sec_session *s) 2555 { 2556 if (is_aead(s)) 2557 rte_free(s->aead_key.data); 2558 else { 2559 rte_free(s->auth_key.data); 2560 rte_free(s->cipher_key.data); 2561 } 2562 memset(s, 0, sizeof(dpaa_sec_session)); 2563 } 2564 2565 static int 2566 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, 2567 struct rte_crypto_sym_xform *xform, void *sess) 2568 { 2569 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2570 dpaa_sec_session *session = sess; 2571 uint32_t i; 2572 int ret; 2573 2574 PMD_INIT_FUNC_TRACE(); 2575 2576 if (unlikely(sess == NULL)) { 2577 DPAA_SEC_ERR("invalid session struct"); 2578 return -EINVAL; 2579 } 2580 memset(session, 0, sizeof(dpaa_sec_session)); 2581 2582 /* Default IV length = 0 */ 2583 session->iv.length = 0; 2584 2585 /* Cipher Only */ 2586 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2587 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2588 ret = dpaa_sec_cipher_init(dev, xform, session); 2589 2590 /* Authentication Only */ 2591 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2592 xform->next == NULL) { 2593 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2594 session->ctxt = DPAA_SEC_AUTH; 2595 ret = dpaa_sec_auth_init(dev, xform, session); 2596 2597 /* Cipher then Authenticate */ 2598 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2599 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2600 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 2601 session->auth_cipher_text = 1; 2602 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2603 ret = dpaa_sec_auth_init(dev, xform, session); 2604 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2605 ret = dpaa_sec_cipher_init(dev, xform, session); 2606 else 2607 ret = dpaa_sec_chain_init(dev, xform, session); 2608 } else { 2609 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2610 return -ENOTSUP; 2611 } 2612 /* Authenticate then Cipher */ 2613 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2614 xform->next->type == 
RTE_CRYPTO_SYM_XFORM_CIPHER) { 2615 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 2616 session->auth_cipher_text = 0; 2617 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2618 ret = dpaa_sec_cipher_init(dev, xform, session); 2619 else if (xform->next->cipher.algo 2620 == RTE_CRYPTO_CIPHER_NULL) 2621 ret = dpaa_sec_auth_init(dev, xform, session); 2622 else 2623 ret = dpaa_sec_chain_init(dev, xform, session); 2624 } else { 2625 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2626 return -ENOTSUP; 2627 } 2628 2629 /* AEAD operation for AES-GCM kind of Algorithms */ 2630 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2631 xform->next == NULL) { 2632 ret = dpaa_sec_aead_init(dev, xform, session); 2633 2634 } else { 2635 DPAA_SEC_ERR("Invalid crypto type"); 2636 return -EINVAL; 2637 } 2638 if (ret) { 2639 DPAA_SEC_ERR("unable to init session"); 2640 goto err1; 2641 } 2642 2643 rte_spinlock_lock(&internals->lock); 2644 for (i = 0; i < MAX_DPAA_CORES; i++) { 2645 session->inq[i] = dpaa_sec_attach_rxq(internals); 2646 if (session->inq[i] == NULL) { 2647 DPAA_SEC_ERR("unable to attach sec queue"); 2648 rte_spinlock_unlock(&internals->lock); 2649 ret = -EBUSY; 2650 goto err1; 2651 } 2652 } 2653 rte_spinlock_unlock(&internals->lock); 2654 2655 return 0; 2656 2657 err1: 2658 free_session_data(session); 2659 return ret; 2660 } 2661 2662 static int 2663 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, 2664 struct rte_crypto_sym_xform *xform, 2665 struct rte_cryptodev_sym_session *sess) 2666 { 2667 void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 2668 int ret; 2669 2670 PMD_INIT_FUNC_TRACE(); 2671 2672 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); 2673 if (ret != 0) { 2674 DPAA_SEC_ERR("failed to configure session parameters"); 2675 return ret; 2676 } 2677 2678 ret = dpaa_sec_prep_cdb(sess_private_data); 2679 if (ret) { 2680 DPAA_SEC_ERR("Unable to prepare sec cdb"); 2681 return ret; 2682 } 2683 2684 return 0; 2685 } 2686 2687 static inline void 2688 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s) 2689 { 2690 struct dpaa_sec_dev_private *qi = dev->data->dev_private; 2691 uint8_t i; 2692 2693 for (i = 0; i < MAX_DPAA_CORES; i++) { 2694 if (s->inq[i]) 2695 dpaa_sec_detach_rxq(qi, s->inq[i]); 2696 s->inq[i] = NULL; 2697 s->qp[i] = NULL; 2698 } 2699 free_session_data(s); 2700 } 2701 2702 /** Clear the memory of session so it doesn't leave key material behind */ 2703 static void 2704 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, 2705 struct rte_cryptodev_sym_session *sess) 2706 { 2707 PMD_INIT_FUNC_TRACE(); 2708 void *sess_priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 2709 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2710 2711 free_session_memory(dev, s); 2712 } 2713 2714 static int 2715 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2716 struct rte_security_ipsec_xform *ipsec_xform, 2717 dpaa_sec_session *session) 2718 { 2719 PMD_INIT_FUNC_TRACE(); 2720 2721 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2722 RTE_CACHE_LINE_SIZE); 2723 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2724 DPAA_SEC_ERR("No Memory for aead key"); 2725 return -ENOMEM; 2726 } 2727 memcpy(session->aead_key.data, aead_xform->key.data, 2728 aead_xform->key.length); 2729 2730 session->digest_length = aead_xform->digest_length; 2731 session->aead_key.length = aead_xform->key.length; 2732 2733 switch (aead_xform->algo) { 2734 case RTE_CRYPTO_AEAD_AES_GCM: 2735 switch 
(session->digest_length) { 2736 case 8: 2737 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8; 2738 break; 2739 case 12: 2740 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12; 2741 break; 2742 case 16: 2743 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16; 2744 break; 2745 default: 2746 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d", 2747 session->digest_length); 2748 return -EINVAL; 2749 } 2750 if (session->dir == DIR_ENC) { 2751 memcpy(session->encap_pdb.gcm.salt, 2752 (uint8_t *)&(ipsec_xform->salt), 4); 2753 } else { 2754 memcpy(session->decap_pdb.gcm.salt, 2755 (uint8_t *)&(ipsec_xform->salt), 4); 2756 } 2757 session->aead_key.algmode = OP_ALG_AAI_GCM; 2758 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2759 break; 2760 default: 2761 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u", 2762 aead_xform->algo); 2763 return -ENOTSUP; 2764 } 2765 return 0; 2766 } 2767 2768 static int 2769 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2770 struct rte_crypto_auth_xform *auth_xform, 2771 struct rte_security_ipsec_xform *ipsec_xform, 2772 dpaa_sec_session *session) 2773 { 2774 if (cipher_xform) { 2775 session->cipher_key.data = rte_zmalloc(NULL, 2776 cipher_xform->key.length, 2777 RTE_CACHE_LINE_SIZE); 2778 if (session->cipher_key.data == NULL && 2779 cipher_xform->key.length > 0) { 2780 DPAA_SEC_ERR("No Memory for cipher key"); 2781 return -ENOMEM; 2782 } 2783 2784 session->cipher_key.length = cipher_xform->key.length; 2785 memcpy(session->cipher_key.data, cipher_xform->key.data, 2786 cipher_xform->key.length); 2787 session->cipher_alg = cipher_xform->algo; 2788 } else { 2789 session->cipher_key.data = NULL; 2790 session->cipher_key.length = 0; 2791 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2792 } 2793 2794 if (auth_xform) { 2795 session->auth_key.data = rte_zmalloc(NULL, 2796 auth_xform->key.length, 2797 RTE_CACHE_LINE_SIZE); 2798 if (session->auth_key.data == NULL && 2799 auth_xform->key.length > 0) { 2800 DPAA_SEC_ERR("No Memory for auth key"); 2801 return -ENOMEM; 2802 } 2803 session->auth_key.length = auth_xform->key.length; 2804 memcpy(session->auth_key.data, auth_xform->key.data, 2805 auth_xform->key.length); 2806 session->auth_alg = auth_xform->algo; 2807 session->digest_length = auth_xform->digest_length; 2808 } else { 2809 session->auth_key.data = NULL; 2810 session->auth_key.length = 0; 2811 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2812 } 2813 2814 switch (session->auth_alg) { 2815 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2816 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96; 2817 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2818 break; 2819 case RTE_CRYPTO_AUTH_MD5_HMAC: 2820 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96; 2821 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2822 break; 2823 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2824 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2825 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2826 if (session->digest_length != 16) 2827 DPAA_SEC_WARN( 2828 "+++Using sha256-hmac truncated len is non-standard," 2829 "it will not work with lookaside proto"); 2830 break; 2831 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2832 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2833 if (session->digest_length == 6) 2834 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_96; 2835 else if (session->digest_length == 14) 2836 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_224; 2837 else 2838 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_112; 2839 break; 2840 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2841 session->auth_key.alg = 
OP_PCL_IPSEC_HMAC_SHA2_384_192; 2842 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2843 break; 2844 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2845 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2846 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2847 break; 2848 case RTE_CRYPTO_AUTH_AES_CMAC: 2849 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96; 2850 session->auth_key.algmode = OP_ALG_AAI_CMAC; 2851 break; 2852 case RTE_CRYPTO_AUTH_NULL: 2853 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL; 2854 break; 2855 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2856 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96; 2857 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC; 2858 break; 2859 default: 2860 DPAA_SEC_ERR("Crypto: Unsupported auth alg %s (%u)", 2861 rte_cryptodev_get_auth_algo_string(session->auth_alg), 2862 session->auth_alg); 2863 return -ENOTSUP; 2864 } 2865 2866 switch (session->cipher_alg) { 2867 case RTE_CRYPTO_CIPHER_AES_CBC: 2868 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC; 2869 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2870 break; 2871 case RTE_CRYPTO_CIPHER_DES_CBC: 2872 session->cipher_key.alg = OP_PCL_IPSEC_DES; 2873 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2874 break; 2875 case RTE_CRYPTO_CIPHER_3DES_CBC: 2876 session->cipher_key.alg = OP_PCL_IPSEC_3DES; 2877 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2878 break; 2879 case RTE_CRYPTO_CIPHER_AES_CTR: 2880 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR; 2881 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2882 if (session->dir == DIR_ENC) { 2883 session->encap_pdb.ctr.ctr_initial = 0x00000001; 2884 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2885 } else { 2886 session->decap_pdb.ctr.ctr_initial = 0x00000001; 2887 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2888 } 2889 break; 2890 case RTE_CRYPTO_CIPHER_NULL: 2891 session->cipher_key.alg = OP_PCL_IPSEC_NULL; 2892 break; 2893 default: 2894 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)", 2895 rte_cryptodev_get_cipher_algo_string(session->cipher_alg), 2896 session->cipher_alg); 2897 return -ENOTSUP; 2898 } 2899 2900 return 0; 2901 } 2902 2903 static int 2904 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, 2905 struct rte_security_session_conf *conf, 2906 void *sess) 2907 { 2908 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2909 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2910 struct rte_crypto_auth_xform *auth_xform = NULL; 2911 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2912 struct rte_crypto_aead_xform *aead_xform = NULL; 2913 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2914 uint32_t i; 2915 int ret; 2916 2917 PMD_INIT_FUNC_TRACE(); 2918 2919 memset(session, 0, sizeof(dpaa_sec_session)); 2920 session->proto_alg = conf->protocol; 2921 session->ctxt = DPAA_SEC_IPSEC; 2922 2923 if (ipsec_xform->life.bytes_hard_limit != 0 || 2924 ipsec_xform->life.bytes_soft_limit != 0 || 2925 ipsec_xform->life.packets_hard_limit != 0 || 2926 ipsec_xform->life.packets_soft_limit != 0) 2927 return -ENOTSUP; 2928 2929 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 2930 session->dir = DIR_ENC; 2931 else 2932 session->dir = DIR_DEC; 2933 2934 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2935 cipher_xform = &conf->crypto_xform->cipher; 2936 if (conf->crypto_xform->next) 2937 auth_xform = &conf->crypto_xform->next->auth; 2938 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2939 ipsec_xform, session); 2940 } else if (conf->crypto_xform->type == 
RTE_CRYPTO_SYM_XFORM_AUTH) { 2941 auth_xform = &conf->crypto_xform->auth; 2942 if (conf->crypto_xform->next) 2943 cipher_xform = &conf->crypto_xform->next->cipher; 2944 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2945 ipsec_xform, session); 2946 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2947 aead_xform = &conf->crypto_xform->aead; 2948 ret = dpaa_sec_ipsec_aead_init(aead_xform, 2949 ipsec_xform, session); 2950 } else { 2951 DPAA_SEC_ERR("XFORM not specified"); 2952 ret = -EINVAL; 2953 goto out; 2954 } 2955 if (ret) { 2956 DPAA_SEC_ERR("Failed to process xform"); 2957 goto out; 2958 } 2959 2960 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2961 if (ipsec_xform->tunnel.type == 2962 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2963 session->ip4_hdr.ip_v = IPVERSION; 2964 session->ip4_hdr.ip_hl = 5; 2965 session->ip4_hdr.ip_len = rte_cpu_to_be_16( 2966 sizeof(session->ip4_hdr)); 2967 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2968 session->ip4_hdr.ip_id = 0; 2969 session->ip4_hdr.ip_off = 0; 2970 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2971 session->ip4_hdr.ip_p = (ipsec_xform->proto == 2972 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2973 IPPROTO_ESP : IPPROTO_AH; 2974 session->ip4_hdr.ip_sum = 0; 2975 session->ip4_hdr.ip_src = 2976 ipsec_xform->tunnel.ipv4.src_ip; 2977 session->ip4_hdr.ip_dst = 2978 ipsec_xform->tunnel.ipv4.dst_ip; 2979 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 2980 (void *)&session->ip4_hdr, 2981 sizeof(struct ip)); 2982 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 2983 } else if (ipsec_xform->tunnel.type == 2984 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2985 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2986 DPAA_IPv6_DEFAULT_VTC_FLOW | 2987 ((ipsec_xform->tunnel.ipv6.dscp << 2988 RTE_IPV6_HDR_TC_SHIFT) & 2989 RTE_IPV6_HDR_TC_MASK) | 2990 ((ipsec_xform->tunnel.ipv6.flabel << 2991 RTE_IPV6_HDR_FL_SHIFT) & 2992 RTE_IPV6_HDR_FL_MASK)); 2993 /* Payload length will be updated by HW */ 2994 session->ip6_hdr.payload_len = 0; 2995 session->ip6_hdr.hop_limits = 2996 ipsec_xform->tunnel.ipv6.hlimit; 2997 session->ip6_hdr.proto = (ipsec_xform->proto == 2998 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2999 IPPROTO_ESP : IPPROTO_AH; 3000 memcpy(&session->ip6_hdr.src_addr, 3001 &ipsec_xform->tunnel.ipv6.src_addr, 16); 3002 memcpy(&session->ip6_hdr.dst_addr, 3003 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 3004 session->encap_pdb.ip_hdr_len = 3005 sizeof(struct rte_ipv6_hdr); 3006 } 3007 3008 session->encap_pdb.options = 3009 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 3010 PDBOPTS_ESP_OIHI_PDB_INL | 3011 PDBOPTS_ESP_IVSRC | 3012 PDBHMO_ESP_SNR; 3013 if (ipsec_xform->options.dec_ttl) 3014 session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; 3015 if (ipsec_xform->options.esn) 3016 session->encap_pdb.options |= PDBOPTS_ESP_ESN; 3017 session->encap_pdb.spi = ipsec_xform->spi; 3018 3019 } else if (ipsec_xform->direction == 3020 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 3021 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) 3022 session->decap_pdb.options = sizeof(struct ip) << 16; 3023 else 3024 session->decap_pdb.options = 3025 sizeof(struct rte_ipv6_hdr) << 16; 3026 if (ipsec_xform->options.esn) 3027 session->decap_pdb.options |= PDBOPTS_ESP_ESN; 3028 if (ipsec_xform->replay_win_sz) { 3029 uint32_t win_sz; 3030 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 3031 3032 switch (win_sz) { 3033 case 1: 3034 case 2: 3035 case 4: 3036 case 8: 3037 case 16: 3038 case 32: 3039 session->decap_pdb.options |= PDBOPTS_ESP_ARS32; 3040 break; 3041 case 64: 3042 session->decap_pdb.options |= PDBOPTS_ESP_ARS64; 3043 break; 3044 default: 3045 session->decap_pdb.options |= 3046 PDBOPTS_ESP_ARS128; 3047 } 3048 } 3049 } else 3050 goto out; 3051 rte_spinlock_lock(&internals->lock); 3052 for (i = 0; i < MAX_DPAA_CORES; i++) { 3053 session->inq[i] = dpaa_sec_attach_rxq(internals); 3054 if (session->inq[i] == NULL) { 3055 DPAA_SEC_ERR("unable to attach sec queue"); 3056 rte_spinlock_unlock(&internals->lock); 3057 goto out; 3058 } 3059 } 3060 rte_spinlock_unlock(&internals->lock); 3061 3062 return 0; 3063 out: 3064 free_session_data(session); 3065 return -1; 3066 } 3067 3068 static int 3069 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, 3070 struct rte_security_session_conf *conf, 3071 void *sess) 3072 { 3073 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3074 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3075 struct rte_crypto_auth_xform *auth_xform = NULL; 3076 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3077 dpaa_sec_session *session = (dpaa_sec_session *)sess; 3078 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; 3079 uint32_t i; 3080 int ret; 3081 3082 PMD_INIT_FUNC_TRACE(); 3083 3084 memset(session, 0, sizeof(dpaa_sec_session)); 3085 3086 /* find xfrm types */ 3087 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3088 cipher_xform = &xform->cipher; 3089 if (xform->next != NULL && 3090 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) 3091 auth_xform = &xform->next->auth; 3092 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3093 auth_xform = &xform->auth; 3094 if (xform->next != NULL && 3095 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) 3096 cipher_xform = &xform->next->cipher; 3097 } else { 3098 DPAA_SEC_ERR("Invalid crypto type"); 3099 return -EINVAL; 3100 } 3101 3102 session->proto_alg = conf->protocol; 3103 session->ctxt = DPAA_SEC_PDCP; 3104 3105 if (cipher_xform) { 3106 switch (cipher_xform->algo) { 3107 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3108 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW; 3109 break; 3110 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3111 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC; 3112 break; 3113 case 
RTE_CRYPTO_CIPHER_AES_CTR: 3114 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES; 3115 break; 3116 case RTE_CRYPTO_CIPHER_NULL: 3117 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL; 3118 break; 3119 default: 3120 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 3121 session->cipher_alg); 3122 return -EINVAL; 3123 } 3124 3125 session->cipher_key.data = rte_zmalloc(NULL, 3126 cipher_xform->key.length, 3127 RTE_CACHE_LINE_SIZE); 3128 if (session->cipher_key.data == NULL && 3129 cipher_xform->key.length > 0) { 3130 DPAA_SEC_ERR("No Memory for cipher key"); 3131 return -ENOMEM; 3132 } 3133 session->cipher_key.length = cipher_xform->key.length; 3134 memcpy(session->cipher_key.data, cipher_xform->key.data, 3135 cipher_xform->key.length); 3136 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 3137 DIR_ENC : DIR_DEC; 3138 session->cipher_alg = cipher_xform->algo; 3139 } else { 3140 session->cipher_key.data = NULL; 3141 session->cipher_key.length = 0; 3142 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3143 session->dir = DIR_ENC; 3144 } 3145 3146 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3147 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 && 3148 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) { 3149 DPAA_SEC_ERR( 3150 "PDCP Seq Num size should be 5/12 bits for cmode"); 3151 ret = -EINVAL; 3152 goto out; 3153 } 3154 } 3155 3156 if (auth_xform) { 3157 switch (auth_xform->algo) { 3158 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3159 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW; 3160 break; 3161 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3162 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC; 3163 break; 3164 case RTE_CRYPTO_AUTH_AES_CMAC: 3165 session->auth_key.alg = PDCP_AUTH_TYPE_AES; 3166 break; 3167 case RTE_CRYPTO_AUTH_NULL: 3168 session->auth_key.alg = PDCP_AUTH_TYPE_NULL; 3169 break; 3170 default: 3171 DPAA_SEC_ERR("Crypto: Unsupported auth alg %s (%u)", 3172 rte_cryptodev_get_auth_algo_string(session->auth_alg), 3173 session->auth_alg); 3174 rte_free(session->cipher_key.data); 3175 return -EINVAL; 3176 } 3177 session->auth_key.data = rte_zmalloc(NULL, 3178 auth_xform->key.length, 3179 RTE_CACHE_LINE_SIZE); 3180 if (!session->auth_key.data && 3181 auth_xform->key.length > 0) { 3182 DPAA_SEC_ERR("No Memory for auth key"); 3183 rte_free(session->cipher_key.data); 3184 return -ENOMEM; 3185 } 3186 session->auth_key.length = auth_xform->key.length; 3187 memcpy(session->auth_key.data, auth_xform->key.data, 3188 auth_xform->key.length); 3189 session->auth_alg = auth_xform->algo; 3190 } else { 3191 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3192 DPAA_SEC_ERR("Crypto: Integrity must for c-plane"); 3193 ret = -EINVAL; 3194 goto out; 3195 } 3196 session->auth_key.data = NULL; 3197 session->auth_key.length = 0; 3198 session->auth_alg = 0; 3199 } 3200 session->pdcp.domain = pdcp_xform->domain; 3201 session->pdcp.bearer = pdcp_xform->bearer; 3202 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3203 session->pdcp.sn_size = pdcp_xform->sn_size; 3204 session->pdcp.hfn = pdcp_xform->hfn; 3205 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3206 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3207 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled; 3208 if (cipher_xform) 3209 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3210 3211 rte_spinlock_lock(&dev_priv->lock); 3212 for (i = 0; i < MAX_DPAA_CORES; i++) { 3213 session->inq[i] = dpaa_sec_attach_rxq(dev_priv); 3214 if (session->inq[i] == NULL) { 3215 DPAA_SEC_ERR("unable to attach sec queue"); 3216 
rte_spinlock_unlock(&dev_priv->lock); 3217 ret = -EBUSY; 3218 goto out; 3219 } 3220 } 3221 rte_spinlock_unlock(&dev_priv->lock); 3222 return 0; 3223 out: 3224 rte_free(session->auth_key.data); 3225 rte_free(session->cipher_key.data); 3226 memset(session, 0, sizeof(dpaa_sec_session)); 3227 return ret; 3228 } 3229 3230 static int 3231 dpaa_sec_security_session_create(void *dev, 3232 struct rte_security_session_conf *conf, 3233 struct rte_security_session *sess) 3234 { 3235 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3236 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3237 int ret; 3238 3239 switch (conf->protocol) { 3240 case RTE_SECURITY_PROTOCOL_IPSEC: 3241 ret = dpaa_sec_set_ipsec_session(cdev, conf, 3242 sess_private_data); 3243 break; 3244 case RTE_SECURITY_PROTOCOL_PDCP: 3245 ret = dpaa_sec_set_pdcp_session(cdev, conf, 3246 sess_private_data); 3247 break; 3248 case RTE_SECURITY_PROTOCOL_MACSEC: 3249 return -ENOTSUP; 3250 default: 3251 return -EINVAL; 3252 } 3253 if (ret != 0) { 3254 DPAA_SEC_ERR("failed to configure session parameters"); 3255 return ret; 3256 } 3257 3258 ret = dpaa_sec_prep_cdb(sess_private_data); 3259 if (ret) { 3260 DPAA_SEC_ERR("Unable to prepare sec cdb"); 3261 return ret; 3262 } 3263 3264 return ret; 3265 } 3266 3267 /** Clear the memory of session so it doesn't leave key material behind */ 3268 static int 3269 dpaa_sec_security_session_destroy(void *dev __rte_unused, 3270 struct rte_security_session *sess) 3271 { 3272 PMD_INIT_FUNC_TRACE(); 3273 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 3274 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 3275 3276 if (sess_priv) { 3277 free_session_memory((struct rte_cryptodev *)dev, s); 3278 } 3279 return 0; 3280 } 3281 3282 static unsigned int 3283 dpaa_sec_security_session_get_size(void *device __rte_unused) 3284 { 3285 return sizeof(dpaa_sec_session); 3286 } 3287 3288 static int 3289 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3290 struct rte_cryptodev_config *config __rte_unused) 3291 { 3292 PMD_INIT_FUNC_TRACE(); 3293 3294 return 0; 3295 } 3296 3297 static int 3298 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused) 3299 { 3300 PMD_INIT_FUNC_TRACE(); 3301 return 0; 3302 } 3303 3304 static void 3305 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused) 3306 { 3307 PMD_INIT_FUNC_TRACE(); 3308 } 3309 3310 static int 3311 dpaa_sec_dev_close(struct rte_cryptodev *dev) 3312 { 3313 PMD_INIT_FUNC_TRACE(); 3314 3315 if (dev == NULL) 3316 return -ENOMEM; 3317 3318 return 0; 3319 } 3320 3321 static void 3322 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev, 3323 struct rte_cryptodev_info *info) 3324 { 3325 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 3326 3327 PMD_INIT_FUNC_TRACE(); 3328 if (info != NULL) { 3329 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3330 info->feature_flags = dev->feature_flags; 3331 info->capabilities = dpaa_sec_capabilities; 3332 info->sym.max_nb_sessions = internals->max_nb_sessions; 3333 info->driver_id = dpaa_cryptodev_driver_id; 3334 } 3335 } 3336 3337 static enum qman_cb_dqrr_result 3338 dpaa_sec_process_parallel_event(void *event, 3339 struct qman_portal *qm __always_unused, 3340 struct qman_fq *outq, 3341 const struct qm_dqrr_entry *dqrr, 3342 void **bufs) 3343 { 3344 const struct qm_fd *fd; 3345 struct dpaa_sec_job *job; 3346 struct dpaa_sec_op_ctx *ctx; 3347 struct rte_event *ev = (struct rte_event *)event; 3348 3349 fd = &dqrr->fd; 3350 3351 /* sg is embedded in an op ctx, 3352 * sg[0] is for output 
3353 * sg[1] for input 3354 */ 3355 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3356 3357 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3358 ctx->fd_status = fd->status; 3359 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3360 struct qm_sg_entry *sg_out; 3361 uint32_t len; 3362 3363 sg_out = &job->sg[0]; 3364 hw_sg_to_cpu(sg_out); 3365 len = sg_out->length; 3366 ctx->op->sym->m_src->pkt_len = len; 3367 ctx->op->sym->m_src->data_len = len; 3368 } 3369 if (!ctx->fd_status) { 3370 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3371 } else { 3372 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3373 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3374 } 3375 ev->event_ptr = (void *)ctx->op; 3376 3377 ev->flow_id = outq->ev.flow_id; 3378 ev->sub_event_type = outq->ev.sub_event_type; 3379 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3380 ev->op = RTE_EVENT_OP_NEW; 3381 ev->sched_type = outq->ev.sched_type; 3382 ev->queue_id = outq->ev.queue_id; 3383 ev->priority = outq->ev.priority; 3384 *bufs = (void *)ctx->op; 3385 3386 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3387 3388 return qman_cb_dqrr_consume; 3389 } 3390 3391 static enum qman_cb_dqrr_result 3392 dpaa_sec_process_atomic_event(void *event, 3393 struct qman_portal *qm __rte_unused, 3394 struct qman_fq *outq, 3395 const struct qm_dqrr_entry *dqrr, 3396 void **bufs) 3397 { 3398 u8 index; 3399 const struct qm_fd *fd; 3400 struct dpaa_sec_job *job; 3401 struct dpaa_sec_op_ctx *ctx; 3402 struct rte_event *ev = (struct rte_event *)event; 3403 3404 fd = &dqrr->fd; 3405 3406 /* sg is embedded in an op ctx, 3407 * sg[0] is for output 3408 * sg[1] for input 3409 */ 3410 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3411 3412 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3413 ctx->fd_status = fd->status; 3414 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3415 struct qm_sg_entry *sg_out; 3416 uint32_t len; 3417 3418 sg_out = &job->sg[0]; 3419 hw_sg_to_cpu(sg_out); 3420 len = sg_out->length; 3421 ctx->op->sym->m_src->pkt_len = len; 3422 ctx->op->sym->m_src->data_len = len; 3423 } 3424 if (!ctx->fd_status) { 3425 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3426 } else { 3427 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3428 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3429 } 3430 ev->event_ptr = (void *)ctx->op; 3431 ev->flow_id = outq->ev.flow_id; 3432 ev->sub_event_type = outq->ev.sub_event_type; 3433 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3434 ev->op = RTE_EVENT_OP_NEW; 3435 ev->sched_type = outq->ev.sched_type; 3436 ev->queue_id = outq->ev.queue_id; 3437 ev->priority = outq->ev.priority; 3438 3439 /* Save active dqrr entries */ 3440 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1); 3441 DPAA_PER_LCORE_DQRR_SIZE++; 3442 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 3443 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src; 3444 ev->impl_opaque = index + 1; 3445 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1; 3446 *bufs = (void *)ctx->op; 3447 3448 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3449 3450 return qman_cb_dqrr_defer; 3451 } 3452 3453 int 3454 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, 3455 int qp_id, 3456 uint16_t ch_id, 3457 const struct rte_event *event) 3458 { 3459 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3460 struct qm_mcc_initfq opts = {0}; 3461 3462 int ret; 3463 3464 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3465 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3466 opts.fqd.dest.channel = ch_id; 3467 
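	/*
	 * The switch below maps the eventdev schedule type onto frame queue
	 * options: atomic queues hold the DQRR entry active so it can be
	 * consumed through DCA on the enqueue side, the default (parallel)
	 * case sets AVOIDBLOCK, and ordered queues are rejected as
	 * unsupported.
	 */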
3468 switch (event->sched_type) { 3469 case RTE_SCHED_TYPE_ATOMIC: 3470 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 3471 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 3472 * configuration with HOLD_ACTIVE setting 3473 */ 3474 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 3475 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; 3476 break; 3477 case RTE_SCHED_TYPE_ORDERED: 3478 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n"); 3479 return -ENOTSUP; 3480 default: 3481 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 3482 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event; 3483 break; 3484 } 3485 3486 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts); 3487 if (unlikely(ret)) { 3488 DPAA_SEC_ERR("unable to init caam source fq!"); 3489 return ret; 3490 } 3491 3492 memcpy(&qp->outq.ev, event, sizeof(struct rte_event)); 3493 3494 return 0; 3495 } 3496 3497 int 3498 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev, 3499 int qp_id) 3500 { 3501 struct qm_mcc_initfq opts = {0}; 3502 int ret; 3503 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3504 3505 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3506 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3507 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx; 3508 qp->outq.cb.ern = ern_sec_fq_handler; 3509 qman_retire_fq(&qp->outq, NULL); 3510 qman_oos_fq(&qp->outq); 3511 ret = qman_init_fq(&qp->outq, 0, &opts); 3512 if (ret) 3513 DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret); 3514 qp->outq.cb.dqrr = NULL; 3515 3516 return ret; 3517 } 3518 3519 static struct rte_cryptodev_ops crypto_ops = { 3520 .dev_configure = dpaa_sec_dev_configure, 3521 .dev_start = dpaa_sec_dev_start, 3522 .dev_stop = dpaa_sec_dev_stop, 3523 .dev_close = dpaa_sec_dev_close, 3524 .dev_infos_get = dpaa_sec_dev_infos_get, 3525 .queue_pair_setup = dpaa_sec_queue_pair_setup, 3526 .queue_pair_release = dpaa_sec_queue_pair_release, 3527 .sym_session_get_size = dpaa_sec_sym_session_get_size, 3528 .sym_session_configure = dpaa_sec_sym_session_configure, 3529 .sym_session_clear = dpaa_sec_sym_session_clear, 3530 /* Raw data-path API related operations */ 3531 .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size, 3532 .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx, 3533 }; 3534 3535 static const struct rte_security_capability * 3536 dpaa_sec_capabilities_get(void *device __rte_unused) 3537 { 3538 return dpaa_sec_security_cap; 3539 } 3540 3541 static const struct rte_security_ops dpaa_sec_security_ops = { 3542 .session_create = dpaa_sec_security_session_create, 3543 .session_update = NULL, 3544 .session_get_size = dpaa_sec_security_session_get_size, 3545 .session_stats_get = NULL, 3546 .session_destroy = dpaa_sec_security_session_destroy, 3547 .set_pkt_metadata = NULL, 3548 .capabilities_get = dpaa_sec_capabilities_get 3549 }; 3550 3551 static int 3552 dpaa_sec_uninit(struct rte_cryptodev *dev) 3553 { 3554 struct dpaa_sec_dev_private *internals; 3555 3556 if (dev == NULL) 3557 return -ENODEV; 3558 3559 internals = dev->data->dev_private; 3560 rte_free(dev->security_ctx); 3561 3562 rte_free(internals); 3563 3564 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", 3565 dev->data->name, rte_socket_id()); 3566 3567 return 0; 3568 } 3569 3570 static int 3571 check_devargs_handler(__rte_unused const char *key, const char *value, 3572 __rte_unused void *opaque) 3573 { 3574 dpaa_sec_dp_dump = atoi(value); 3575 if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) { 3576 DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not " 3577 
"supported, changing to FULL error prints\n"); 3578 dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP; 3579 } 3580 3581 return 0; 3582 } 3583 3584 static void 3585 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key) 3586 { 3587 struct rte_kvargs *kvlist; 3588 3589 if (!devargs) 3590 return; 3591 3592 kvlist = rte_kvargs_parse(devargs->args, NULL); 3593 if (!kvlist) 3594 return; 3595 3596 if (!rte_kvargs_count(kvlist, key)) { 3597 rte_kvargs_free(kvlist); 3598 return; 3599 } 3600 3601 rte_kvargs_process(kvlist, key, 3602 check_devargs_handler, NULL); 3603 rte_kvargs_free(kvlist); 3604 } 3605 3606 static int 3607 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) 3608 { 3609 struct dpaa_sec_dev_private *internals; 3610 struct rte_security_ctx *security_instance; 3611 struct dpaa_sec_qp *qp; 3612 uint32_t i, flags; 3613 int ret; 3614 void *cmd_map; 3615 int map_fd = -1; 3616 3617 PMD_INIT_FUNC_TRACE(); 3618 3619 internals = cryptodev->data->dev_private; 3620 map_fd = open("/dev/mem", O_RDWR); 3621 if (unlikely(map_fd < 0)) { 3622 DPAA_SEC_ERR("Unable to open (/dev/mem)"); 3623 return map_fd; 3624 } 3625 internals->sec_hw = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE, 3626 MAP_SHARED, map_fd, SEC_BASE_ADDR); 3627 if (internals->sec_hw == MAP_FAILED) { 3628 DPAA_SEC_ERR("Memory map failed"); 3629 close(map_fd); 3630 return -EINVAL; 3631 } 3632 cmd_map = (uint8_t *)internals->sec_hw + 3633 (BLOCK_OFFSET * QI_BLOCK_NUMBER) + CMD_REG; 3634 if (!(be32_to_cpu(rte_read32(cmd_map)) & QICTL_DQEN)) 3635 /* enable QI interface */ 3636 rte_write32(cpu_to_be32(QICTL_DQEN), cmd_map); 3637 3638 ret = munmap(internals->sec_hw, MAP_SIZE); 3639 if (ret) 3640 DPAA_SEC_WARN("munmap failed\n"); 3641 3642 close(map_fd); 3643 cryptodev->driver_id = dpaa_cryptodev_driver_id; 3644 cryptodev->dev_ops = &crypto_ops; 3645 3646 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst; 3647 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst; 3648 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3649 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3650 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3651 RTE_CRYPTODEV_FF_SECURITY | 3652 RTE_CRYPTODEV_FF_SYM_RAW_DP | 3653 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3654 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3655 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3656 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3657 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3658 3659 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; 3660 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS; 3661 3662 /* 3663 * For secondary processes, we don't initialise any further as primary 3664 * has already done this work. 
Only check we don't need a different 3665 * RX function 3666 */ 3667 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3668 DPAA_SEC_WARN("Device already init by primary process"); 3669 return 0; 3670 } 3671 /* Initialize security_ctx only for primary process*/ 3672 security_instance = rte_malloc("rte_security_instances_ops", 3673 sizeof(struct rte_security_ctx), 0); 3674 if (security_instance == NULL) 3675 return -ENOMEM; 3676 security_instance->device = (void *)cryptodev; 3677 security_instance->ops = &dpaa_sec_security_ops; 3678 security_instance->sess_cnt = 0; 3679 cryptodev->security_ctx = security_instance; 3680 rte_spinlock_init(&internals->lock); 3681 for (i = 0; i < internals->max_nb_queue_pairs; i++) { 3682 /* init qman fq for queue pair */ 3683 qp = &internals->qps[i]; 3684 ret = dpaa_sec_init_tx(&qp->outq); 3685 if (ret) { 3686 DPAA_SEC_ERR("config tx of queue pair %d", i); 3687 goto init_error; 3688 } 3689 } 3690 3691 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | 3692 QMAN_FQ_FLAG_TO_DCPORTAL; 3693 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 3694 /* create rx qman fq for sessions*/ 3695 ret = qman_create_fq(0, flags, &internals->inq[i]); 3696 if (unlikely(ret != 0)) { 3697 DPAA_SEC_ERR("sec qman_create_fq failed"); 3698 goto init_error; 3699 } 3700 } 3701 3702 dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE); 3703 3704 DPAA_SEC_INFO("%s cryptodev init", cryptodev->data->name); 3705 return 0; 3706 3707 init_error: 3708 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); 3709 3710 rte_free(cryptodev->security_ctx); 3711 return -EFAULT; 3712 } 3713 3714 static int 3715 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, 3716 struct rte_dpaa_device *dpaa_dev) 3717 { 3718 struct rte_cryptodev *cryptodev; 3719 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3720 3721 int retval; 3722 3723 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3724 return 0; 3725 3726 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name); 3727 3728 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3729 if (cryptodev == NULL) 3730 return -ENOMEM; 3731 3732 cryptodev->data->dev_private = rte_zmalloc_socket( 3733 "cryptodev private structure", 3734 sizeof(struct dpaa_sec_dev_private), 3735 RTE_CACHE_LINE_SIZE, 3736 rte_socket_id()); 3737 3738 if (cryptodev->data->dev_private == NULL) 3739 rte_panic("Cannot allocate memzone for private " 3740 "device data"); 3741 3742 dpaa_dev->crypto_dev = cryptodev; 3743 cryptodev->device = &dpaa_dev->device; 3744 3745 /* init user callbacks */ 3746 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3747 3748 /* if sec device version is not configured */ 3749 if (!rta_get_sec_era()) { 3750 const struct device_node *caam_node; 3751 3752 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 3753 const uint32_t *prop = of_get_property(caam_node, 3754 "fsl,sec-era", 3755 NULL); 3756 if (prop) { 3757 rta_set_sec_era( 3758 INTL_SEC_ERA(rte_cpu_to_be_32(*prop))); 3759 break; 3760 } 3761 } 3762 } 3763 3764 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 3765 retval = rte_dpaa_portal_init((void *)1); 3766 if (retval) { 3767 DPAA_SEC_ERR("Unable to initialize portal"); 3768 goto out; 3769 } 3770 } 3771 3772 /* Invoke PMD device initialization function */ 3773 retval = dpaa_sec_dev_init(cryptodev); 3774 if (retval == 0) { 3775 rte_cryptodev_pmd_probing_finish(cryptodev); 3776 return 0; 3777 } 3778 3779 retval = -ENXIO; 3780 out: 3781 /* In case of error, cleanup is done */ 3782 
	rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		dpaa_cryptodev_driver_id);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
			      DRIVER_DUMP_MODE "=<int>");
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
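/*
 * Illustrative sketch (not part of the driver): once a queue pair and a
 * symmetric session are configured, an application reaches the enqueue and
 * dequeue callbacks above through the generic cryptodev burst API.  The
 * dev_id, qp_id and NB_OPS names below are hypothetical placeholders.
 *
 *	struct rte_crypto_op *ops[NB_OPS];
 *	uint16_t enq, deq = 0;
 *
 *	// ops[] carry the mbufs and the session prepared by the application
 *	enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, NB_OPS);
 *	while (deq < enq)
 *		deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						   &ops[deq], enq - deq);
 *
 * The "drv_dump_mode" devargs key registered above is parsed at probe time
 * by check_devargs_handler() via dpaa_sec_get_devargs().
 */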