1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2017-2024 NXP 5 * 6 */ 7 8 #include <fcntl.h> 9 #include <unistd.h> 10 #include <sched.h> 11 #include <net/if.h> 12 13 #include <rte_byteorder.h> 14 #include <rte_common.h> 15 #include <cryptodev_pmd.h> 16 #include <rte_crypto.h> 17 #include <rte_cryptodev.h> 18 #include <rte_security_driver.h> 19 #include <rte_cycles.h> 20 #include <dev_driver.h> 21 #include <rte_io.h> 22 #include <rte_ip.h> 23 #include <rte_udp.h> 24 #include <rte_kvargs.h> 25 #include <rte_malloc.h> 26 #include <rte_mbuf.h> 27 #include <rte_memcpy.h> 28 #include <rte_string_fns.h> 29 #include <rte_spinlock.h> 30 #include <rte_hexdump.h> 31 32 #include <fsl_usd.h> 33 #include <fsl_qman.h> 34 #include <dpaa_of.h> 35 36 /* RTA header files */ 37 #include <desc/common.h> 38 #include <desc/algo.h> 39 #include <desc/ipsec.h> 40 #include <desc/pdcp.h> 41 #include <desc/sdap.h> 42 43 #include <bus_dpaa_driver.h> 44 #include <dpaa_sec.h> 45 #include <dpaa_sec_event.h> 46 #include <dpaa_sec_log.h> 47 #include <dpaax_iova_table.h> 48 49 #define DRIVER_DUMP_MODE "drv_dump_mode" 50 #define DPAA_DEFAULT_NAT_T_PORT 4500 51 52 /* DPAA_SEC_DP_DUMP levels */ 53 enum dpaa_sec_dump_levels { 54 DPAA_SEC_DP_NO_DUMP, 55 DPAA_SEC_DP_ERR_DUMP, 56 DPAA_SEC_DP_FULL_DUMP 57 }; 58 59 uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP; 60 61 uint8_t dpaa_cryptodev_driver_id; 62 63 static inline void 64 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx) 65 { 66 if (!ctx->fd_status) { 67 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 68 } else { 69 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 70 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 71 } 72 } 73 74 static inline struct dpaa_sec_op_ctx * 75 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count) 76 { 77 struct dpaa_sec_op_ctx *ctx; 78 int i, retval; 79 80 retval = rte_mempool_get( 81 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool, 82 (void **)(&ctx)); 83 if (!ctx || retval) { 84 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!"); 85 return NULL; 86 } 87 /* 88 * Clear SG memory. There are 16 SG entries of 16 Bytes each. 89 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times 90 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for 91 * each packet, memset is costlier than dcbz_64(). 
92 */ 93 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4) 94 dcbz_64(&ctx->job.sg[i]); 95 96 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool; 97 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx); 98 99 return ctx; 100 } 101 102 static void 103 ern_sec_fq_handler(struct qman_portal *qm __rte_unused, 104 struct qman_fq *fq, 105 const struct qm_mr_entry *msg) 106 { 107 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x", 108 fq->fqid, msg->ern.rc, msg->ern.seqnum); 109 } 110 111 /* initialize the queue with dest chan as caam chan so that 112 * all the packets in this queue could be dispatched into caam 113 */ 114 static int 115 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc, 116 uint32_t fqid_out) 117 { 118 struct qm_mcc_initfq fq_opts; 119 uint32_t flags; 120 int ret = -1; 121 122 /* Clear FQ options */ 123 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq)); 124 125 flags = QMAN_INITFQ_FLAG_SCHED; 126 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA | 127 QM_INITFQ_WE_CONTEXTB; 128 129 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc); 130 fq_opts.fqd.context_b = fqid_out; 131 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam(); 132 fq_opts.fqd.dest.wq = 0; 133 134 fq_in->cb.ern = ern_sec_fq_handler; 135 136 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out); 137 138 ret = qman_init_fq(fq_in, flags, &fq_opts); 139 if (unlikely(ret != 0)) 140 DPAA_SEC_ERR("qman_init_fq failed %d", ret); 141 142 return ret; 143 } 144 145 /* something is put into in_fq and caam put the crypto result into out_fq */ 146 static enum qman_cb_dqrr_result 147 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused, 148 struct qman_fq *fq __always_unused, 149 const struct qm_dqrr_entry *dqrr) 150 { 151 const struct qm_fd *fd; 152 struct dpaa_sec_job *job; 153 struct dpaa_sec_op_ctx *ctx; 154 155 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID)) 156 return qman_cb_dqrr_consume; 157 158 fd = &dqrr->fd; 159 /* sg is embedded in an op ctx, 160 * sg[0] is for output 161 * sg[1] for input 162 */ 163 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 164 165 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 166 ctx->fd_status = fd->status; 167 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 168 struct qm_sg_entry *sg_out; 169 uint32_t len; 170 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ? 
171 ctx->op->sym->m_src : ctx->op->sym->m_dst; 172 173 sg_out = &job->sg[0]; 174 hw_sg_to_cpu(sg_out); 175 len = sg_out->length; 176 mbuf->pkt_len = len; 177 while (mbuf->next != NULL) { 178 len -= mbuf->data_len; 179 mbuf = mbuf->next; 180 } 181 mbuf->data_len = len; 182 } 183 dpaa_sec_op_ending(ctx); 184 185 return qman_cb_dqrr_consume; 186 } 187 188 /* caam result is put into this queue */ 189 static int 190 dpaa_sec_init_tx(struct qman_fq *fq) 191 { 192 int ret; 193 struct qm_mcc_initfq opts; 194 uint32_t flags; 195 196 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED | 197 QMAN_FQ_FLAG_DYNAMIC_FQID; 198 199 ret = qman_create_fq(0, flags, fq); 200 if (unlikely(ret)) { 201 DPAA_SEC_ERR("qman_create_fq failed"); 202 return ret; 203 } 204 205 memset(&opts, 0, sizeof(opts)); 206 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 207 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 208 209 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */ 210 211 fq->cb.dqrr = dqrr_out_fq_cb_rx; 212 fq->cb.ern = ern_sec_fq_handler; 213 214 ret = qman_init_fq(fq, 0, &opts); 215 if (unlikely(ret)) { 216 DPAA_SEC_ERR("unable to init caam source fq!"); 217 return ret; 218 } 219 220 return ret; 221 } 222 223 static inline int is_aead(dpaa_sec_session *ses) 224 { 225 return ((ses->cipher_alg == 0) && 226 (ses->auth_alg == 0) && 227 (ses->aead_alg != 0)); 228 } 229 230 static inline int is_encode(dpaa_sec_session *ses) 231 { 232 return ses->dir == DIR_ENC; 233 } 234 235 static inline int is_decode(dpaa_sec_session *ses) 236 { 237 return ses->dir == DIR_DEC; 238 } 239 240 static int 241 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses) 242 { 243 struct alginfo authdata = {0}, cipherdata = {0}; 244 struct sec_cdb *cdb = &ses->cdb; 245 struct alginfo *p_authdata = NULL; 246 int32_t shared_desc_len = 0; 247 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 248 int swap = false; 249 #else 250 int swap = true; 251 #endif 252 253 cipherdata.key = (size_t)ses->cipher_key.data; 254 cipherdata.keylen = ses->cipher_key.length; 255 cipherdata.key_enc_flags = 0; 256 cipherdata.key_type = RTA_DATA_IMM; 257 cipherdata.algtype = ses->cipher_key.alg; 258 cipherdata.algmode = ses->cipher_key.algmode; 259 260 if (ses->auth_alg) { 261 authdata.key = (size_t)ses->auth_key.data; 262 authdata.keylen = ses->auth_key.length; 263 authdata.key_enc_flags = 0; 264 authdata.key_type = RTA_DATA_IMM; 265 authdata.algtype = ses->auth_key.alg; 266 authdata.algmode = ses->auth_key.algmode; 267 268 p_authdata = &authdata; 269 } 270 271 if (ses->pdcp.sdap_enabled) { 272 int nb_keys_to_inline = 273 rta_inline_pdcp_sdap_query(authdata.algtype, 274 cipherdata.algtype, 275 ses->pdcp.sn_size, 276 ses->pdcp.hfn_ovd); 277 if (nb_keys_to_inline >= 1) { 278 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *) 279 (size_t)cipherdata.key); 280 cipherdata.key_type = RTA_DATA_PTR; 281 } 282 if (nb_keys_to_inline >= 2) { 283 authdata.key = (size_t)rte_dpaa_mem_vtop((void *) 284 (size_t)authdata.key); 285 authdata.key_type = RTA_DATA_PTR; 286 } 287 } else { 288 if (rta_inline_pdcp_query(authdata.algtype, 289 cipherdata.algtype, 290 ses->pdcp.sn_size, 291 ses->pdcp.hfn_ovd)) { 292 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *) 293 (size_t)cipherdata.key); 294 cipherdata.key_type = RTA_DATA_PTR; 295 } 296 } 297 298 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 299 if (ses->dir == DIR_ENC) 300 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap( 301 cdb->sh_desc, 1, swap, 302 ses->pdcp.hfn, 303 ses->pdcp.sn_size, 304 ses->pdcp.bearer, 305 ses->pdcp.pkt_dir, 306 
ses->pdcp.hfn_threshold, 307 &cipherdata, &authdata); 308 else if (ses->dir == DIR_DEC) 309 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap( 310 cdb->sh_desc, 1, swap, 311 ses->pdcp.hfn, 312 ses->pdcp.sn_size, 313 ses->pdcp.bearer, 314 ses->pdcp.pkt_dir, 315 ses->pdcp.hfn_threshold, 316 &cipherdata, &authdata); 317 } else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) { 318 shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc, 319 1, swap, &authdata); 320 } else { 321 if (ses->dir == DIR_ENC) { 322 if (ses->pdcp.sdap_enabled) 323 shared_desc_len = 324 cnstr_shdsc_pdcp_sdap_u_plane_encap( 325 cdb->sh_desc, 1, swap, 326 ses->pdcp.sn_size, 327 ses->pdcp.hfn, 328 ses->pdcp.bearer, 329 ses->pdcp.pkt_dir, 330 ses->pdcp.hfn_threshold, 331 &cipherdata, p_authdata); 332 else 333 shared_desc_len = 334 cnstr_shdsc_pdcp_u_plane_encap( 335 cdb->sh_desc, 1, swap, 336 ses->pdcp.sn_size, 337 ses->pdcp.hfn, 338 ses->pdcp.bearer, 339 ses->pdcp.pkt_dir, 340 ses->pdcp.hfn_threshold, 341 &cipherdata, p_authdata); 342 } else if (ses->dir == DIR_DEC) { 343 if (ses->pdcp.sdap_enabled) 344 shared_desc_len = 345 cnstr_shdsc_pdcp_sdap_u_plane_decap( 346 cdb->sh_desc, 1, swap, 347 ses->pdcp.sn_size, 348 ses->pdcp.hfn, 349 ses->pdcp.bearer, 350 ses->pdcp.pkt_dir, 351 ses->pdcp.hfn_threshold, 352 &cipherdata, p_authdata); 353 else 354 shared_desc_len = 355 cnstr_shdsc_pdcp_u_plane_decap( 356 cdb->sh_desc, 1, swap, 357 ses->pdcp.sn_size, 358 ses->pdcp.hfn, 359 ses->pdcp.bearer, 360 ses->pdcp.pkt_dir, 361 ses->pdcp.hfn_threshold, 362 &cipherdata, p_authdata); 363 } 364 } 365 return shared_desc_len; 366 } 367 368 /* prepare ipsec proto command block of the session */ 369 static int 370 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses) 371 { 372 struct alginfo cipherdata = {0}, authdata = {0}; 373 struct sec_cdb *cdb = &ses->cdb; 374 int32_t shared_desc_len = 0; 375 int err; 376 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 377 int swap = false; 378 #else 379 int swap = true; 380 #endif 381 382 cipherdata.key = (size_t)ses->cipher_key.data; 383 cipherdata.keylen = ses->cipher_key.length; 384 cipherdata.key_enc_flags = 0; 385 cipherdata.key_type = RTA_DATA_IMM; 386 cipherdata.algtype = ses->cipher_key.alg; 387 cipherdata.algmode = ses->cipher_key.algmode; 388 389 if (ses->auth_key.length) { 390 authdata.key = (size_t)ses->auth_key.data; 391 authdata.keylen = ses->auth_key.length; 392 authdata.key_enc_flags = 0; 393 authdata.key_type = RTA_DATA_IMM; 394 authdata.algtype = ses->auth_key.alg; 395 authdata.algmode = ses->auth_key.algmode; 396 } 397 398 cdb->sh_desc[0] = cipherdata.keylen; 399 cdb->sh_desc[1] = authdata.keylen; 400 err = rta_inline_ipsec_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 401 DESC_JOB_IO_LEN, 402 (unsigned int *)cdb->sh_desc, 403 &cdb->sh_desc[2], 2, authdata.algtype, 1); 404 405 if (err < 0) { 406 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 407 return err; 408 } 409 if (cdb->sh_desc[2] & 1) 410 cipherdata.key_type = RTA_DATA_IMM; 411 else { 412 cipherdata.key = (size_t)rte_dpaa_mem_vtop( 413 (void *)(size_t)cipherdata.key); 414 cipherdata.key_type = RTA_DATA_PTR; 415 } 416 if (cdb->sh_desc[2] & (1<<1)) 417 authdata.key_type = RTA_DATA_IMM; 418 else { 419 authdata.key = (size_t)rte_dpaa_mem_vtop( 420 (void *)(size_t)authdata.key); 421 authdata.key_type = RTA_DATA_PTR; 422 } 423 424 cdb->sh_desc[0] = 0; 425 cdb->sh_desc[1] = 0; 426 cdb->sh_desc[2] = 0; 427 if (ses->dir == DIR_ENC) { 428 shared_desc_len = cnstr_shdsc_ipsec_new_encap( 429 cdb->sh_desc, 430 true, swap, SHR_SERIAL, 431 
&ses->encap_pdb, 432 (uint8_t *)&ses->ip4_hdr, 433 &cipherdata, &authdata); 434 } else if (ses->dir == DIR_DEC) { 435 shared_desc_len = cnstr_shdsc_ipsec_new_decap( 436 cdb->sh_desc, 437 true, swap, SHR_SERIAL, 438 &ses->decap_pdb, 439 &cipherdata, &authdata); 440 } 441 return shared_desc_len; 442 } 443 444 /* prepare command block of the session */ 445 static int 446 dpaa_sec_prep_cdb(dpaa_sec_session *ses) 447 { 448 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0}; 449 int32_t shared_desc_len = 0; 450 struct sec_cdb *cdb = &ses->cdb; 451 int err; 452 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 453 int swap = false; 454 #else 455 int swap = true; 456 #endif 457 458 memset(cdb, 0, sizeof(struct sec_cdb)); 459 460 switch (ses->ctxt) { 461 case DPAA_SEC_IPSEC: 462 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses); 463 break; 464 case DPAA_SEC_PDCP: 465 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses); 466 break; 467 case DPAA_SEC_CIPHER: 468 alginfo_c.key = (size_t)ses->cipher_key.data; 469 alginfo_c.keylen = ses->cipher_key.length; 470 alginfo_c.key_enc_flags = 0; 471 alginfo_c.key_type = RTA_DATA_IMM; 472 alginfo_c.algtype = ses->cipher_key.alg; 473 alginfo_c.algmode = ses->cipher_key.algmode; 474 475 switch (ses->cipher_alg) { 476 case RTE_CRYPTO_CIPHER_AES_CBC: 477 case RTE_CRYPTO_CIPHER_3DES_CBC: 478 case RTE_CRYPTO_CIPHER_DES_CBC: 479 case RTE_CRYPTO_CIPHER_AES_CTR: 480 case RTE_CRYPTO_CIPHER_3DES_CTR: 481 shared_desc_len = cnstr_shdsc_blkcipher( 482 cdb->sh_desc, true, 483 swap, SHR_NEVER, &alginfo_c, 484 ses->iv.length, 485 ses->dir); 486 break; 487 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 488 shared_desc_len = cnstr_shdsc_snow_f8( 489 cdb->sh_desc, true, swap, 490 &alginfo_c, 491 ses->dir); 492 break; 493 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 494 shared_desc_len = cnstr_shdsc_zuce( 495 cdb->sh_desc, true, swap, 496 &alginfo_c, 497 ses->dir); 498 break; 499 default: 500 DPAA_SEC_ERR("unsupported cipher alg %s (%d)", 501 rte_cryptodev_get_cipher_algo_string(ses->cipher_alg), 502 ses->cipher_alg); 503 return -ENOTSUP; 504 } 505 break; 506 case DPAA_SEC_AUTH: 507 alginfo_a.key = (size_t)ses->auth_key.data; 508 alginfo_a.keylen = ses->auth_key.length; 509 alginfo_a.key_enc_flags = 0; 510 alginfo_a.key_type = RTA_DATA_IMM; 511 alginfo_a.algtype = ses->auth_key.alg; 512 alginfo_a.algmode = ses->auth_key.algmode; 513 switch (ses->auth_alg) { 514 case RTE_CRYPTO_AUTH_MD5: 515 case RTE_CRYPTO_AUTH_SHA1: 516 case RTE_CRYPTO_AUTH_SHA224: 517 case RTE_CRYPTO_AUTH_SHA256: 518 case RTE_CRYPTO_AUTH_SHA384: 519 case RTE_CRYPTO_AUTH_SHA512: 520 shared_desc_len = cnstr_shdsc_hash( 521 cdb->sh_desc, true, 522 swap, SHR_NEVER, &alginfo_a, 523 !ses->dir, 524 ses->digest_length); 525 break; 526 case RTE_CRYPTO_AUTH_MD5_HMAC: 527 case RTE_CRYPTO_AUTH_SHA1_HMAC: 528 case RTE_CRYPTO_AUTH_SHA224_HMAC: 529 case RTE_CRYPTO_AUTH_SHA256_HMAC: 530 case RTE_CRYPTO_AUTH_SHA384_HMAC: 531 case RTE_CRYPTO_AUTH_SHA512_HMAC: 532 shared_desc_len = cnstr_shdsc_hmac( 533 cdb->sh_desc, true, 534 swap, SHR_NEVER, &alginfo_a, 535 !ses->dir, 536 ses->digest_length); 537 break; 538 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 539 shared_desc_len = cnstr_shdsc_snow_f9( 540 cdb->sh_desc, true, swap, 541 &alginfo_a, 542 !ses->dir, 543 ses->digest_length); 544 break; 545 case RTE_CRYPTO_AUTH_ZUC_EIA3: 546 shared_desc_len = cnstr_shdsc_zuca( 547 cdb->sh_desc, true, swap, 548 &alginfo_a, 549 !ses->dir, 550 ses->digest_length); 551 break; 552 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 553 case RTE_CRYPTO_AUTH_AES_CMAC: 554 shared_desc_len = cnstr_shdsc_aes_mac( 
555 cdb->sh_desc, 556 true, swap, SHR_NEVER, 557 &alginfo_a, 558 !ses->dir, 559 ses->digest_length); 560 break; 561 default: 562 DPAA_SEC_ERR("unsupported auth alg %s (%u)", 563 rte_cryptodev_get_auth_algo_string(ses->auth_alg), 564 ses->auth_alg); 565 } 566 break; 567 case DPAA_SEC_AEAD: 568 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { 569 DPAA_SEC_ERR("not supported aead alg"); 570 return -ENOTSUP; 571 } 572 alginfo.key = (size_t)ses->aead_key.data; 573 alginfo.keylen = ses->aead_key.length; 574 alginfo.key_enc_flags = 0; 575 alginfo.key_type = RTA_DATA_IMM; 576 alginfo.algtype = ses->aead_key.alg; 577 alginfo.algmode = ses->aead_key.algmode; 578 579 if (ses->dir == DIR_ENC) 580 shared_desc_len = cnstr_shdsc_gcm_encap( 581 cdb->sh_desc, true, swap, SHR_NEVER, 582 &alginfo, 583 ses->iv.length, 584 ses->digest_length); 585 else 586 shared_desc_len = cnstr_shdsc_gcm_decap( 587 cdb->sh_desc, true, swap, SHR_NEVER, 588 &alginfo, 589 ses->iv.length, 590 ses->digest_length); 591 break; 592 case DPAA_SEC_CIPHER_HASH: 593 alginfo_c.key = (size_t)ses->cipher_key.data; 594 alginfo_c.keylen = ses->cipher_key.length; 595 alginfo_c.key_enc_flags = 0; 596 alginfo_c.key_type = RTA_DATA_IMM; 597 alginfo_c.algtype = ses->cipher_key.alg; 598 alginfo_c.algmode = ses->cipher_key.algmode; 599 600 alginfo_a.key = (size_t)ses->auth_key.data; 601 alginfo_a.keylen = ses->auth_key.length; 602 alginfo_a.key_enc_flags = 0; 603 alginfo_a.key_type = RTA_DATA_IMM; 604 alginfo_a.algtype = ses->auth_key.alg; 605 alginfo_a.algmode = ses->auth_key.algmode; 606 607 cdb->sh_desc[0] = alginfo_c.keylen; 608 cdb->sh_desc[1] = alginfo_a.keylen; 609 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 610 DESC_JOB_IO_LEN, 611 (unsigned int *)cdb->sh_desc, 612 &cdb->sh_desc[2], 2); 613 614 if (err < 0) { 615 DPAA_SEC_ERR("Crypto: Incorrect key lengths"); 616 return err; 617 } 618 if (cdb->sh_desc[2] & 1) 619 alginfo_c.key_type = RTA_DATA_IMM; 620 else { 621 alginfo_c.key = (size_t)rte_dpaa_mem_vtop( 622 (void *)(size_t)alginfo_c.key); 623 alginfo_c.key_type = RTA_DATA_PTR; 624 } 625 if (cdb->sh_desc[2] & (1<<1)) 626 alginfo_a.key_type = RTA_DATA_IMM; 627 else { 628 alginfo_a.key = (size_t)rte_dpaa_mem_vtop( 629 (void *)(size_t)alginfo_a.key); 630 alginfo_a.key_type = RTA_DATA_PTR; 631 } 632 cdb->sh_desc[0] = 0; 633 cdb->sh_desc[1] = 0; 634 cdb->sh_desc[2] = 0; 635 /* Auth_only_len is set as 0 here and it will be 636 * overwritten in fd for each packet. 
637 */ 638 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc, 639 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a, 640 ses->iv.length, 641 ses->digest_length, ses->dir); 642 break; 643 default: 644 DPAA_SEC_ERR("error: Unsupported session %d", ses->ctxt); 645 return -ENOTSUP; 646 } 647 648 if (shared_desc_len < 0) { 649 DPAA_SEC_ERR("error in preparing command block"); 650 return shared_desc_len; 651 } 652 653 cdb->sh_hdr.hi.field.idlen = shared_desc_len; 654 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word); 655 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word); 656 657 return 0; 658 } 659 660 static void 661 dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp, FILE *f) 662 { 663 struct dpaa_sec_job *job = &ctx->job; 664 struct rte_crypto_op *op = ctx->op; 665 dpaa_sec_session *sess = NULL; 666 struct sec_cdb c_cdb, *cdb; 667 uint8_t bufsize; 668 struct rte_crypto_sym_op *sym_op; 669 struct qm_sg_entry sg[2]; 670 671 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 672 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 673 #ifdef RTE_LIB_SECURITY 674 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 675 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 676 #endif 677 if (sess == NULL) { 678 printf("session is NULL\n"); 679 goto mbuf_dump; 680 } 681 682 cdb = &sess->cdb; 683 rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb)); 684 #ifdef RTE_LIB_SECURITY 685 fprintf(f, "\nsession protocol type = %d\n", sess->proto_alg); 686 #endif 687 fprintf(f, "\n****************************************\n" 688 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n" 689 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n" 690 "\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n" 691 "\tCipher algmode:\t%d\n", sess->ctxt, 692 (sess->dir == DIR_ENC) ? 
"DIR_ENC" : "DIR_DEC", 693 sess->cipher_alg, sess->auth_alg, sess->aead_alg, 694 (uint64_t)sess->cipher_key.length, sess->cipher_key.alg, 695 sess->cipher_key.algmode); 696 rte_hexdump(f, "cipher key", sess->cipher_key.data, 697 sess->cipher_key.length); 698 rte_hexdump(f, "auth key", sess->auth_key.data, 699 sess->auth_key.length); 700 fprintf(f, "\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n" 701 "\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n" 702 "\tdigest length:\t%d\n\tauth only len:\t\t%d\n" 703 "\taead cipher text:\t%d\n", 704 (uint64_t)sess->auth_key.length, sess->auth_key.alg, 705 sess->auth_key.algmode, 706 sess->iv.length, sess->iv.offset, 707 sess->digest_length, sess->auth_only_len, 708 sess->auth_cipher_text); 709 #ifdef RTE_LIB_SECURITY 710 fprintf(f, "PDCP session params:\n" 711 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" 712 "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:" 713 "\t%d\n\thfn:\t\t%d\n" 714 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain, 715 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd, 716 sess->pdcp.sn_size, sess->pdcp.sdap_enabled, 717 sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn, 718 sess->pdcp.hfn_threshold); 719 #endif 720 c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word); 721 c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word); 722 bufsize = c_cdb.sh_hdr.hi.field.idlen; 723 724 fprintf(f, "cdb = %p\n\n", cdb); 725 fprintf(f, "Descriptor size = %d\n", bufsize); 726 int m; 727 for (m = 0; m < bufsize; m++) 728 fprintf(f, "0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m])); 729 730 fprintf(f, "\n"); 731 mbuf_dump: 732 sym_op = op->sym; 733 if (sym_op->m_src) { 734 fprintf(f, "Source mbuf:\n"); 735 rte_pktmbuf_dump(f, sym_op->m_src, 736 sym_op->m_src->data_len); 737 } 738 if (sym_op->m_dst) { 739 fprintf(f, "Destination mbuf:\n"); 740 rte_pktmbuf_dump(f, sym_op->m_dst, 741 sym_op->m_dst->data_len); 742 } 743 744 fprintf(f, "Session address = %p\ncipher offset: %d, length: %d\n" 745 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n", 746 sym_op->session, sym_op->cipher.data.offset, 747 sym_op->cipher.data.length, 748 sym_op->auth.data.offset, sym_op->auth.data.length, 749 sym_op->aead.data.offset, sym_op->aead.data.length); 750 fprintf(f, "\n"); 751 752 fprintf(f, "******************************************************\n"); 753 fprintf(f, "ctx info:\n"); 754 fprintf(f, "job->sg[0] output info:\n"); 755 memcpy(&sg[0], &job->sg[0], sizeof(sg[0])); 756 fprintf(f, "\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d" 757 "\n\tbpid = %d\n\toffset = %d\n", 758 (uint64_t)sg[0].addr, sg[0].length, sg[0].final, 759 sg[0].extension, sg[0].bpid, sg[0].offset); 760 fprintf(f, "\njob->sg[1] input info:\n"); 761 memcpy(&sg[1], &job->sg[1], sizeof(sg[1])); 762 hw_sg_to_cpu(&sg[1]); 763 fprintf(f, "\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d" 764 "\n\tbpid = %d\n\toffset = %d\n", 765 (uint64_t)sg[1].addr, sg[1].length, sg[1].final, 766 sg[1].extension, sg[1].bpid, sg[1].offset); 767 768 fprintf(f, "\nctx pool addr = %p\n", ctx->ctx_pool); 769 if (ctx->ctx_pool) 770 fprintf(f, "ctx pool available counts = %d\n", 771 rte_mempool_avail_count(ctx->ctx_pool)); 772 773 fprintf(f, "\nop pool addr = %p\n", op->mempool); 774 if (op->mempool) 775 fprintf(f, "op pool available counts = %d\n", 776 rte_mempool_avail_count(op->mempool)); 777 778 fprintf(f, "********************************************************\n"); 779 fprintf(f, "Queue data:\n"); 780 fprintf(f, "\tFQID 
= 0x%x\n\tstate = %d\n\tnb_desc = %d\n" 781 "\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts" 782 "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n", 783 qp->outq.fqid, qp->outq.state, qp->outq.nb_desc, 784 qp->ctx_pool, qp->rx_pkts, qp->tx_pkts, 785 qp->rx_errs, qp->tx_errs); 786 } 787 788 /* qp is lockless, should be accessed by only one thread */ 789 static int 790 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) 791 { 792 struct qman_fq *fq; 793 unsigned int pkts = 0; 794 int num_rx_bufs, ret; 795 struct qm_dqrr_entry *dq; 796 uint32_t vdqcr_flags = 0; 797 798 fq = &qp->outq; 799 /* 800 * Until request for four buffers, we provide exact number of buffers. 801 * Otherwise we do not set the QM_VDQCR_EXACT flag. 802 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than 803 * requested, so we request two less in this case. 804 */ 805 if (nb_ops < 4) { 806 vdqcr_flags = QM_VDQCR_EXACT; 807 num_rx_bufs = nb_ops; 808 } else { 809 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ? 810 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2); 811 } 812 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); 813 if (ret) 814 return 0; 815 816 do { 817 const struct qm_fd *fd; 818 struct dpaa_sec_job *job; 819 struct dpaa_sec_op_ctx *ctx; 820 struct rte_crypto_op *op; 821 822 dq = qman_dequeue(fq); 823 if (!dq) 824 continue; 825 826 fd = &dq->fd; 827 /* sg is embedded in an op ctx, 828 * sg[0] is for output 829 * sg[1] for input 830 */ 831 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 832 833 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 834 ctx->fd_status = fd->status; 835 op = ctx->op; 836 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 837 struct qm_sg_entry *sg_out; 838 uint32_t len; 839 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ? 
840 op->sym->m_src : op->sym->m_dst; 841 842 sg_out = &job->sg[0]; 843 hw_sg_to_cpu(sg_out); 844 len = sg_out->length; 845 mbuf->pkt_len = len; 846 while (mbuf->next != NULL) { 847 len -= mbuf->data_len; 848 mbuf = mbuf->next; 849 } 850 mbuf->data_len = len; 851 } 852 if (!ctx->fd_status) { 853 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 854 } else { 855 if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) { 856 DPAA_SEC_DP_WARN("SEC return err:0x%x", 857 ctx->fd_status); 858 if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP) 859 dpaa_sec_dump(ctx, qp, stdout); 860 } 861 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 862 } 863 ops[pkts++] = op; 864 865 /* report op status to sym->op and then free the ctx memory */ 866 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 867 868 qman_dqrr_consume(fq, dq); 869 } while (fq->flags & QMAN_FQ_STATE_VDQCR); 870 871 return pkts; 872 } 873 874 static inline struct dpaa_sec_job * 875 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 876 { 877 struct rte_crypto_sym_op *sym = op->sym; 878 struct rte_mbuf *mbuf = sym->m_src; 879 struct dpaa_sec_job *cf; 880 struct dpaa_sec_op_ctx *ctx; 881 struct qm_sg_entry *sg, *out_sg, *in_sg; 882 phys_addr_t start_addr; 883 uint8_t *old_digest, extra_segs; 884 int data_len, data_offset; 885 886 data_len = sym->auth.data.length; 887 data_offset = sym->auth.data.offset; 888 889 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 890 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 891 if ((data_len & 7) || (data_offset & 7)) { 892 DPAA_SEC_ERR("AUTH: len/offset must be full bytes"); 893 return NULL; 894 } 895 896 data_len = data_len >> 3; 897 data_offset = data_offset >> 3; 898 } 899 900 if (is_decode(ses)) 901 extra_segs = 3; 902 else 903 extra_segs = 2; 904 905 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 906 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d", 907 MAX_SG_ENTRIES); 908 return NULL; 909 } 910 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs); 911 if (!ctx) 912 return NULL; 913 914 cf = &ctx->job; 915 ctx->op = op; 916 old_digest = ctx->digest; 917 918 /* output */ 919 out_sg = &cf->sg[0]; 920 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr); 921 out_sg->length = ses->digest_length; 922 cpu_to_hw_sg(out_sg); 923 924 /* input */ 925 in_sg = &cf->sg[1]; 926 /* need to extend the input to a compound frame */ 927 in_sg->extension = 1; 928 in_sg->final = 1; 929 in_sg->length = data_len; 930 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 931 932 /* 1st seg */ 933 sg = in_sg + 1; 934 935 if (ses->iv.length) { 936 uint8_t *iv_ptr; 937 938 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 939 ses->iv.offset); 940 941 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 942 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 943 sg->length = 12; 944 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 945 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 946 sg->length = 8; 947 } else { 948 sg->length = ses->iv.length; 949 } 950 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr)); 951 in_sg->length += sg->length; 952 cpu_to_hw_sg(sg); 953 sg++; 954 } 955 956 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 957 sg->offset = data_offset; 958 959 if (data_len <= (mbuf->data_len - data_offset)) { 960 sg->length = data_len; 961 } else { 962 sg->length = mbuf->data_len - data_offset; 963 964 /* remaining i/p segs */ 965 while ((data_len = data_len - sg->length) && 966 (mbuf = mbuf->next)) { 967 cpu_to_hw_sg(sg); 968 sg++; 969 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 970 if (data_len > mbuf->data_len) 971 sg->length = mbuf->data_len; 972 
else 973 sg->length = data_len; 974 } 975 } 976 977 if (is_decode(ses)) { 978 /* Digest verification case */ 979 cpu_to_hw_sg(sg); 980 sg++; 981 rte_memcpy(old_digest, sym->auth.digest.data, 982 ses->digest_length); 983 start_addr = rte_dpaa_mem_vtop(old_digest); 984 qm_sg_entry_set64(sg, start_addr); 985 sg->length = ses->digest_length; 986 in_sg->length += ses->digest_length; 987 } 988 sg->final = 1; 989 cpu_to_hw_sg(sg); 990 cpu_to_hw_sg(in_sg); 991 992 return cf; 993 } 994 995 /** 996 * packet looks like: 997 * |<----data_len------->| 998 * |ip_header|ah_header|icv|payload| 999 * ^ 1000 * | 1001 * mbuf->pkt.data 1002 */ 1003 static inline struct dpaa_sec_job * 1004 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) 1005 { 1006 struct rte_crypto_sym_op *sym = op->sym; 1007 struct rte_mbuf *mbuf = sym->m_src; 1008 struct dpaa_sec_job *cf; 1009 struct dpaa_sec_op_ctx *ctx; 1010 struct qm_sg_entry *sg, *in_sg; 1011 rte_iova_t start_addr; 1012 uint8_t *old_digest; 1013 int data_len, data_offset; 1014 1015 data_len = sym->auth.data.length; 1016 data_offset = sym->auth.data.offset; 1017 1018 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 1019 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1020 if ((data_len & 7) || (data_offset & 7)) { 1021 DPAA_SEC_ERR("AUTH: len/offset must be full bytes"); 1022 return NULL; 1023 } 1024 1025 data_len = data_len >> 3; 1026 data_offset = data_offset >> 3; 1027 } 1028 1029 ctx = dpaa_sec_alloc_ctx(ses, 4); 1030 if (!ctx) 1031 return NULL; 1032 1033 cf = &ctx->job; 1034 ctx->op = op; 1035 old_digest = ctx->digest; 1036 1037 start_addr = rte_pktmbuf_iova(mbuf); 1038 /* output */ 1039 sg = &cf->sg[0]; 1040 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 1041 sg->length = ses->digest_length; 1042 cpu_to_hw_sg(sg); 1043 1044 /* input */ 1045 in_sg = &cf->sg[1]; 1046 /* need to extend the input to a compound frame */ 1047 in_sg->extension = 1; 1048 in_sg->final = 1; 1049 in_sg->length = data_len; 1050 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1051 sg = &cf->sg[2]; 1052 1053 if (ses->iv.length) { 1054 uint8_t *iv_ptr; 1055 1056 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1057 ses->iv.offset); 1058 1059 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 1060 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 1061 sg->length = 12; 1062 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1063 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 1064 sg->length = 8; 1065 } else { 1066 sg->length = ses->iv.length; 1067 } 1068 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr)); 1069 in_sg->length += sg->length; 1070 cpu_to_hw_sg(sg); 1071 sg++; 1072 } 1073 1074 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1075 sg->offset = data_offset; 1076 sg->length = data_len; 1077 1078 if (is_decode(ses)) { 1079 /* Digest verification case */ 1080 cpu_to_hw_sg(sg); 1081 /* hash result or digest, save digest first */ 1082 rte_memcpy(old_digest, sym->auth.digest.data, 1083 ses->digest_length); 1084 /* let's check digest by hw */ 1085 start_addr = rte_dpaa_mem_vtop(old_digest); 1086 sg++; 1087 qm_sg_entry_set64(sg, start_addr); 1088 sg->length = ses->digest_length; 1089 in_sg->length += ses->digest_length; 1090 } 1091 sg->final = 1; 1092 cpu_to_hw_sg(sg); 1093 cpu_to_hw_sg(in_sg); 1094 1095 return cf; 1096 } 1097 1098 static inline struct dpaa_sec_job * 1099 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1100 { 1101 struct rte_crypto_sym_op *sym = op->sym; 1102 struct dpaa_sec_job *cf; 1103 struct dpaa_sec_op_ctx *ctx; 1104 struct qm_sg_entry *sg, 
*out_sg, *in_sg; 1105 struct rte_mbuf *mbuf; 1106 uint8_t req_segs; 1107 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1108 ses->iv.offset); 1109 int data_len, data_offset; 1110 1111 data_len = sym->cipher.data.length; 1112 data_offset = sym->cipher.data.offset; 1113 1114 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1115 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1116 if ((data_len & 7) || (data_offset & 7)) { 1117 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes"); 1118 return NULL; 1119 } 1120 1121 data_len = data_len >> 3; 1122 data_offset = data_offset >> 3; 1123 } 1124 1125 if (sym->m_dst) { 1126 mbuf = sym->m_dst; 1127 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3; 1128 } else { 1129 mbuf = sym->m_src; 1130 req_segs = mbuf->nb_segs * 2 + 3; 1131 } 1132 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1133 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d", 1134 MAX_SG_ENTRIES); 1135 return NULL; 1136 } 1137 1138 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1139 if (!ctx) 1140 return NULL; 1141 1142 cf = &ctx->job; 1143 ctx->op = op; 1144 1145 /* output */ 1146 out_sg = &cf->sg[0]; 1147 out_sg->extension = 1; 1148 out_sg->length = data_len; 1149 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1150 cpu_to_hw_sg(out_sg); 1151 1152 /* 1st seg */ 1153 sg = &cf->sg[2]; 1154 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1155 sg->length = mbuf->data_len - data_offset; 1156 sg->offset = data_offset; 1157 1158 /* Successive segs */ 1159 mbuf = mbuf->next; 1160 while (mbuf) { 1161 cpu_to_hw_sg(sg); 1162 sg++; 1163 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1164 sg->length = mbuf->data_len; 1165 mbuf = mbuf->next; 1166 } 1167 sg->final = 1; 1168 cpu_to_hw_sg(sg); 1169 1170 /* input */ 1171 mbuf = sym->m_src; 1172 in_sg = &cf->sg[1]; 1173 in_sg->extension = 1; 1174 in_sg->final = 1; 1175 in_sg->length = data_len + ses->iv.length; 1176 1177 sg++; 1178 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1179 cpu_to_hw_sg(in_sg); 1180 1181 /* IV */ 1182 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1183 sg->length = ses->iv.length; 1184 cpu_to_hw_sg(sg); 1185 1186 /* 1st seg */ 1187 sg++; 1188 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1189 sg->length = mbuf->data_len - data_offset; 1190 sg->offset = data_offset; 1191 1192 /* Successive segs */ 1193 mbuf = mbuf->next; 1194 while (mbuf) { 1195 cpu_to_hw_sg(sg); 1196 sg++; 1197 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1198 sg->length = mbuf->data_len; 1199 mbuf = mbuf->next; 1200 } 1201 sg->final = 1; 1202 cpu_to_hw_sg(sg); 1203 1204 return cf; 1205 } 1206 1207 static inline struct dpaa_sec_job * 1208 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses) 1209 { 1210 struct rte_crypto_sym_op *sym = op->sym; 1211 struct dpaa_sec_job *cf; 1212 struct dpaa_sec_op_ctx *ctx; 1213 struct qm_sg_entry *sg; 1214 rte_iova_t src_start_addr, dst_start_addr; 1215 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1216 ses->iv.offset); 1217 int data_len, data_offset; 1218 1219 data_len = sym->cipher.data.length; 1220 data_offset = sym->cipher.data.offset; 1221 1222 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1223 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1224 if ((data_len & 7) || (data_offset & 7)) { 1225 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes"); 1226 return NULL; 1227 } 1228 1229 data_len = data_len >> 3; 1230 data_offset = data_offset >> 3; 1231 } 1232 1233 ctx = dpaa_sec_alloc_ctx(ses, 4); 1234 if (!ctx) 1235 return NULL; 1236 1237 cf = &ctx->job; 1238 
ctx->op = op; 1239 1240 src_start_addr = rte_pktmbuf_iova(sym->m_src); 1241 1242 if (sym->m_dst) 1243 dst_start_addr = rte_pktmbuf_iova(sym->m_dst); 1244 else 1245 dst_start_addr = src_start_addr; 1246 1247 /* output */ 1248 sg = &cf->sg[0]; 1249 qm_sg_entry_set64(sg, dst_start_addr + data_offset); 1250 sg->length = data_len + ses->iv.length; 1251 cpu_to_hw_sg(sg); 1252 1253 /* input */ 1254 sg = &cf->sg[1]; 1255 1256 /* need to extend the input to a compound frame */ 1257 sg->extension = 1; 1258 sg->final = 1; 1259 sg->length = data_len + ses->iv.length; 1260 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1261 cpu_to_hw_sg(sg); 1262 1263 sg = &cf->sg[2]; 1264 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1265 sg->length = ses->iv.length; 1266 cpu_to_hw_sg(sg); 1267 1268 sg++; 1269 qm_sg_entry_set64(sg, src_start_addr + data_offset); 1270 sg->length = data_len; 1271 sg->final = 1; 1272 cpu_to_hw_sg(sg); 1273 1274 return cf; 1275 } 1276 1277 static inline struct dpaa_sec_job * 1278 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1279 { 1280 struct rte_crypto_sym_op *sym = op->sym; 1281 struct dpaa_sec_job *cf; 1282 struct dpaa_sec_op_ctx *ctx; 1283 struct qm_sg_entry *sg, *out_sg, *in_sg; 1284 struct rte_mbuf *mbuf; 1285 uint8_t req_segs; 1286 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1287 ses->iv.offset); 1288 1289 if (sym->m_dst) { 1290 mbuf = sym->m_dst; 1291 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; 1292 } else { 1293 mbuf = sym->m_src; 1294 req_segs = mbuf->nb_segs * 2 + 4; 1295 } 1296 1297 if (ses->auth_only_len) 1298 req_segs++; 1299 1300 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1301 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d", 1302 MAX_SG_ENTRIES); 1303 return NULL; 1304 } 1305 1306 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1307 if (!ctx) 1308 return NULL; 1309 1310 cf = &ctx->job; 1311 ctx->op = op; 1312 1313 rte_prefetch0(cf->sg); 1314 1315 /* output */ 1316 out_sg = &cf->sg[0]; 1317 out_sg->extension = 1; 1318 if (is_encode(ses)) 1319 out_sg->length = sym->aead.data.length + ses->digest_length; 1320 else 1321 out_sg->length = sym->aead.data.length; 1322 1323 /* output sg entries */ 1324 sg = &cf->sg[2]; 1325 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg)); 1326 cpu_to_hw_sg(out_sg); 1327 1328 /* 1st seg */ 1329 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1330 sg->length = mbuf->data_len - sym->aead.data.offset; 1331 sg->offset = sym->aead.data.offset; 1332 1333 /* Successive segs */ 1334 mbuf = mbuf->next; 1335 while (mbuf) { 1336 cpu_to_hw_sg(sg); 1337 sg++; 1338 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1339 sg->length = mbuf->data_len; 1340 mbuf = mbuf->next; 1341 } 1342 sg->length -= ses->digest_length; 1343 1344 if (is_encode(ses)) { 1345 cpu_to_hw_sg(sg); 1346 /* set auth output */ 1347 sg++; 1348 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr); 1349 sg->length = ses->digest_length; 1350 } 1351 sg->final = 1; 1352 cpu_to_hw_sg(sg); 1353 1354 /* input */ 1355 mbuf = sym->m_src; 1356 in_sg = &cf->sg[1]; 1357 in_sg->extension = 1; 1358 in_sg->final = 1; 1359 if (is_encode(ses)) 1360 in_sg->length = ses->iv.length + sym->aead.data.length 1361 + ses->auth_only_len; 1362 else 1363 in_sg->length = ses->iv.length + sym->aead.data.length 1364 + ses->auth_only_len + ses->digest_length; 1365 1366 /* input sg entries */ 1367 sg++; 1368 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1369 cpu_to_hw_sg(in_sg); 1370 1371 /* 1st seg IV */ 1372 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1373 sg->length = 
ses->iv.length; 1374 cpu_to_hw_sg(sg); 1375 1376 /* 2nd seg auth only */ 1377 if (ses->auth_only_len) { 1378 sg++; 1379 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data)); 1380 sg->length = ses->auth_only_len; 1381 cpu_to_hw_sg(sg); 1382 } 1383 1384 /* 3rd seg */ 1385 sg++; 1386 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1387 sg->length = mbuf->data_len - sym->aead.data.offset; 1388 sg->offset = sym->aead.data.offset; 1389 1390 /* Successive segs */ 1391 mbuf = mbuf->next; 1392 while (mbuf) { 1393 cpu_to_hw_sg(sg); 1394 sg++; 1395 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1396 sg->length = mbuf->data_len; 1397 mbuf = mbuf->next; 1398 } 1399 1400 if (is_decode(ses)) { 1401 cpu_to_hw_sg(sg); 1402 sg++; 1403 memcpy(ctx->digest, sym->aead.digest.data, 1404 ses->digest_length); 1405 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1406 sg->length = ses->digest_length; 1407 } 1408 sg->final = 1; 1409 cpu_to_hw_sg(sg); 1410 1411 return cf; 1412 } 1413 1414 static inline struct dpaa_sec_job * 1415 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) 1416 { 1417 struct rte_crypto_sym_op *sym = op->sym; 1418 struct dpaa_sec_job *cf; 1419 struct dpaa_sec_op_ctx *ctx; 1420 struct qm_sg_entry *sg; 1421 uint32_t length = 0; 1422 rte_iova_t src_start_addr, dst_start_addr; 1423 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1424 ses->iv.offset); 1425 1426 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off; 1427 1428 if (sym->m_dst) 1429 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off; 1430 else 1431 dst_start_addr = src_start_addr; 1432 1433 ctx = dpaa_sec_alloc_ctx(ses, 7); 1434 if (!ctx) 1435 return NULL; 1436 1437 cf = &ctx->job; 1438 ctx->op = op; 1439 1440 /* input */ 1441 rte_prefetch0(cf->sg); 1442 sg = &cf->sg[2]; 1443 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg)); 1444 if (is_encode(ses)) { 1445 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1446 sg->length = ses->iv.length; 1447 length += sg->length; 1448 cpu_to_hw_sg(sg); 1449 1450 sg++; 1451 if (ses->auth_only_len) { 1452 qm_sg_entry_set64(sg, 1453 rte_dpaa_mem_vtop(sym->aead.aad.data)); 1454 sg->length = ses->auth_only_len; 1455 length += sg->length; 1456 cpu_to_hw_sg(sg); 1457 sg++; 1458 } 1459 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset); 1460 sg->length = sym->aead.data.length; 1461 length += sg->length; 1462 sg->final = 1; 1463 cpu_to_hw_sg(sg); 1464 } else { 1465 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1466 sg->length = ses->iv.length; 1467 length += sg->length; 1468 cpu_to_hw_sg(sg); 1469 1470 sg++; 1471 if (ses->auth_only_len) { 1472 qm_sg_entry_set64(sg, 1473 rte_dpaa_mem_vtop(sym->aead.aad.data)); 1474 sg->length = ses->auth_only_len; 1475 length += sg->length; 1476 cpu_to_hw_sg(sg); 1477 sg++; 1478 } 1479 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset); 1480 sg->length = sym->aead.data.length; 1481 length += sg->length; 1482 cpu_to_hw_sg(sg); 1483 1484 memcpy(ctx->digest, sym->aead.digest.data, 1485 ses->digest_length); 1486 sg++; 1487 1488 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1489 sg->length = ses->digest_length; 1490 length += sg->length; 1491 sg->final = 1; 1492 cpu_to_hw_sg(sg); 1493 } 1494 /* input compound frame */ 1495 cf->sg[1].length = length; 1496 cf->sg[1].extension = 1; 1497 cf->sg[1].final = 1; 1498 cpu_to_hw_sg(&cf->sg[1]); 1499 1500 /* output */ 1501 sg++; 1502 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg)); 1503 qm_sg_entry_set64(sg, 1504 dst_start_addr + 
sym->aead.data.offset); 1505 sg->length = sym->aead.data.length; 1506 length = sg->length; 1507 if (is_encode(ses)) { 1508 cpu_to_hw_sg(sg); 1509 /* set auth output */ 1510 sg++; 1511 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr); 1512 sg->length = ses->digest_length; 1513 length += sg->length; 1514 } 1515 sg->final = 1; 1516 cpu_to_hw_sg(sg); 1517 1518 /* output compound frame */ 1519 cf->sg[0].length = length; 1520 cf->sg[0].extension = 1; 1521 cpu_to_hw_sg(&cf->sg[0]); 1522 1523 return cf; 1524 } 1525 1526 static inline struct dpaa_sec_job * 1527 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1528 { 1529 struct rte_crypto_sym_op *sym = op->sym; 1530 struct dpaa_sec_job *cf; 1531 struct dpaa_sec_op_ctx *ctx; 1532 struct qm_sg_entry *sg, *out_sg, *in_sg; 1533 struct rte_mbuf *mbuf; 1534 uint8_t req_segs; 1535 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1536 ses->iv.offset); 1537 1538 if (sym->m_dst) { 1539 mbuf = sym->m_dst; 1540 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4; 1541 } else { 1542 mbuf = sym->m_src; 1543 req_segs = mbuf->nb_segs * 2 + 4; 1544 } 1545 1546 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1547 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d", 1548 MAX_SG_ENTRIES); 1549 return NULL; 1550 } 1551 1552 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1553 if (!ctx) 1554 return NULL; 1555 1556 cf = &ctx->job; 1557 ctx->op = op; 1558 1559 rte_prefetch0(cf->sg); 1560 1561 /* output */ 1562 out_sg = &cf->sg[0]; 1563 out_sg->extension = 1; 1564 if (is_encode(ses)) 1565 out_sg->length = sym->auth.data.length + ses->digest_length; 1566 else 1567 out_sg->length = sym->auth.data.length; 1568 1569 /* output sg entries */ 1570 sg = &cf->sg[2]; 1571 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg)); 1572 cpu_to_hw_sg(out_sg); 1573 1574 /* 1st seg */ 1575 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1576 sg->length = mbuf->data_len - sym->auth.data.offset; 1577 sg->offset = sym->auth.data.offset; 1578 1579 /* Successive segs */ 1580 mbuf = mbuf->next; 1581 while (mbuf) { 1582 cpu_to_hw_sg(sg); 1583 sg++; 1584 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1585 sg->length = mbuf->data_len; 1586 mbuf = mbuf->next; 1587 } 1588 sg->length -= ses->digest_length; 1589 1590 if (is_encode(ses)) { 1591 cpu_to_hw_sg(sg); 1592 /* set auth output */ 1593 sg++; 1594 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 1595 sg->length = ses->digest_length; 1596 } 1597 sg->final = 1; 1598 cpu_to_hw_sg(sg); 1599 1600 /* input */ 1601 mbuf = sym->m_src; 1602 in_sg = &cf->sg[1]; 1603 in_sg->extension = 1; 1604 in_sg->final = 1; 1605 if (is_encode(ses)) 1606 in_sg->length = ses->iv.length + sym->auth.data.length; 1607 else 1608 in_sg->length = ses->iv.length + sym->auth.data.length 1609 + ses->digest_length; 1610 1611 /* input sg entries */ 1612 sg++; 1613 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1614 cpu_to_hw_sg(in_sg); 1615 1616 /* 1st seg IV */ 1617 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1618 sg->length = ses->iv.length; 1619 cpu_to_hw_sg(sg); 1620 1621 /* 2nd seg */ 1622 sg++; 1623 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1624 sg->length = mbuf->data_len - sym->auth.data.offset; 1625 sg->offset = sym->auth.data.offset; 1626 1627 /* Successive segs */ 1628 mbuf = mbuf->next; 1629 while (mbuf) { 1630 cpu_to_hw_sg(sg); 1631 sg++; 1632 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1633 sg->length = mbuf->data_len; 1634 mbuf = mbuf->next; 1635 } 1636 1637 sg->length -= ses->digest_length; 1638 if (is_decode(ses)) { 1639 
cpu_to_hw_sg(sg); 1640 sg++; 1641 memcpy(ctx->digest, sym->auth.digest.data, 1642 ses->digest_length); 1643 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1644 sg->length = ses->digest_length; 1645 } 1646 sg->final = 1; 1647 cpu_to_hw_sg(sg); 1648 1649 return cf; 1650 } 1651 1652 static inline struct dpaa_sec_job * 1653 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) 1654 { 1655 struct rte_crypto_sym_op *sym = op->sym; 1656 struct dpaa_sec_job *cf; 1657 struct dpaa_sec_op_ctx *ctx; 1658 struct qm_sg_entry *sg; 1659 rte_iova_t src_start_addr, dst_start_addr; 1660 uint32_t length = 0; 1661 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1662 ses->iv.offset); 1663 1664 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off; 1665 if (sym->m_dst) 1666 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off; 1667 else 1668 dst_start_addr = src_start_addr; 1669 1670 ctx = dpaa_sec_alloc_ctx(ses, 7); 1671 if (!ctx) 1672 return NULL; 1673 1674 cf = &ctx->job; 1675 ctx->op = op; 1676 1677 /* input */ 1678 rte_prefetch0(cf->sg); 1679 sg = &cf->sg[2]; 1680 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg)); 1681 if (is_encode(ses)) { 1682 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1683 sg->length = ses->iv.length; 1684 length += sg->length; 1685 cpu_to_hw_sg(sg); 1686 1687 sg++; 1688 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); 1689 sg->length = sym->auth.data.length; 1690 length += sg->length; 1691 sg->final = 1; 1692 cpu_to_hw_sg(sg); 1693 } else { 1694 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr)); 1695 sg->length = ses->iv.length; 1696 length += sg->length; 1697 cpu_to_hw_sg(sg); 1698 1699 sg++; 1700 1701 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); 1702 sg->length = sym->auth.data.length; 1703 length += sg->length; 1704 cpu_to_hw_sg(sg); 1705 1706 memcpy(ctx->digest, sym->auth.digest.data, 1707 ses->digest_length); 1708 sg++; 1709 1710 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest)); 1711 sg->length = ses->digest_length; 1712 length += sg->length; 1713 sg->final = 1; 1714 cpu_to_hw_sg(sg); 1715 } 1716 /* input compound frame */ 1717 cf->sg[1].length = length; 1718 cf->sg[1].extension = 1; 1719 cf->sg[1].final = 1; 1720 cpu_to_hw_sg(&cf->sg[1]); 1721 1722 /* output */ 1723 sg++; 1724 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg)); 1725 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset); 1726 sg->length = sym->cipher.data.length; 1727 length = sg->length; 1728 if (is_encode(ses)) { 1729 cpu_to_hw_sg(sg); 1730 /* set auth output */ 1731 sg++; 1732 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr); 1733 sg->length = ses->digest_length; 1734 length += sg->length; 1735 } 1736 sg->final = 1; 1737 cpu_to_hw_sg(sg); 1738 1739 /* output compound frame */ 1740 cf->sg[0].length = length; 1741 cf->sg[0].extension = 1; 1742 cpu_to_hw_sg(&cf->sg[0]); 1743 1744 return cf; 1745 } 1746 1747 static inline struct dpaa_sec_job * 1748 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses) 1749 { 1750 struct rte_crypto_sym_op *sym = op->sym; 1751 struct dpaa_sec_job *cf; 1752 struct dpaa_sec_op_ctx *ctx; 1753 struct qm_sg_entry *sg; 1754 phys_addr_t src_start_addr, dst_start_addr; 1755 1756 ctx = dpaa_sec_alloc_ctx(ses, 2); 1757 if (!ctx) 1758 return NULL; 1759 cf = &ctx->job; 1760 ctx->op = op; 1761 1762 src_start_addr = rte_pktmbuf_iova(sym->m_src); 1763 1764 if (sym->m_dst) 1765 dst_start_addr = rte_pktmbuf_iova(sym->m_dst); 1766 else 1767 dst_start_addr = src_start_addr; 1768 1769 /* input 
*/ 1770 sg = &cf->sg[1]; 1771 qm_sg_entry_set64(sg, src_start_addr); 1772 sg->length = sym->m_src->pkt_len; 1773 sg->final = 1; 1774 cpu_to_hw_sg(sg); 1775 1776 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK; 1777 /* output */ 1778 sg = &cf->sg[0]; 1779 qm_sg_entry_set64(sg, dst_start_addr); 1780 sg->length = sym->m_src->buf_len - sym->m_src->data_off; 1781 cpu_to_hw_sg(sg); 1782 1783 return cf; 1784 } 1785 1786 static inline struct dpaa_sec_job * 1787 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) 1788 { 1789 struct rte_crypto_sym_op *sym = op->sym; 1790 struct dpaa_sec_job *cf; 1791 struct dpaa_sec_op_ctx *ctx; 1792 struct qm_sg_entry *sg, *out_sg, *in_sg; 1793 struct rte_mbuf *mbuf; 1794 uint8_t req_segs; 1795 uint32_t in_len = 0, out_len = 0; 1796 1797 if (sym->m_dst) 1798 mbuf = sym->m_dst; 1799 else 1800 mbuf = sym->m_src; 1801 1802 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2; 1803 if (mbuf->nb_segs > MAX_SG_ENTRIES) { 1804 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d", 1805 MAX_SG_ENTRIES); 1806 return NULL; 1807 } 1808 1809 ctx = dpaa_sec_alloc_ctx(ses, req_segs); 1810 if (!ctx) 1811 return NULL; 1812 cf = &ctx->job; 1813 ctx->op = op; 1814 /* output */ 1815 out_sg = &cf->sg[0]; 1816 out_sg->extension = 1; 1817 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2])); 1818 1819 /* 1st seg */ 1820 sg = &cf->sg[2]; 1821 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1822 sg->offset = 0; 1823 1824 /* Successive segs */ 1825 while (mbuf->next) { 1826 sg->length = mbuf->data_len; 1827 out_len += sg->length; 1828 mbuf = mbuf->next; 1829 cpu_to_hw_sg(sg); 1830 sg++; 1831 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1832 sg->offset = 0; 1833 } 1834 sg->length = mbuf->buf_len - mbuf->data_off; 1835 out_len += sg->length; 1836 sg->final = 1; 1837 cpu_to_hw_sg(sg); 1838 1839 out_sg->length = out_len; 1840 cpu_to_hw_sg(out_sg); 1841 1842 /* input */ 1843 mbuf = sym->m_src; 1844 in_sg = &cf->sg[1]; 1845 in_sg->extension = 1; 1846 in_sg->final = 1; 1847 in_len = mbuf->data_len; 1848 1849 sg++; 1850 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg)); 1851 1852 /* 1st seg */ 1853 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1854 sg->length = mbuf->data_len; 1855 sg->offset = 0; 1856 1857 /* Successive segs */ 1858 mbuf = mbuf->next; 1859 while (mbuf) { 1860 cpu_to_hw_sg(sg); 1861 sg++; 1862 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf)); 1863 sg->length = mbuf->data_len; 1864 sg->offset = 0; 1865 in_len += sg->length; 1866 mbuf = mbuf->next; 1867 } 1868 sg->final = 1; 1869 cpu_to_hw_sg(sg); 1870 1871 in_sg->length = in_len; 1872 cpu_to_hw_sg(in_sg); 1873 1874 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK; 1875 1876 return cf; 1877 } 1878 1879 static uint16_t 1880 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1881 uint16_t nb_ops) 1882 { 1883 /* Function to transmit the frames to given device and queuepair */ 1884 uint32_t loop; 1885 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; 1886 uint16_t num_tx = 0; 1887 struct qm_fd fds[DPAA_SEC_BURST], *fd; 1888 uint32_t frames_to_send; 1889 struct rte_crypto_op *op; 1890 struct dpaa_sec_job *cf; 1891 dpaa_sec_session *ses; 1892 uint16_t auth_hdr_len, auth_tail_len; 1893 uint32_t index, flags[DPAA_SEC_BURST] = {0}; 1894 struct qman_fq *inq[DPAA_SEC_BURST]; 1895 1896 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 1897 if (rte_dpaa_portal_init((void *)0)) { 1898 DPAA_SEC_ERR("Failure in affining portal"); 1899 return 0; 1900 } 1901 } 1902 1903 while (nb_ops) { 1904 frames_to_send = (nb_ops > DPAA_SEC_BURST) ? 
1905 DPAA_SEC_BURST : nb_ops; 1906 for (loop = 0; loop < frames_to_send; loop++) { 1907 op = *(ops++); 1908 if (*dpaa_seqn(op->sym->m_src) != 0) { 1909 index = *dpaa_seqn(op->sym->m_src) - 1; 1910 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { 1911 /* QM_EQCR_DCA_IDXMASK = 0x0f */ 1912 flags[loop] = ((index & 0x0f) << 8); 1913 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; 1914 DPAA_PER_LCORE_DQRR_SIZE--; 1915 DPAA_PER_LCORE_DQRR_HELD &= 1916 ~(1 << index); 1917 } 1918 } 1919 1920 switch (op->sess_type) { 1921 case RTE_CRYPTO_OP_WITH_SESSION: 1922 ses = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 1923 break; 1924 case RTE_CRYPTO_OP_SECURITY_SESSION: 1925 ses = SECURITY_GET_SESS_PRIV(op->sym->session); 1926 break; 1927 default: 1928 DPAA_SEC_DP_ERR( 1929 "sessionless crypto op not supported"); 1930 frames_to_send = loop; 1931 nb_ops = loop; 1932 goto send_pkts; 1933 } 1934 1935 if (!ses) { 1936 DPAA_SEC_DP_ERR("session not available"); 1937 frames_to_send = loop; 1938 nb_ops = loop; 1939 goto send_pkts; 1940 } 1941 1942 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) { 1943 if (dpaa_sec_attach_sess_q(qp, ses)) { 1944 frames_to_send = loop; 1945 nb_ops = loop; 1946 goto send_pkts; 1947 } 1948 } else if (unlikely(ses->qp[rte_lcore_id() % 1949 MAX_DPAA_CORES] != qp)) { 1950 DPAA_SEC_DP_ERR("Old:sess->qp = %p" 1951 " New qp = %p", 1952 ses->qp[rte_lcore_id() % 1953 MAX_DPAA_CORES], qp); 1954 frames_to_send = loop; 1955 nb_ops = loop; 1956 goto send_pkts; 1957 } 1958 1959 auth_hdr_len = op->sym->auth.data.length - 1960 op->sym->cipher.data.length; 1961 auth_tail_len = 0; 1962 1963 if (rte_pktmbuf_is_contiguous(op->sym->m_src) && 1964 ((op->sym->m_dst == NULL) || 1965 rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1966 switch (ses->ctxt) { 1967 case DPAA_SEC_PDCP: 1968 case DPAA_SEC_IPSEC: 1969 cf = build_proto(op, ses); 1970 break; 1971 case DPAA_SEC_AUTH: 1972 cf = build_auth_only(op, ses); 1973 break; 1974 case DPAA_SEC_CIPHER: 1975 cf = build_cipher_only(op, ses); 1976 break; 1977 case DPAA_SEC_AEAD: 1978 cf = build_cipher_auth_gcm(op, ses); 1979 auth_hdr_len = ses->auth_only_len; 1980 break; 1981 case DPAA_SEC_CIPHER_HASH: 1982 auth_hdr_len = 1983 op->sym->cipher.data.offset 1984 - op->sym->auth.data.offset; 1985 auth_tail_len = 1986 op->sym->auth.data.length 1987 - op->sym->cipher.data.length 1988 - auth_hdr_len; 1989 cf = build_cipher_auth(op, ses); 1990 break; 1991 default: 1992 DPAA_SEC_DP_ERR("not supported ops"); 1993 frames_to_send = loop; 1994 nb_ops = loop; 1995 goto send_pkts; 1996 } 1997 } else { 1998 switch (ses->ctxt) { 1999 case DPAA_SEC_PDCP: 2000 case DPAA_SEC_IPSEC: 2001 cf = build_proto_sg(op, ses); 2002 break; 2003 case DPAA_SEC_AUTH: 2004 cf = build_auth_only_sg(op, ses); 2005 break; 2006 case DPAA_SEC_CIPHER: 2007 cf = build_cipher_only_sg(op, ses); 2008 break; 2009 case DPAA_SEC_AEAD: 2010 cf = build_cipher_auth_gcm_sg(op, ses); 2011 auth_hdr_len = ses->auth_only_len; 2012 break; 2013 case DPAA_SEC_CIPHER_HASH: 2014 auth_hdr_len = 2015 op->sym->cipher.data.offset 2016 - op->sym->auth.data.offset; 2017 auth_tail_len = 2018 op->sym->auth.data.length 2019 - op->sym->cipher.data.length 2020 - auth_hdr_len; 2021 cf = build_cipher_auth_sg(op, ses); 2022 break; 2023 default: 2024 DPAA_SEC_DP_ERR("not supported ops"); 2025 frames_to_send = loop; 2026 nb_ops = loop; 2027 goto send_pkts; 2028 } 2029 } 2030 if (unlikely(!cf)) { 2031 frames_to_send = loop; 2032 nb_ops = loop; 2033 goto send_pkts; 2034 } 2035 2036 fd = &fds[loop]; 2037 inq[loop] = ses->inq[rte_lcore_id() % 
MAX_DPAA_CORES]; 2038 fd->opaque_addr = 0; 2039 fd->cmd = 0; 2040 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg)); 2041 fd->_format1 = qm_fd_compound; 2042 fd->length29 = 2 * sizeof(struct qm_sg_entry); 2043 2044 /* Auth_only_len is set as 0 in descriptor and it is 2045 * overwritten here in the fd.cmd which will update 2046 * the DPOVRD reg. 2047 */ 2048 if (auth_hdr_len || auth_tail_len) { 2049 fd->cmd = 0x80000000; 2050 fd->cmd |= 2051 ((auth_tail_len << 16) | auth_hdr_len); 2052 } 2053 2054 /* In case of PDCP, per packet HFN is stored in 2055 * mbuf priv after sym_op. 2056 */ 2057 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) { 2058 fd->cmd = 0x80000000 | 2059 *((uint32_t *)((uint8_t *)op + 2060 ses->pdcp.hfn_ovd_offset)); 2061 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u", 2062 *((uint32_t *)((uint8_t *)op + 2063 ses->pdcp.hfn_ovd_offset)), 2064 ses->pdcp.hfn_ovd); 2065 } 2066 } 2067 send_pkts: 2068 loop = 0; 2069 while (loop < frames_to_send) { 2070 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop], 2071 &flags[loop], frames_to_send - loop); 2072 } 2073 nb_ops -= frames_to_send; 2074 num_tx += frames_to_send; 2075 } 2076 2077 dpaa_qp->tx_pkts += num_tx; 2078 dpaa_qp->tx_errs += nb_ops - num_tx; 2079 2080 return num_tx; 2081 } 2082 2083 static uint16_t 2084 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 2085 uint16_t nb_ops) 2086 { 2087 uint16_t num_rx; 2088 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp; 2089 2090 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 2091 if (rte_dpaa_portal_init((void *)0)) { 2092 DPAA_SEC_ERR("Failure in affining portal"); 2093 return 0; 2094 } 2095 } 2096 2097 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops); 2098 2099 dpaa_qp->rx_pkts += num_rx; 2100 dpaa_qp->rx_errs += nb_ops - num_rx; 2101 2102 DPAA_SEC_DP_DEBUG("SEC Received %d Packets", num_rx); 2103 2104 return num_rx; 2105 } 2106 2107 /** Release queue pair */ 2108 static int 2109 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev, 2110 uint16_t qp_id) 2111 { 2112 struct dpaa_sec_dev_private *internals; 2113 struct dpaa_sec_qp *qp = NULL; 2114 2115 PMD_INIT_FUNC_TRACE(); 2116 2117 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id); 2118 2119 internals = dev->data->dev_private; 2120 if (qp_id >= internals->max_nb_queue_pairs) { 2121 DPAA_SEC_ERR("Max supported qpid %d", 2122 internals->max_nb_queue_pairs); 2123 return -EINVAL; 2124 } 2125 2126 qp = &internals->qps[qp_id]; 2127 rte_mempool_free(qp->ctx_pool); 2128 qp->internals = NULL; 2129 dev->data->queue_pairs[qp_id] = NULL; 2130 2131 return 0; 2132 } 2133 2134 /** Setup a queue pair */ 2135 static int 2136 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 2137 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 2138 __rte_unused int socket_id) 2139 { 2140 struct dpaa_sec_dev_private *internals; 2141 struct dpaa_sec_qp *qp = NULL; 2142 char str[20]; 2143 2144 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); 2145 2146 internals = dev->data->dev_private; 2147 if (qp_id >= internals->max_nb_queue_pairs) { 2148 DPAA_SEC_ERR("Max supported qpid %d", 2149 internals->max_nb_queue_pairs); 2150 return -EINVAL; 2151 } 2152 2153 qp = &internals->qps[qp_id]; 2154 qp->internals = internals; 2155 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d", 2156 dev->data->dev_id, qp_id); 2157 if (!qp->ctx_pool) { 2158 qp->ctx_pool = rte_mempool_create((const char *)str, 2159 CTX_POOL_NUM_BUFS, 2160 CTX_POOL_BUF_SIZE, 2161 CTX_POOL_CACHE_SIZE, 0, 2162 NULL, NULL, NULL, NULL, 2163 SOCKET_ID_ANY, 0); 2164 if 
(!qp->ctx_pool) { 2165 DPAA_SEC_ERR("%s create failed", str); 2166 return -ENOMEM; 2167 } 2168 } else 2169 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d", 2170 dev->data->dev_id, qp_id); 2171 dev->data->queue_pairs[qp_id] = qp; 2172 2173 return 0; 2174 } 2175 2176 /** Returns the size of session structure */ 2177 static unsigned int 2178 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 2179 { 2180 PMD_INIT_FUNC_TRACE(); 2181 2182 return sizeof(dpaa_sec_session); 2183 } 2184 2185 static int 2186 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused, 2187 struct rte_crypto_sym_xform *xform, 2188 dpaa_sec_session *session) 2189 { 2190 session->ctxt = DPAA_SEC_CIPHER; 2191 session->cipher_alg = xform->cipher.algo; 2192 session->iv.length = xform->cipher.iv.length; 2193 session->iv.offset = xform->cipher.iv.offset; 2194 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 2195 RTE_CACHE_LINE_SIZE); 2196 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 2197 DPAA_SEC_ERR("No Memory for cipher key"); 2198 return -ENOMEM; 2199 } 2200 session->cipher_key.length = xform->cipher.key.length; 2201 2202 memcpy(session->cipher_key.data, xform->cipher.key.data, 2203 xform->cipher.key.length); 2204 switch (xform->cipher.algo) { 2205 case RTE_CRYPTO_CIPHER_AES_CBC: 2206 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2207 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2208 break; 2209 case RTE_CRYPTO_CIPHER_DES_CBC: 2210 session->cipher_key.alg = OP_ALG_ALGSEL_DES; 2211 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2212 break; 2213 case RTE_CRYPTO_CIPHER_3DES_CBC: 2214 session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2215 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2216 break; 2217 case RTE_CRYPTO_CIPHER_AES_CTR: 2218 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2219 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2220 break; 2221 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2222 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8; 2223 break; 2224 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2225 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE; 2226 break; 2227 default: 2228 DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %s (%u)", 2229 rte_cryptodev_get_cipher_algo_string(xform->cipher.algo), 2230 xform->cipher.algo); 2231 return -ENOTSUP; 2232 } 2233 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2234 DIR_ENC : DIR_DEC; 2235 2236 return 0; 2237 } 2238 2239 static int 2240 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused, 2241 struct rte_crypto_sym_xform *xform, 2242 dpaa_sec_session *session) 2243 { 2244 session->ctxt = DPAA_SEC_AUTH; 2245 session->auth_alg = xform->auth.algo; 2246 session->auth_key.length = xform->auth.key.length; 2247 if (xform->auth.key.length) { 2248 session->auth_key.data = 2249 rte_zmalloc(NULL, xform->auth.key.length, 2250 RTE_CACHE_LINE_SIZE); 2251 if (session->auth_key.data == NULL) { 2252 DPAA_SEC_ERR("No Memory for auth key"); 2253 return -ENOMEM; 2254 } 2255 memcpy(session->auth_key.data, xform->auth.key.data, 2256 xform->auth.key.length); 2257 2258 } 2259 session->digest_length = xform->auth.digest_length; 2260 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) { 2261 session->iv.offset = xform->auth.iv.offset; 2262 session->iv.length = xform->auth.iv.length; 2263 } 2264 2265 switch (xform->auth.algo) { 2266 case RTE_CRYPTO_AUTH_SHA1: 2267 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2268 session->auth_key.algmode = OP_ALG_AAI_HASH; 2269 break; 2270 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2271 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2272 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2273 break; 2274 case RTE_CRYPTO_AUTH_MD5: 2275 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2276 session->auth_key.algmode = OP_ALG_AAI_HASH; 2277 break; 2278 case RTE_CRYPTO_AUTH_MD5_HMAC: 2279 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2280 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2281 break; 2282 case RTE_CRYPTO_AUTH_SHA224: 2283 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2284 session->auth_key.algmode = OP_ALG_AAI_HASH; 2285 break; 2286 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2287 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2288 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2289 break; 2290 case RTE_CRYPTO_AUTH_SHA256: 2291 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2292 session->auth_key.algmode = OP_ALG_AAI_HASH; 2293 break; 2294 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2295 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2296 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2297 break; 2298 case RTE_CRYPTO_AUTH_SHA384: 2299 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2300 session->auth_key.algmode = OP_ALG_AAI_HASH; 2301 break; 2302 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2303 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2304 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2305 break; 2306 case RTE_CRYPTO_AUTH_SHA512: 2307 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2308 session->auth_key.algmode = OP_ALG_AAI_HASH; 2309 break; 2310 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2311 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2312 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2313 break; 2314 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2315 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9; 2316 session->auth_key.algmode = OP_ALG_AAI_F9; 2317 break; 2318 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2319 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA; 2320 session->auth_key.algmode = OP_ALG_AAI_F9; 2321 break; 2322 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2323 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2324 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC; 2325 break; 2326 case RTE_CRYPTO_AUTH_AES_CMAC: 2327 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2328 session->auth_key.algmode = OP_ALG_AAI_CMAC; 2329 break; 2330 default: 2331 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %s (%u)", 2332 rte_cryptodev_get_auth_algo_string(xform->auth.algo), 2333 xform->auth.algo); 2334 return -ENOTSUP; 2335 } 2336 2337 session->dir = 
(xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 2338 DIR_ENC : DIR_DEC; 2339 2340 return 0; 2341 } 2342 2343 static int 2344 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused, 2345 struct rte_crypto_sym_xform *xform, 2346 dpaa_sec_session *session) 2347 { 2348 2349 struct rte_crypto_cipher_xform *cipher_xform; 2350 struct rte_crypto_auth_xform *auth_xform; 2351 2352 session->ctxt = DPAA_SEC_CIPHER_HASH; 2353 if (session->auth_cipher_text) { 2354 cipher_xform = &xform->cipher; 2355 auth_xform = &xform->next->auth; 2356 } else { 2357 cipher_xform = &xform->next->cipher; 2358 auth_xform = &xform->auth; 2359 } 2360 2361 /* Set IV parameters */ 2362 session->iv.offset = cipher_xform->iv.offset; 2363 session->iv.length = cipher_xform->iv.length; 2364 2365 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2366 RTE_CACHE_LINE_SIZE); 2367 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2368 DPAA_SEC_ERR("No Memory for cipher key"); 2369 return -ENOMEM; 2370 } 2371 session->cipher_key.length = cipher_xform->key.length; 2372 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2373 RTE_CACHE_LINE_SIZE); 2374 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2375 DPAA_SEC_ERR("No Memory for auth key"); 2376 return -ENOMEM; 2377 } 2378 session->auth_key.length = auth_xform->key.length; 2379 memcpy(session->cipher_key.data, cipher_xform->key.data, 2380 cipher_xform->key.length); 2381 memcpy(session->auth_key.data, auth_xform->key.data, 2382 auth_xform->key.length); 2383 2384 session->digest_length = auth_xform->digest_length; 2385 session->auth_alg = auth_xform->algo; 2386 2387 switch (auth_xform->algo) { 2388 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2389 session->auth_key.alg = OP_ALG_ALGSEL_SHA1; 2390 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2391 break; 2392 case RTE_CRYPTO_AUTH_MD5_HMAC: 2393 session->auth_key.alg = OP_ALG_ALGSEL_MD5; 2394 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2395 break; 2396 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2397 session->auth_key.alg = OP_ALG_ALGSEL_SHA224; 2398 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2399 break; 2400 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2401 session->auth_key.alg = OP_ALG_ALGSEL_SHA256; 2402 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2403 break; 2404 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2405 session->auth_key.alg = OP_ALG_ALGSEL_SHA384; 2406 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2407 break; 2408 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2409 session->auth_key.alg = OP_ALG_ALGSEL_SHA512; 2410 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2411 break; 2412 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2413 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2414 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC; 2415 break; 2416 case RTE_CRYPTO_AUTH_AES_CMAC: 2417 session->auth_key.alg = OP_ALG_ALGSEL_AES; 2418 session->auth_key.algmode = OP_ALG_AAI_CMAC; 2419 break; 2420 default: 2421 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %s (%u)", 2422 rte_cryptodev_get_auth_algo_string(auth_xform->algo), 2423 auth_xform->algo); 2424 return -ENOTSUP; 2425 } 2426 2427 session->cipher_alg = cipher_xform->algo; 2428 2429 switch (cipher_xform->algo) { 2430 case RTE_CRYPTO_CIPHER_AES_CBC: 2431 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2432 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2433 break; 2434 case RTE_CRYPTO_CIPHER_DES_CBC: 2435 session->cipher_key.alg = OP_ALG_ALGSEL_DES; 2436 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2437 break; 2438 case RTE_CRYPTO_CIPHER_3DES_CBC: 2439 
session->cipher_key.alg = OP_ALG_ALGSEL_3DES; 2440 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2441 break; 2442 case RTE_CRYPTO_CIPHER_AES_CTR: 2443 session->cipher_key.alg = OP_ALG_ALGSEL_AES; 2444 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2445 break; 2446 default: 2447 2448 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %s (%u)", 2449 rte_cryptodev_get_cipher_algo_string(cipher_xform->algo), 2450 cipher_xform->algo); 2451 return -ENOTSUP; 2452 } 2453 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2454 DIR_ENC : DIR_DEC; 2455 return 0; 2456 } 2457 2458 static int 2459 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, 2460 struct rte_crypto_sym_xform *xform, 2461 dpaa_sec_session *session) 2462 { 2463 session->aead_alg = xform->aead.algo; 2464 session->ctxt = DPAA_SEC_AEAD; 2465 session->iv.length = xform->aead.iv.length; 2466 session->iv.offset = xform->aead.iv.offset; 2467 session->auth_only_len = xform->aead.aad_length; 2468 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, 2469 RTE_CACHE_LINE_SIZE); 2470 if (session->aead_key.data == NULL && xform->aead.key.length > 0) { 2471 DPAA_SEC_ERR("No Memory for aead key"); 2472 return -ENOMEM; 2473 } 2474 session->aead_key.length = xform->aead.key.length; 2475 session->digest_length = xform->aead.digest_length; 2476 2477 memcpy(session->aead_key.data, xform->aead.key.data, 2478 xform->aead.key.length); 2479 2480 switch (session->aead_alg) { 2481 case RTE_CRYPTO_AEAD_AES_GCM: 2482 session->aead_key.alg = OP_ALG_ALGSEL_AES; 2483 session->aead_key.algmode = OP_ALG_AAI_GCM; 2484 break; 2485 default: 2486 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg); 2487 return -ENOTSUP; 2488 } 2489 2490 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 
2491 DIR_ENC : DIR_DEC; 2492 2493 return 0; 2494 } 2495 2496 static struct qman_fq * 2497 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) 2498 { 2499 unsigned int i; 2500 2501 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2502 if (qi->inq_attach[i] == 0) { 2503 qi->inq_attach[i] = 1; 2504 return &qi->inq[i]; 2505 } 2506 } 2507 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions); 2508 2509 return NULL; 2510 } 2511 2512 static int 2513 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) 2514 { 2515 unsigned int i; 2516 int ret; 2517 2518 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 2519 if (&qi->inq[i] == fq) { 2520 ret = qman_retire_fq(fq, NULL); 2521 if (ret != 0) 2522 DPAA_SEC_ERR("Queue %d is not retired err: %d", 2523 fq->fqid, ret); 2524 qman_oos_fq(fq); 2525 qi->inq_attach[i] = 0; 2526 return 0; 2527 } 2528 } 2529 return -1; 2530 } 2531 2532 int 2533 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) 2534 { 2535 int ret; 2536 2537 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp; 2538 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 2539 ret = rte_dpaa_portal_init((void *)0); 2540 if (ret) { 2541 DPAA_SEC_ERR("Failure in affining portal"); 2542 return ret; 2543 } 2544 } 2545 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES], 2546 rte_dpaa_mem_vtop(&sess->cdb), 2547 qman_fq_fqid(&qp->outq)); 2548 if (ret) 2549 DPAA_SEC_ERR("Unable to init sec queue"); 2550 2551 return ret; 2552 } 2553 2554 static inline void 2555 free_session_data(dpaa_sec_session *s) 2556 { 2557 if (is_aead(s)) 2558 rte_free(s->aead_key.data); 2559 else { 2560 rte_free(s->auth_key.data); 2561 rte_free(s->cipher_key.data); 2562 } 2563 memset(s, 0, sizeof(dpaa_sec_session)); 2564 } 2565 2566 static int 2567 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, 2568 struct rte_crypto_sym_xform *xform, void *sess) 2569 { 2570 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2571 dpaa_sec_session *session = sess; 2572 uint32_t i; 2573 int ret; 2574 2575 PMD_INIT_FUNC_TRACE(); 2576 2577 if (unlikely(sess == NULL)) { 2578 DPAA_SEC_ERR("invalid session struct"); 2579 return -EINVAL; 2580 } 2581 memset(session, 0, sizeof(dpaa_sec_session)); 2582 2583 /* Default IV length = 0 */ 2584 session->iv.length = 0; 2585 2586 /* Cipher Only */ 2587 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2588 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2589 ret = dpaa_sec_cipher_init(dev, xform, session); 2590 2591 /* Authentication Only */ 2592 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2593 xform->next == NULL) { 2594 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2595 session->ctxt = DPAA_SEC_AUTH; 2596 ret = dpaa_sec_auth_init(dev, xform, session); 2597 2598 /* Cipher then Authenticate */ 2599 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2600 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2601 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 2602 session->auth_cipher_text = 1; 2603 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2604 ret = dpaa_sec_auth_init(dev, xform, session); 2605 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2606 ret = dpaa_sec_cipher_init(dev, xform, session); 2607 else 2608 ret = dpaa_sec_chain_init(dev, xform, session); 2609 } else { 2610 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2611 return -ENOTSUP; 2612 } 2613 /* Authenticate then Cipher */ 2614 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2615 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2616 
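	/*
	 * Auth followed by cipher is accepted only when the cipher step is a
	 * decrypt, mirroring the encrypt-only rule applied to the
	 * cipher-then-auth chain above. NULL algorithms degrade the chain to
	 * plain auth-only or cipher-only handling so the simpler descriptors
	 * are reused.
	 */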
if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 2617 session->auth_cipher_text = 0; 2618 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2619 ret = dpaa_sec_cipher_init(dev, xform, session); 2620 else if (xform->next->cipher.algo 2621 == RTE_CRYPTO_CIPHER_NULL) 2622 ret = dpaa_sec_auth_init(dev, xform, session); 2623 else 2624 ret = dpaa_sec_chain_init(dev, xform, session); 2625 } else { 2626 DPAA_SEC_ERR("Not supported: Auth then Cipher"); 2627 return -ENOTSUP; 2628 } 2629 2630 /* AEAD operation for AES-GCM kind of Algorithms */ 2631 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2632 xform->next == NULL) { 2633 ret = dpaa_sec_aead_init(dev, xform, session); 2634 2635 } else { 2636 DPAA_SEC_ERR("Invalid crypto type"); 2637 return -EINVAL; 2638 } 2639 if (ret) { 2640 DPAA_SEC_ERR("unable to init session"); 2641 goto err1; 2642 } 2643 2644 rte_spinlock_lock(&internals->lock); 2645 for (i = 0; i < MAX_DPAA_CORES; i++) { 2646 session->inq[i] = dpaa_sec_attach_rxq(internals); 2647 if (session->inq[i] == NULL) { 2648 DPAA_SEC_ERR("unable to attach sec queue"); 2649 rte_spinlock_unlock(&internals->lock); 2650 ret = -EBUSY; 2651 goto err1; 2652 } 2653 } 2654 rte_spinlock_unlock(&internals->lock); 2655 2656 return 0; 2657 2658 err1: 2659 free_session_data(session); 2660 return ret; 2661 } 2662 2663 static int 2664 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, 2665 struct rte_crypto_sym_xform *xform, 2666 struct rte_cryptodev_sym_session *sess) 2667 { 2668 void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 2669 int ret; 2670 2671 PMD_INIT_FUNC_TRACE(); 2672 2673 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); 2674 if (ret != 0) { 2675 DPAA_SEC_ERR("failed to configure session parameters"); 2676 return ret; 2677 } 2678 2679 ret = dpaa_sec_prep_cdb(sess_private_data); 2680 if (ret) { 2681 DPAA_SEC_ERR("Unable to prepare sec cdb"); 2682 return ret; 2683 } 2684 2685 return 0; 2686 } 2687 2688 static inline void 2689 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s) 2690 { 2691 struct dpaa_sec_dev_private *qi = dev->data->dev_private; 2692 uint8_t i; 2693 2694 for (i = 0; i < MAX_DPAA_CORES; i++) { 2695 if (s->inq[i]) 2696 dpaa_sec_detach_rxq(qi, s->inq[i]); 2697 s->inq[i] = NULL; 2698 s->qp[i] = NULL; 2699 } 2700 free_session_data(s); 2701 } 2702 2703 /** Clear the memory of session so it doesn't leave key material behind */ 2704 static void 2705 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, 2706 struct rte_cryptodev_sym_session *sess) 2707 { 2708 PMD_INIT_FUNC_TRACE(); 2709 void *sess_priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 2710 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 2711 2712 free_session_memory(dev, s); 2713 } 2714 2715 static int 2716 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2717 struct rte_security_ipsec_xform *ipsec_xform, 2718 dpaa_sec_session *session) 2719 { 2720 PMD_INIT_FUNC_TRACE(); 2721 2722 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2723 RTE_CACHE_LINE_SIZE); 2724 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2725 DPAA_SEC_ERR("No Memory for aead key"); 2726 return -ENOMEM; 2727 } 2728 memcpy(session->aead_key.data, aead_xform->key.data, 2729 aead_xform->key.length); 2730 2731 session->digest_length = aead_xform->digest_length; 2732 session->aead_key.length = aead_xform->key.length; 2733 2734 switch (aead_xform->algo) { 2735 case RTE_CRYPTO_AEAD_AES_GCM: 2736 switch (session->digest_length) { 2737 case 8: 2738 
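		/* Each supported ICV length maps to its own IPsec GCM protocol
		 * selector; digest lengths other than 8, 12 or 16 bytes are
		 * rejected below.
		 */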
session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8; 2739 break; 2740 case 12: 2741 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12; 2742 break; 2743 case 16: 2744 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16; 2745 break; 2746 default: 2747 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d", 2748 session->digest_length); 2749 return -EINVAL; 2750 } 2751 if (session->dir == DIR_ENC) { 2752 memcpy(session->encap_pdb.gcm.salt, 2753 (uint8_t *)&(ipsec_xform->salt), 4); 2754 } else { 2755 memcpy(session->decap_pdb.gcm.salt, 2756 (uint8_t *)&(ipsec_xform->salt), 4); 2757 } 2758 session->aead_key.algmode = OP_ALG_AAI_GCM; 2759 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2760 break; 2761 default: 2762 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u", 2763 aead_xform->algo); 2764 return -ENOTSUP; 2765 } 2766 return 0; 2767 } 2768 2769 static int 2770 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2771 struct rte_crypto_auth_xform *auth_xform, 2772 struct rte_security_ipsec_xform *ipsec_xform, 2773 dpaa_sec_session *session) 2774 { 2775 if (cipher_xform) { 2776 session->cipher_key.data = rte_zmalloc(NULL, 2777 cipher_xform->key.length, 2778 RTE_CACHE_LINE_SIZE); 2779 if (session->cipher_key.data == NULL && 2780 cipher_xform->key.length > 0) { 2781 DPAA_SEC_ERR("No Memory for cipher key"); 2782 return -ENOMEM; 2783 } 2784 2785 session->cipher_key.length = cipher_xform->key.length; 2786 memcpy(session->cipher_key.data, cipher_xform->key.data, 2787 cipher_xform->key.length); 2788 session->cipher_alg = cipher_xform->algo; 2789 } else { 2790 session->cipher_key.data = NULL; 2791 session->cipher_key.length = 0; 2792 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2793 } 2794 2795 if (auth_xform) { 2796 session->auth_key.data = rte_zmalloc(NULL, 2797 auth_xform->key.length, 2798 RTE_CACHE_LINE_SIZE); 2799 if (session->auth_key.data == NULL && 2800 auth_xform->key.length > 0) { 2801 DPAA_SEC_ERR("No Memory for auth key"); 2802 return -ENOMEM; 2803 } 2804 session->auth_key.length = auth_xform->key.length; 2805 memcpy(session->auth_key.data, auth_xform->key.data, 2806 auth_xform->key.length); 2807 session->auth_alg = auth_xform->algo; 2808 session->digest_length = auth_xform->digest_length; 2809 } else { 2810 session->auth_key.data = NULL; 2811 session->auth_key.length = 0; 2812 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2813 } 2814 2815 switch (session->auth_alg) { 2816 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2817 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96; 2818 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2819 break; 2820 case RTE_CRYPTO_AUTH_MD5_HMAC: 2821 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96; 2822 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2823 break; 2824 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2825 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2826 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2827 if (session->digest_length != 16) 2828 DPAA_SEC_WARN( 2829 "+++Using sha256-hmac truncated len is non-standard," 2830 "it will not work with lookaside proto"); 2831 break; 2832 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2833 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2834 if (session->digest_length == 6) 2835 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_96; 2836 else if (session->digest_length == 14) 2837 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_224; 2838 else 2839 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_224_112; 2840 break; 2841 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2842 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2843 session->auth_key.algmode = 
OP_ALG_AAI_HMAC; 2844 break; 2845 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2846 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2847 session->auth_key.algmode = OP_ALG_AAI_HMAC; 2848 break; 2849 case RTE_CRYPTO_AUTH_AES_CMAC: 2850 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96; 2851 session->auth_key.algmode = OP_ALG_AAI_CMAC; 2852 break; 2853 case RTE_CRYPTO_AUTH_NULL: 2854 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL; 2855 break; 2856 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2857 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96; 2858 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC; 2859 break; 2860 default: 2861 DPAA_SEC_ERR("Crypto: Unsupported auth alg %s (%u)", 2862 rte_cryptodev_get_auth_algo_string(session->auth_alg), 2863 session->auth_alg); 2864 return -ENOTSUP; 2865 } 2866 2867 switch (session->cipher_alg) { 2868 case RTE_CRYPTO_CIPHER_AES_CBC: 2869 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC; 2870 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2871 break; 2872 case RTE_CRYPTO_CIPHER_DES_CBC: 2873 session->cipher_key.alg = OP_PCL_IPSEC_DES; 2874 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2875 break; 2876 case RTE_CRYPTO_CIPHER_3DES_CBC: 2877 session->cipher_key.alg = OP_PCL_IPSEC_3DES; 2878 session->cipher_key.algmode = OP_ALG_AAI_CBC; 2879 break; 2880 case RTE_CRYPTO_CIPHER_AES_CTR: 2881 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR; 2882 session->cipher_key.algmode = OP_ALG_AAI_CTR; 2883 if (session->dir == DIR_ENC) { 2884 session->encap_pdb.ctr.ctr_initial = 0x00000001; 2885 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2886 } else { 2887 session->decap_pdb.ctr.ctr_initial = 0x00000001; 2888 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2889 } 2890 break; 2891 case RTE_CRYPTO_CIPHER_NULL: 2892 session->cipher_key.alg = OP_PCL_IPSEC_NULL; 2893 break; 2894 default: 2895 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)", 2896 rte_cryptodev_get_cipher_algo_string(session->cipher_alg), 2897 session->cipher_alg); 2898 return -ENOTSUP; 2899 } 2900 2901 return 0; 2902 } 2903 2904 static int 2905 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, 2906 struct rte_security_session_conf *conf, 2907 void *sess) 2908 { 2909 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 2910 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2911 struct rte_crypto_auth_xform *auth_xform = NULL; 2912 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2913 struct rte_crypto_aead_xform *aead_xform = NULL; 2914 dpaa_sec_session *session = (dpaa_sec_session *)sess; 2915 uint32_t i; 2916 int ret; 2917 2918 PMD_INIT_FUNC_TRACE(); 2919 2920 memset(session, 0, sizeof(dpaa_sec_session)); 2921 session->proto_alg = conf->protocol; 2922 session->ctxt = DPAA_SEC_IPSEC; 2923 2924 if (ipsec_xform->life.bytes_hard_limit != 0 || 2925 ipsec_xform->life.bytes_soft_limit != 0 || 2926 ipsec_xform->life.packets_hard_limit != 0 || 2927 ipsec_xform->life.packets_soft_limit != 0) 2928 return -ENOTSUP; 2929 2930 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 2931 session->dir = DIR_ENC; 2932 else 2933 session->dir = DIR_DEC; 2934 2935 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2936 cipher_xform = &conf->crypto_xform->cipher; 2937 if (conf->crypto_xform->next) 2938 auth_xform = &conf->crypto_xform->next->auth; 2939 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2940 ipsec_xform, session); 2941 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2942 auth_xform = &conf->crypto_xform->auth; 2943 if 
(conf->crypto_xform->next) 2944 cipher_xform = &conf->crypto_xform->next->cipher; 2945 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform, 2946 ipsec_xform, session); 2947 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2948 aead_xform = &conf->crypto_xform->aead; 2949 ret = dpaa_sec_ipsec_aead_init(aead_xform, 2950 ipsec_xform, session); 2951 } else { 2952 DPAA_SEC_ERR("XFORM not specified"); 2953 ret = -EINVAL; 2954 goto out; 2955 } 2956 if (ret) { 2957 DPAA_SEC_ERR("Failed to process xform"); 2958 goto out; 2959 } 2960 2961 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2962 if (ipsec_xform->tunnel.type == 2963 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2964 session->ip4_hdr.ip_v = IPVERSION; 2965 session->ip4_hdr.ip_hl = 5; 2966 if (ipsec_xform->options.udp_encap) 2967 session->ip4_hdr.ip_len = rte_cpu_to_be_16( 2968 sizeof(session->ip4_hdr) + sizeof(struct rte_udp_hdr)); 2969 else 2970 session->ip4_hdr.ip_len = rte_cpu_to_be_16( 2971 sizeof(session->ip4_hdr)); 2972 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2973 session->ip4_hdr.ip_id = 0; 2974 session->ip4_hdr.ip_off = 0; 2975 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2976 if (ipsec_xform->options.udp_encap) 2977 session->ip4_hdr.ip_p = IPPROTO_UDP; 2978 else 2979 session->ip4_hdr.ip_p = (ipsec_xform->proto == 2980 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2981 IPPROTO_ESP : IPPROTO_AH; 2982 session->ip4_hdr.ip_sum = 0; 2983 session->ip4_hdr.ip_src = 2984 ipsec_xform->tunnel.ipv4.src_ip; 2985 session->ip4_hdr.ip_dst = 2986 ipsec_xform->tunnel.ipv4.dst_ip; 2987 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 2988 (void *)&session->ip4_hdr, 2989 sizeof(struct ip)); 2990 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 2991 } else if (ipsec_xform->tunnel.type == 2992 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2993 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2994 DPAA_IPv6_DEFAULT_VTC_FLOW | 2995 ((ipsec_xform->tunnel.ipv6.dscp << 2996 RTE_IPV6_HDR_TC_SHIFT) & 2997 RTE_IPV6_HDR_TC_MASK) | 2998 ((ipsec_xform->tunnel.ipv6.flabel << 2999 RTE_IPV6_HDR_FL_SHIFT) & 3000 RTE_IPV6_HDR_FL_MASK)); 3001 /* Payload length will be updated by HW */ 3002 session->ip6_hdr.payload_len = 0; 3003 session->ip6_hdr.hop_limits = 3004 ipsec_xform->tunnel.ipv6.hlimit; 3005 if (ipsec_xform->options.udp_encap) 3006 session->ip6_hdr.proto = IPPROTO_UDP; 3007 else 3008 session->ip6_hdr.proto = (ipsec_xform->proto == 3009 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
3010 IPPROTO_ESP : IPPROTO_AH; 3011 memcpy(&session->ip6_hdr.src_addr, 3012 &ipsec_xform->tunnel.ipv6.src_addr, 16); 3013 memcpy(&session->ip6_hdr.dst_addr, 3014 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 3015 session->encap_pdb.ip_hdr_len = 3016 sizeof(struct rte_ipv6_hdr); 3017 } 3018 3019 session->encap_pdb.options = 3020 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 3021 PDBOPTS_ESP_OIHI_PDB_INL | 3022 PDBOPTS_ESP_IVSRC | 3023 PDBHMO_ESP_SNR; 3024 if (ipsec_xform->options.dec_ttl) 3025 session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; 3026 session->encap_pdb.spi = ipsec_xform->spi; 3027 /* Initializing the sequence number to 1, Security 3028 * engine will choose this sequence number for first packet 3029 * Refer: RFC4303 section: 3.3.3.Sequence Number Generation 3030 */ 3031 session->encap_pdb.seq_num = 1; 3032 if (ipsec_xform->options.esn) { 3033 session->encap_pdb.options |= PDBOPTS_ESP_ESN; 3034 session->encap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3035 session->encap_pdb.seq_num = conf->ipsec.esn.low; 3036 } 3037 if (ipsec_xform->options.udp_encap) { 3038 struct rte_udp_hdr *udp_hdr; 3039 3040 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) 3041 udp_hdr = (struct rte_udp_hdr *)(&session->udp4.udp_hdr); 3042 else 3043 udp_hdr = (struct rte_udp_hdr *)(&session->udp6.udp_hdr); 3044 3045 if (ipsec_xform->udp.sport) 3046 udp_hdr->src_port = rte_cpu_to_be_16(ipsec_xform->udp.sport); 3047 else 3048 udp_hdr->src_port = rte_cpu_to_be_16(DPAA_DEFAULT_NAT_T_PORT); 3049 3050 if (ipsec_xform->udp.dport) 3051 udp_hdr->dst_port = rte_cpu_to_be_16(ipsec_xform->udp.dport); 3052 else 3053 udp_hdr->dst_port = rte_cpu_to_be_16(DPAA_DEFAULT_NAT_T_PORT); 3054 udp_hdr->dgram_len = 0; 3055 udp_hdr->dgram_cksum = 0; 3056 3057 session->encap_pdb.ip_hdr_len += sizeof(struct rte_udp_hdr); 3058 session->encap_pdb.options |= PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; 3059 } 3060 if (ipsec_xform->options.ecn) 3061 session->encap_pdb.options |= PDBOPTS_ESP_TECN; 3062 } else if (ipsec_xform->direction == 3063 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 3064 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 3065 if (ipsec_xform->options.udp_encap) 3066 session->decap_pdb.options = 3067 (sizeof(struct ip) + sizeof(struct rte_udp_hdr)) << 16; 3068 else 3069 session->decap_pdb.options = sizeof(struct ip) << 16; 3070 if (ipsec_xform->options.copy_df) 3071 session->decap_pdb.options |= PDBHMO_ESP_DFV; 3072 } else { 3073 if (ipsec_xform->options.udp_encap) 3074 session->decap_pdb.options = 3075 (sizeof(struct rte_ipv6_hdr) + sizeof(struct rte_udp_hdr)) << 16; 3076 else 3077 session->decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; 3078 } 3079 if (ipsec_xform->options.esn) { 3080 session->decap_pdb.options |= PDBOPTS_ESP_ESN; 3081 session->decap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3082 session->decap_pdb.seq_num = conf->ipsec.esn.low; 3083 } 3084 if (ipsec_xform->options.copy_dscp) 3085 session->decap_pdb.options |= PDBHMO_ESP_DIFFSERV; 3086 if (ipsec_xform->options.ecn) 3087 session->decap_pdb.options |= PDBOPTS_ESP_TECN; 3088 3089 if (ipsec_xform->replay_win_sz) { 3090 uint32_t win_sz; 3091 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 3092 3093 switch (win_sz) { 3094 case 1: 3095 case 2: 3096 case 4: 3097 case 8: 3098 case 16: 3099 case 32: 3100 session->decap_pdb.options |= PDBOPTS_ESP_ARS32; 3101 break; 3102 case 64: 3103 session->decap_pdb.options |= PDBOPTS_ESP_ARS64; 3104 break; 3105 default: 3106 session->decap_pdb.options |= 3107 PDBOPTS_ESP_ARS128; 3108 } 3109 } 3110 } else 3111 
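		/* Direction is neither egress nor ingress: treat the SA as
		 * invalid and release whatever has been allocated for the
		 * session so far.
		 */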
goto out; 3112 rte_spinlock_lock(&internals->lock); 3113 for (i = 0; i < MAX_DPAA_CORES; i++) { 3114 session->inq[i] = dpaa_sec_attach_rxq(internals); 3115 if (session->inq[i] == NULL) { 3116 DPAA_SEC_ERR("unable to attach sec queue"); 3117 rte_spinlock_unlock(&internals->lock); 3118 goto out; 3119 } 3120 } 3121 rte_spinlock_unlock(&internals->lock); 3122 3123 return 0; 3124 out: 3125 free_session_data(session); 3126 return -1; 3127 } 3128 3129 static int 3130 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, 3131 struct rte_security_session_conf *conf, 3132 void *sess) 3133 { 3134 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3135 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3136 struct rte_crypto_auth_xform *auth_xform = NULL; 3137 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3138 dpaa_sec_session *session = (dpaa_sec_session *)sess; 3139 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; 3140 uint32_t i; 3141 int ret; 3142 3143 PMD_INIT_FUNC_TRACE(); 3144 3145 memset(session, 0, sizeof(dpaa_sec_session)); 3146 3147 /* find xfrm types */ 3148 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3149 cipher_xform = &xform->cipher; 3150 if (xform->next != NULL && 3151 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) 3152 auth_xform = &xform->next->auth; 3153 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3154 auth_xform = &xform->auth; 3155 if (xform->next != NULL && 3156 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) 3157 cipher_xform = &xform->next->cipher; 3158 } else { 3159 DPAA_SEC_ERR("Invalid crypto type"); 3160 return -EINVAL; 3161 } 3162 3163 session->proto_alg = conf->protocol; 3164 session->ctxt = DPAA_SEC_PDCP; 3165 3166 if (cipher_xform) { 3167 switch (cipher_xform->algo) { 3168 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3169 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW; 3170 break; 3171 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3172 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC; 3173 break; 3174 case RTE_CRYPTO_CIPHER_AES_CTR: 3175 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES; 3176 break; 3177 case RTE_CRYPTO_CIPHER_NULL: 3178 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL; 3179 break; 3180 default: 3181 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", 3182 session->cipher_alg); 3183 return -EINVAL; 3184 } 3185 3186 session->cipher_key.data = rte_zmalloc(NULL, 3187 cipher_xform->key.length, 3188 RTE_CACHE_LINE_SIZE); 3189 if (session->cipher_key.data == NULL && 3190 cipher_xform->key.length > 0) { 3191 DPAA_SEC_ERR("No Memory for cipher key"); 3192 return -ENOMEM; 3193 } 3194 session->cipher_key.length = cipher_xform->key.length; 3195 memcpy(session->cipher_key.data, cipher_xform->key.data, 3196 cipher_xform->key.length); 3197 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
3198 DIR_ENC : DIR_DEC; 3199 session->cipher_alg = cipher_xform->algo; 3200 } else { 3201 session->cipher_key.data = NULL; 3202 session->cipher_key.length = 0; 3203 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3204 session->dir = DIR_ENC; 3205 } 3206 3207 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3208 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 && 3209 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) { 3210 DPAA_SEC_ERR( 3211 "PDCP Seq Num size should be 5/12 bits for cmode"); 3212 ret = -EINVAL; 3213 goto out; 3214 } 3215 } 3216 3217 if (auth_xform) { 3218 switch (auth_xform->algo) { 3219 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3220 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW; 3221 break; 3222 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3223 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC; 3224 break; 3225 case RTE_CRYPTO_AUTH_AES_CMAC: 3226 session->auth_key.alg = PDCP_AUTH_TYPE_AES; 3227 break; 3228 case RTE_CRYPTO_AUTH_NULL: 3229 session->auth_key.alg = PDCP_AUTH_TYPE_NULL; 3230 break; 3231 default: 3232 DPAA_SEC_ERR("Crypto: Unsupported auth alg %s (%u)", 3233 rte_cryptodev_get_auth_algo_string(session->auth_alg), 3234 session->auth_alg); 3235 rte_free(session->cipher_key.data); 3236 return -EINVAL; 3237 } 3238 session->auth_key.data = rte_zmalloc(NULL, 3239 auth_xform->key.length, 3240 RTE_CACHE_LINE_SIZE); 3241 if (!session->auth_key.data && 3242 auth_xform->key.length > 0) { 3243 DPAA_SEC_ERR("No Memory for auth key"); 3244 rte_free(session->cipher_key.data); 3245 return -ENOMEM; 3246 } 3247 session->auth_key.length = auth_xform->key.length; 3248 memcpy(session->auth_key.data, auth_xform->key.data, 3249 auth_xform->key.length); 3250 session->auth_alg = auth_xform->algo; 3251 } else { 3252 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3253 DPAA_SEC_ERR("Crypto: Integrity must for c-plane"); 3254 ret = -EINVAL; 3255 goto out; 3256 } 3257 session->auth_key.data = NULL; 3258 session->auth_key.length = 0; 3259 session->auth_alg = 0; 3260 } 3261 session->pdcp.domain = pdcp_xform->domain; 3262 session->pdcp.bearer = pdcp_xform->bearer; 3263 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3264 session->pdcp.sn_size = pdcp_xform->sn_size; 3265 session->pdcp.hfn = pdcp_xform->hfn; 3266 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3267 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3268 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled; 3269 if (cipher_xform) 3270 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3271 3272 rte_spinlock_lock(&dev_priv->lock); 3273 for (i = 0; i < MAX_DPAA_CORES; i++) { 3274 session->inq[i] = dpaa_sec_attach_rxq(dev_priv); 3275 if (session->inq[i] == NULL) { 3276 DPAA_SEC_ERR("unable to attach sec queue"); 3277 rte_spinlock_unlock(&dev_priv->lock); 3278 ret = -EBUSY; 3279 goto out; 3280 } 3281 } 3282 rte_spinlock_unlock(&dev_priv->lock); 3283 return 0; 3284 out: 3285 rte_free(session->auth_key.data); 3286 rte_free(session->cipher_key.data); 3287 memset(session, 0, sizeof(dpaa_sec_session)); 3288 return ret; 3289 } 3290 3291 static int 3292 dpaa_sec_security_session_create(void *dev, 3293 struct rte_security_session_conf *conf, 3294 struct rte_security_session *sess) 3295 { 3296 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3297 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3298 int ret; 3299 3300 switch (conf->protocol) { 3301 case RTE_SECURITY_PROTOCOL_IPSEC: 3302 ret = dpaa_sec_set_ipsec_session(cdev, conf, 3303 sess_private_data); 3304 break; 3305 case RTE_SECURITY_PROTOCOL_PDCP: 3306 ret = 
dpaa_sec_set_pdcp_session(cdev, conf, 3307 sess_private_data); 3308 break; 3309 case RTE_SECURITY_PROTOCOL_MACSEC: 3310 return -ENOTSUP; 3311 default: 3312 return -EINVAL; 3313 } 3314 if (ret != 0) { 3315 DPAA_SEC_ERR("failed to configure session parameters"); 3316 return ret; 3317 } 3318 3319 ret = dpaa_sec_prep_cdb(sess_private_data); 3320 if (ret) { 3321 DPAA_SEC_ERR("Unable to prepare sec cdb"); 3322 return ret; 3323 } 3324 3325 return ret; 3326 } 3327 3328 /** Clear the memory of session so it doesn't leave key material behind */ 3329 static int 3330 dpaa_sec_security_session_destroy(void *dev __rte_unused, 3331 struct rte_security_session *sess) 3332 { 3333 PMD_INIT_FUNC_TRACE(); 3334 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 3335 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv; 3336 3337 if (sess_priv) { 3338 free_session_memory((struct rte_cryptodev *)dev, s); 3339 } 3340 return 0; 3341 } 3342 3343 static unsigned int 3344 dpaa_sec_security_session_get_size(void *device __rte_unused) 3345 { 3346 return sizeof(dpaa_sec_session); 3347 } 3348 3349 static int 3350 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3351 struct rte_cryptodev_config *config __rte_unused) 3352 { 3353 PMD_INIT_FUNC_TRACE(); 3354 3355 return 0; 3356 } 3357 3358 static int 3359 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused) 3360 { 3361 PMD_INIT_FUNC_TRACE(); 3362 return 0; 3363 } 3364 3365 static void 3366 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused) 3367 { 3368 PMD_INIT_FUNC_TRACE(); 3369 } 3370 3371 static int 3372 dpaa_sec_dev_close(struct rte_cryptodev *dev) 3373 { 3374 PMD_INIT_FUNC_TRACE(); 3375 3376 if (dev == NULL) 3377 return -ENOMEM; 3378 3379 return 0; 3380 } 3381 3382 static void 3383 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev, 3384 struct rte_cryptodev_info *info) 3385 { 3386 struct dpaa_sec_dev_private *internals = dev->data->dev_private; 3387 3388 PMD_INIT_FUNC_TRACE(); 3389 if (info != NULL) { 3390 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3391 info->feature_flags = dev->feature_flags; 3392 info->capabilities = dpaa_sec_capabilities; 3393 info->sym.max_nb_sessions = internals->max_nb_sessions; 3394 info->driver_id = dpaa_cryptodev_driver_id; 3395 } 3396 } 3397 3398 static enum qman_cb_dqrr_result 3399 dpaa_sec_process_parallel_event(void *event, 3400 struct qman_portal *qm __always_unused, 3401 struct qman_fq *outq, 3402 const struct qm_dqrr_entry *dqrr, 3403 void **bufs) 3404 { 3405 const struct qm_fd *fd; 3406 struct dpaa_sec_job *job; 3407 struct dpaa_sec_op_ctx *ctx; 3408 struct rte_event *ev = (struct rte_event *)event; 3409 3410 fd = &dqrr->fd; 3411 3412 /* sg is embedded in an op ctx, 3413 * sg[0] is for output 3414 * sg[1] for input 3415 */ 3416 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3417 3418 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3419 ctx->fd_status = fd->status; 3420 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3421 struct qm_sg_entry *sg_out; 3422 uint32_t len; 3423 3424 sg_out = &job->sg[0]; 3425 hw_sg_to_cpu(sg_out); 3426 len = sg_out->length; 3427 ctx->op->sym->m_src->pkt_len = len; 3428 ctx->op->sym->m_src->data_len = len; 3429 } 3430 if (!ctx->fd_status) { 3431 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3432 } else { 3433 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3434 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3435 } 3436 ev->event_ptr = (void *)ctx->op; 3437 3438 ev->flow_id = outq->ev.flow_id; 3439 ev->sub_event_type = outq->ev.sub_event_type; 
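	/*
	 * The remaining scheduling attributes come from the event template
	 * that dpaa_sec_eventq_attach() stores on the out FQ; this callback
	 * only fixes the event type/op and the crypto op pointer.
	 */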
3440 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3441 ev->op = RTE_EVENT_OP_NEW; 3442 ev->sched_type = outq->ev.sched_type; 3443 ev->queue_id = outq->ev.queue_id; 3444 ev->priority = outq->ev.priority; 3445 *bufs = (void *)ctx->op; 3446 3447 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3448 3449 return qman_cb_dqrr_consume; 3450 } 3451 3452 static enum qman_cb_dqrr_result 3453 dpaa_sec_process_atomic_event(void *event, 3454 struct qman_portal *qm __rte_unused, 3455 struct qman_fq *outq, 3456 const struct qm_dqrr_entry *dqrr, 3457 void **bufs) 3458 { 3459 u8 index; 3460 const struct qm_fd *fd; 3461 struct dpaa_sec_job *job; 3462 struct dpaa_sec_op_ctx *ctx; 3463 struct rte_event *ev = (struct rte_event *)event; 3464 3465 fd = &dqrr->fd; 3466 3467 /* sg is embedded in an op ctx, 3468 * sg[0] is for output 3469 * sg[1] for input 3470 */ 3471 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd)); 3472 3473 ctx = container_of(job, struct dpaa_sec_op_ctx, job); 3474 ctx->fd_status = fd->status; 3475 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 3476 struct qm_sg_entry *sg_out; 3477 uint32_t len; 3478 3479 sg_out = &job->sg[0]; 3480 hw_sg_to_cpu(sg_out); 3481 len = sg_out->length; 3482 ctx->op->sym->m_src->pkt_len = len; 3483 ctx->op->sym->m_src->data_len = len; 3484 } 3485 if (!ctx->fd_status) { 3486 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 3487 } else { 3488 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); 3489 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 3490 } 3491 ev->event_ptr = (void *)ctx->op; 3492 ev->flow_id = outq->ev.flow_id; 3493 ev->sub_event_type = outq->ev.sub_event_type; 3494 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3495 ev->op = RTE_EVENT_OP_NEW; 3496 ev->sched_type = outq->ev.sched_type; 3497 ev->queue_id = outq->ev.queue_id; 3498 ev->priority = outq->ev.priority; 3499 3500 /* Save active dqrr entries */ 3501 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1); 3502 DPAA_PER_LCORE_DQRR_SIZE++; 3503 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 3504 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src; 3505 ev->impl_opaque = index + 1; 3506 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1; 3507 *bufs = (void *)ctx->op; 3508 3509 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 3510 3511 return qman_cb_dqrr_defer; 3512 } 3513 3514 int 3515 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, 3516 int qp_id, 3517 uint16_t ch_id, 3518 const struct rte_event *event) 3519 { 3520 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3521 struct qm_mcc_initfq opts = {0}; 3522 3523 int ret; 3524 3525 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3526 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3527 opts.fqd.dest.channel = ch_id; 3528 3529 switch (event->sched_type) { 3530 case RTE_SCHED_TYPE_ATOMIC: 3531 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; 3532 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary 3533 * configuration with HOLD_ACTIVE setting 3534 */ 3535 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); 3536 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; 3537 break; 3538 case RTE_SCHED_TYPE_ORDERED: 3539 DPAA_SEC_ERR("Ordered queue schedule type is not supported"); 3540 return -ENOTSUP; 3541 default: 3542 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; 3543 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event; 3544 break; 3545 } 3546 3547 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts); 3548 if (unlikely(ret)) { 3549 DPAA_SEC_ERR("unable to init caam source fq!"); 3550 return ret; 3551 } 3552 3553 
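	/*
	 * Keep a copy of the application's event template on the out FQ so
	 * the per-dequeue callbacks selected above can build completion
	 * events without any extra lookup.
	 */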
memcpy(&qp->outq.ev, event, sizeof(struct rte_event)); 3554 3555 return 0; 3556 } 3557 3558 int 3559 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev, 3560 int qp_id) 3561 { 3562 struct qm_mcc_initfq opts = {0}; 3563 int ret; 3564 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3565 3566 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | 3567 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; 3568 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx; 3569 qp->outq.cb.ern = ern_sec_fq_handler; 3570 qman_retire_fq(&qp->outq, NULL); 3571 qman_oos_fq(&qp->outq); 3572 ret = qman_init_fq(&qp->outq, 0, &opts); 3573 if (ret) 3574 DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret); 3575 qp->outq.cb.dqrr = NULL; 3576 3577 return ret; 3578 } 3579 3580 static struct rte_cryptodev_ops crypto_ops = { 3581 .dev_configure = dpaa_sec_dev_configure, 3582 .dev_start = dpaa_sec_dev_start, 3583 .dev_stop = dpaa_sec_dev_stop, 3584 .dev_close = dpaa_sec_dev_close, 3585 .dev_infos_get = dpaa_sec_dev_infos_get, 3586 .queue_pair_setup = dpaa_sec_queue_pair_setup, 3587 .queue_pair_release = dpaa_sec_queue_pair_release, 3588 .sym_session_get_size = dpaa_sec_sym_session_get_size, 3589 .sym_session_configure = dpaa_sec_sym_session_configure, 3590 .sym_session_clear = dpaa_sec_sym_session_clear, 3591 /* Raw data-path API related operations */ 3592 .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size, 3593 .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx, 3594 }; 3595 3596 static const struct rte_security_capability * 3597 dpaa_sec_capabilities_get(void *device __rte_unused) 3598 { 3599 return dpaa_sec_security_cap; 3600 } 3601 3602 static const struct rte_security_ops dpaa_sec_security_ops = { 3603 .session_create = dpaa_sec_security_session_create, 3604 .session_update = NULL, 3605 .session_get_size = dpaa_sec_security_session_get_size, 3606 .session_stats_get = NULL, 3607 .session_destroy = dpaa_sec_security_session_destroy, 3608 .set_pkt_metadata = NULL, 3609 .capabilities_get = dpaa_sec_capabilities_get 3610 }; 3611 3612 static int 3613 dpaa_sec_uninit(struct rte_cryptodev *dev) 3614 { 3615 struct dpaa_sec_dev_private *internals; 3616 3617 if (dev == NULL) 3618 return -ENODEV; 3619 3620 internals = dev->data->dev_private; 3621 rte_free(dev->security_ctx); 3622 3623 rte_free(internals); 3624 3625 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", 3626 dev->data->name, rte_socket_id()); 3627 3628 return 0; 3629 } 3630 3631 static int 3632 check_devargs_handler(__rte_unused const char *key, const char *value, 3633 __rte_unused void *opaque) 3634 { 3635 dpaa_sec_dp_dump = atoi(value); 3636 if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) { 3637 DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not " 3638 "supported, changing to FULL error prints"); 3639 dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP; 3640 } 3641 3642 return 0; 3643 } 3644 3645 static void 3646 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key) 3647 { 3648 struct rte_kvargs *kvlist; 3649 3650 if (!devargs) 3651 return; 3652 3653 kvlist = rte_kvargs_parse(devargs->args, NULL); 3654 if (!kvlist) 3655 return; 3656 3657 if (!rte_kvargs_count(kvlist, key)) { 3658 rte_kvargs_free(kvlist); 3659 return; 3660 } 3661 3662 rte_kvargs_process(kvlist, key, 3663 check_devargs_handler, NULL); 3664 rte_kvargs_free(kvlist); 3665 } 3666 3667 static int 3668 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) 3669 { 3670 struct dpaa_sec_dev_private *internals; 3671 struct rte_security_ctx *security_instance; 3672 struct dpaa_sec_qp *qp; 3673 
uint32_t i, flags; 3674 int ret; 3675 void *cmd_map; 3676 int map_fd = -1; 3677 3678 PMD_INIT_FUNC_TRACE(); 3679 3680 internals = cryptodev->data->dev_private; 3681 map_fd = open("/dev/mem", O_RDWR); 3682 if (unlikely(map_fd < 0)) { 3683 DPAA_SEC_ERR("Unable to open (/dev/mem)"); 3684 return map_fd; 3685 } 3686 internals->sec_hw = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE, 3687 MAP_SHARED, map_fd, SEC_BASE_ADDR); 3688 if (internals->sec_hw == MAP_FAILED) { 3689 DPAA_SEC_ERR("Memory map failed"); 3690 close(map_fd); 3691 return -EINVAL; 3692 } 3693 cmd_map = (uint8_t *)internals->sec_hw + 3694 (BLOCK_OFFSET * QI_BLOCK_NUMBER) + CMD_REG; 3695 if (!(be32_to_cpu(rte_read32(cmd_map)) & QICTL_DQEN)) 3696 /* enable QI interface */ 3697 rte_write32(cpu_to_be32(QICTL_DQEN), cmd_map); 3698 3699 ret = munmap(internals->sec_hw, MAP_SIZE); 3700 if (ret) 3701 DPAA_SEC_WARN("munmap failed"); 3702 3703 close(map_fd); 3704 cryptodev->driver_id = dpaa_cryptodev_driver_id; 3705 cryptodev->dev_ops = &crypto_ops; 3706 3707 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst; 3708 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst; 3709 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3710 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3711 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3712 RTE_CRYPTODEV_FF_SECURITY | 3713 RTE_CRYPTODEV_FF_SYM_RAW_DP | 3714 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3715 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3716 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3717 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3718 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3719 3720 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; 3721 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS; 3722 3723 /* 3724 * For secondary processes, we don't initialise any further as primary 3725 * has already done this work. 
Only check we don't need a different 3726 * RX function 3727 */ 3728 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3729 DPAA_SEC_WARN("Device already init by primary process"); 3730 return 0; 3731 } 3732 /* Initialize security_ctx only for primary process*/ 3733 security_instance = rte_malloc("rte_security_instances_ops", 3734 sizeof(struct rte_security_ctx), 0); 3735 if (security_instance == NULL) 3736 return -ENOMEM; 3737 security_instance->device = (void *)cryptodev; 3738 security_instance->ops = &dpaa_sec_security_ops; 3739 security_instance->sess_cnt = 0; 3740 cryptodev->security_ctx = security_instance; 3741 rte_spinlock_init(&internals->lock); 3742 for (i = 0; i < internals->max_nb_queue_pairs; i++) { 3743 /* init qman fq for queue pair */ 3744 qp = &internals->qps[i]; 3745 ret = dpaa_sec_init_tx(&qp->outq); 3746 if (ret) { 3747 DPAA_SEC_ERR("config tx of queue pair %d", i); 3748 goto init_error; 3749 } 3750 } 3751 3752 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | 3753 QMAN_FQ_FLAG_TO_DCPORTAL; 3754 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { 3755 /* create rx qman fq for sessions*/ 3756 ret = qman_create_fq(0, flags, &internals->inq[i]); 3757 if (unlikely(ret != 0)) { 3758 DPAA_SEC_ERR("sec qman_create_fq failed"); 3759 goto init_error; 3760 } 3761 } 3762 3763 dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE); 3764 3765 DPAA_SEC_INFO("%s cryptodev init", cryptodev->data->name); 3766 return 0; 3767 3768 init_error: 3769 DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name); 3770 3771 rte_free(cryptodev->security_ctx); 3772 return -EFAULT; 3773 } 3774 3775 static int 3776 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, 3777 struct rte_dpaa_device *dpaa_dev) 3778 { 3779 struct rte_cryptodev *cryptodev; 3780 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3781 3782 int retval; 3783 3784 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3785 return 0; 3786 3787 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name); 3788 3789 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3790 if (cryptodev == NULL) 3791 return -ENOMEM; 3792 3793 cryptodev->data->dev_private = rte_zmalloc_socket( 3794 "cryptodev private structure", 3795 sizeof(struct dpaa_sec_dev_private), 3796 RTE_CACHE_LINE_SIZE, 3797 rte_socket_id()); 3798 3799 if (cryptodev->data->dev_private == NULL) 3800 rte_panic("Cannot allocate memzone for private " 3801 "device data"); 3802 3803 dpaa_dev->crypto_dev = cryptodev; 3804 cryptodev->device = &dpaa_dev->device; 3805 3806 /* init user callbacks */ 3807 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3808 3809 /* if sec device version is not configured */ 3810 if (!rta_get_sec_era()) { 3811 const struct device_node *caam_node; 3812 3813 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 3814 const uint32_t *prop = of_get_property(caam_node, 3815 "fsl,sec-era", 3816 NULL); 3817 if (prop) { 3818 rta_set_sec_era( 3819 INTL_SEC_ERA(rte_cpu_to_be_32(*prop))); 3820 break; 3821 } 3822 } 3823 } 3824 3825 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 3826 retval = rte_dpaa_portal_init((void *)1); 3827 if (retval) { 3828 DPAA_SEC_ERR("Unable to initialize portal"); 3829 goto out; 3830 } 3831 } 3832 3833 /* Invoke PMD device initialization function */ 3834 retval = dpaa_sec_dev_init(cryptodev); 3835 if (retval == 0) { 3836 rte_cryptodev_pmd_probing_finish(cryptodev); 3837 return 0; 3838 } 3839 3840 retval = -ENXIO; 3841 out: 3842 /* In case of error, cleanup is done */ 3843 
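	/* Both portal-affinity and dev-init failures land here; the private
	 * area allocated above is freed and the cryptodev slot is handed
	 * back before the negative retval is propagated to the bus.
	 */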
rte_free(cryptodev->data->dev_private); 3844 3845 rte_cryptodev_pmd_release_device(cryptodev); 3846 3847 return retval; 3848 } 3849 3850 static int 3851 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev) 3852 { 3853 struct rte_cryptodev *cryptodev; 3854 int ret; 3855 3856 cryptodev = dpaa_dev->crypto_dev; 3857 if (cryptodev == NULL) 3858 return -ENODEV; 3859 3860 ret = dpaa_sec_uninit(cryptodev); 3861 if (ret) 3862 return ret; 3863 3864 return rte_cryptodev_pmd_destroy(cryptodev); 3865 } 3866 3867 static struct rte_dpaa_driver rte_dpaa_sec_driver = { 3868 .drv_type = FSL_DPAA_CRYPTO, 3869 .driver = { 3870 .name = "DPAA SEC PMD" 3871 }, 3872 .probe = cryptodev_dpaa_sec_probe, 3873 .remove = cryptodev_dpaa_sec_remove, 3874 }; 3875 3876 static struct cryptodev_driver dpaa_sec_crypto_drv; 3877 3878 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); 3879 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, 3880 dpaa_cryptodev_driver_id); 3881 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD, 3882 DRIVER_DUMP_MODE "=<int>"); 3883 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE); 3884
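/*
 * Usage note (illustrative only): the DRIVER_DUMP_MODE key registered above
 * is parsed during probe by check_devargs_handler(), which clamps any
 * out-of-range value to the full-dump level. It would typically be supplied
 * on the device's devargs string, e.g. something along the lines of
 * "dpaa_sec-1,drv_dump_mode=1"; the exact device name is platform dependent
 * and is shown here only as an example.
 */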