/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

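/*
 * Allocate a per-op context from the session's ctx_pool. The context embeds
 * the SG table used to build the compound frame, a scratch digest buffer and
 * the virtual-to-IOVA offset of the context object itself.
 */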
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (uint64_t)ctx
				- rte_mempool_virt2iova(ctx);

	return ctx;
}

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].iova +
				(vaddr_64 - memseg[i].addr_64);

			return (rte_iova_t)paddr;
		}
	}
	return (rte_iova_t)(NULL);
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (uint64_t)vaddr - ctx->vtop_offset;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].iova &&
		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].iova));
	}
	return NULL;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue can be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

	return ret;
}

/* requests are enqueued on in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, &ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, &ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is set as 0 here and it will be
			 * overwritten in fd for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

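/*
 * Volatile dequeue: pull up to 'len' frames from the given FQ with a QMan
 * VDQCR command and poll the portal until the command completes.
 */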
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
	unsigned int pkts = 0;
	int ret;
	struct qm_mcr_queryfq_np np;
	enum qman_fq_state state;
	uint32_t flags;
	uint32_t vdqcr;

	qman_query_fq_np(fq, &np);
	if (np.frm_cnt) {
		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
		if (exact)
			vdqcr |= QM_VDQCR_EXACT;
		ret = qman_volatile_dequeue(fq, 0, vdqcr);
		if (ret)
			return 0;
		do {
			pkts += qman_poll_dqrr(len);
			qman_fq_state(fq, &state, &flags);
		} while (flags & QMAN_FQ_STATE_VDQCR);
	}
	return pkts;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;

	fq = &qp->outq;
	dpaa_sec_op_nb = 0;
	dpaa_sec_ops = ops;

	if (unlikely(nb_ops > DPAA_SEC_BURST))
		nb_ops = DPAA_SEC_BURST;

	return dpaa_volatile_deq(fq, nb_ops, 1);
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

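/*
 * Build a cipher-only job: sg[0] describes the output buffer, sg[1] is an
 * extension entry pointing to the IV followed by the source data.
 */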
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

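/*
 * Build a job for protocol offload (IPsec) sessions: the complete packet is
 * passed to SEC as a single input buffer and the result is written back from
 * the start of the destination buffer, with the remaining buffer room as the
 * output length.
 */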
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct qm_fd fd;
	int ret;
	uint32_t auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		ses = (dpaa_sec_session *)get_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		ses = (dpaa_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -ENOTSUP;

	if (unlikely(!ses->qp || ses->qp != qp)) {
		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
		if (dpaa_sec_attach_sess_q(qp, ses))
			return -1;
	}

	/*
	 * Segmented buffer is not supported.
	 */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}
	if (is_auth_only(ses)) {
		cf = build_auth_only(op, ses);
	} else if (is_cipher_only(ses)) {
		cf = build_cipher_only(op, ses);
	} else if (is_aead(ses)) {
		cf = build_cipher_auth_gcm(op, ses);
		auth_only_len = ses->auth_only_len;
	} else if (is_auth_cipher(ses)) {
		cf = build_cipher_auth(op, ses);
	} else if (is_proto_ipsec(ses)) {
		cf = build_proto(op, ses);
	} else {
		PMD_TX_LOG(ERR, "not supported sec op");
		return -ENOTSUP;
	}
	if (unlikely(!cf))
		return -ENOMEM;

	memset(&fd, 0, sizeof(struct qm_fd));
	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
	fd._format1 = qm_fd_compound;
	fd.length29 = 2 * sizeof(struct qm_sg_entry);
	/* Auth_only_len is set as 0 in descriptor and it is overwritten
	 * here in the fd.cmd which will update the DPOVRD reg.
	 */
	if (auth_only_len)
		fd.cmd = 0x80000000 | auth_only_len;
	do {
		ret = qman_enqueue(ses->inq, &fd, 0);
	} while (ret != 0);

	return 0;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	int32_t ret;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Prepare each packet which is to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
			return 0;
		}
		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
		if (!ret)
			num_tx++;
	}
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev = %p, queue = %d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev = %p, queue = %d, conf = %p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

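/*
 * Each session uses a dedicated SEC input frame queue taken from the
 * device-private pool. The FQ is reserved here and scheduled towards the
 * CAAM channel when the session is first used on a queue pair (see
 * dpaa_sec_attach_sess_q()); it is released again when the session is
 * cleared.
 */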
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
		return -1;
	}

	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");

	return ret;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;

	sess->qp = NULL;

	return 0;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		       struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					     auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto out;
	}

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
				: IPPROTO_AH;
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
	} else
		goto out;
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto out;
	}

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}

static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
				  struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check that we don't need a
	 * different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);